repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 classes) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 classes)
---|---|---|---|---|---|
mikewiebe-ansible/ansible | lib/ansible/modules/cloud/openstack/os_volume_snapshot.py | 63 | 6004 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Mario Santos <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: os_volume_snapshot
short_description: Create/Delete Cinder Volume Snapshots
extends_documentation_fragment: openstack
version_added: "2.6"
author: "Mario Santos (@ruizink)"
description:
- Create or Delete cinder block storage volume snapshots
options:
display_name:
description:
- Name of the snapshot
required: true
aliases: ['name']
display_description:
description:
- String describing the snapshot
aliases: ['description']
volume:
description:
- The volume name or id to create/delete the snapshot
required: True
force:
description:
- Allows or disallows snapshot of a volume to be created when the volume
is attached to an instance.
type: bool
default: 'no'
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
availability_zone:
description:
- Availability zone in which to create the snapshot.
requirements:
- "python >= 2.7"
- "openstacksdk"
'''
EXAMPLES = '''
# Creates a snapshot on volume 'test_volume'
- name: create and delete snapshot
hosts: localhost
tasks:
- name: create snapshot
os_volume_snapshot:
state: present
cloud: mordred
availability_zone: az2
display_name: test_snapshot
volume: test_volume
- name: delete snapshot
os_volume_snapshot:
state: absent
cloud: mordred
availability_zone: az2
display_name: test_snapshot
volume: test_volume
'''
RETURN = '''
snapshot:
description: The snapshot instance after the change
returned: success
type: dict
sample:
id: 837aca54-c0ee-47a2-bf9a-35e1b4fdac0c
name: test_snapshot
volume_id: ec646a7c-6a35-4857-b38b-808105a24be6
size: 2
status: available
display_name: test_snapshot
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.openstack import (openstack_full_argument_spec,
openstack_module_kwargs,
openstack_cloud_from_module)
def _present_volume_snapshot(module, cloud):
volume = cloud.get_volume(module.params['volume'])
snapshot = cloud.get_volume_snapshot(module.params['display_name'],
filters={'volume_id': volume.id})
if not snapshot:
snapshot = cloud.create_volume_snapshot(volume.id,
force=module.params['force'],
wait=module.params['wait'],
timeout=module.params[
'timeout'],
name=module.params['display_name'],
description=module.params.get(
'display_description')
)
module.exit_json(changed=True, snapshot=snapshot)
else:
module.exit_json(changed=False, snapshot=snapshot)
def _absent_volume_snapshot(module, cloud):
volume = cloud.get_volume(module.params['volume'])
snapshot = cloud.get_volume_snapshot(module.params['display_name'],
filters={'volume_id': volume.id})
if not snapshot:
module.exit_json(changed=False)
else:
cloud.delete_volume_snapshot(name_or_id=snapshot.id,
wait=module.params['wait'],
timeout=module.params['timeout'],
)
module.exit_json(changed=True, snapshot_id=snapshot.id)
def _system_state_change(module, cloud):
volume = cloud.get_volume(module.params['volume'])
snapshot = cloud.get_volume_snapshot(module.params['display_name'],
filters={'volume_id': volume.id})
state = module.params['state']
if state == 'present':
return snapshot is None
if state == 'absent':
return snapshot is not None
def main():
argument_spec = openstack_full_argument_spec(
display_name=dict(required=True, aliases=['name']),
display_description=dict(default=None, aliases=['description']),
volume=dict(required=True),
force=dict(required=False, default=False, type='bool'),
state=dict(default='present', choices=['absent', 'present']),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec,
supports_check_mode=True,
**module_kwargs)
sdk, cloud = openstack_cloud_from_module(module)
state = module.params['state']
try:
if cloud.volume_exists(module.params['volume']):
if module.check_mode:
module.exit_json(changed=_system_state_change(module, cloud))
if state == 'present':
_present_volume_snapshot(module, cloud)
if state == 'absent':
_absent_volume_snapshot(module, cloud)
else:
module.fail_json(
msg="No volume with name or id '{0}' was found.".format(
module.params['volume']))
except (sdk.exceptions.OpenStackCloudException, sdk.exceptions.ResourceTimeout) as e:
module.fail_json(msg=e.message)
if __name__ == '__main__':
main()
| gpl-3.0 |
laterpay/amphtml | validator/validator_gen.py | 112 | 11182 | #
# Copyright 2015 The AMP HTML Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the license.
#
"""Generates validator-generated.js.
This script reads validator.protoascii and reflects over its contents
to generate Javascript. This Javascript consists of Closure-style
classes and enums, as well as a createRules function which
instantiates the data structures specified in validator.protoascii -
the validator rules.
From a Javascript perspective, this approach looks elaborate - you may
wonder why we're not just writing Javascript directly, or why we're
not encoding our rules in JSON or YAML or even, gasp, XML? Besides the
additional type safety that we gain from our approach, it allows us to
share the rule specifications, error codes, etc. between multiple
validator implementations, including an implementation in C++. This
makes it much easier to keep otherwise likely divergent behavior in
sync.
"""
import os
def UnderscoreToCamelCase(under_score):
"""Helper function which converts under_score names to camelCase.
In proto buffers, fields have under_scores. In Javascript, fields
have camelCase.
Args:
under_score: A name, segmented by under_scores.
Returns:
A name, segmented as camelCase.
"""
segments = under_score.split('_')
return '%s%s' % (segments[0], ''.join([s.title() for s in segments[1:]]))
def FindDescriptors(validator_pb2, msg_desc_by_name, enum_desc_by_name):
"""Finds the message and enum descriptors in the file.
This method finds the message and enum descriptors from a file descriptor;
it will visit the top-level messages, and within those the enums.
Args:
validator_pb2: The proto2 Python module generated from validator.proto.
msg_desc_by_name: A map of message descriptors, keyed by full_name.
enum_desc_by_name: A map of enum descriptors, keyed by full name.
"""
for msg_type in validator_pb2.DESCRIPTOR.message_types_by_name.values():
msg_desc_by_name[msg_type.full_name] = msg_type
for enum_type in msg_type.enum_types:
enum_desc_by_name[enum_type.full_name] = enum_type
def FieldTypeFor(descriptor, field_desc):
"""Returns the Javascript type for a given field descriptor.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
field_desc: A field descriptor for a particular field in a message.
Returns:
The Javascript type for the given field descriptor.
"""
element_type = {
descriptor.FieldDescriptor.TYPE_DOUBLE: lambda: 'number',
descriptor.FieldDescriptor.TYPE_INT32: lambda: 'number',
descriptor.FieldDescriptor.TYPE_BOOL: lambda: 'boolean',
descriptor.FieldDescriptor.TYPE_STRING: lambda: 'string',
descriptor.FieldDescriptor.TYPE_ENUM: (
lambda: field_desc.enum_type.full_name),
descriptor.FieldDescriptor.TYPE_MESSAGE: (
lambda: field_desc.message_type.full_name),
}[field_desc.type]()
if field_desc.label == descriptor.FieldDescriptor.LABEL_REPEATED:
return '!Array<!%s>' % element_type
else:
return element_type
def NonRepeatedValueToString(descriptor, field_desc, value):
"""For a non-repeated field, renders the value as a Javascript literal.
Helper function for ValueToString.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
field_desc: The type descriptor for the field value to be rendered.
value: The value of the non-repeated field to be rendered.
Returns:
A Javascript literal for the provided non-repeated value.
"""
if field_desc.type == descriptor.FieldDescriptor.TYPE_STRING:
escaped = ('' + value).encode('unicode-escape')
return "'%s'" % escaped.replace("'", "\\'")
if field_desc.type == descriptor.FieldDescriptor.TYPE_BOOL:
if value:
return 'true'
return 'false'
if field_desc.type == descriptor.FieldDescriptor.TYPE_ENUM:
enum_value_name = field_desc.enum_type.values_by_number[value].name
return '%s.%s' % (field_desc.enum_type.full_name, enum_value_name)
if value is None:
return 'null'
return str(value)
def ValueToString(descriptor, field_desc, value):
"""Renders a field value as a Javascript literal.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
field_desc: The type descriptor for the field value to be rendered.
value: The value of the field to be rendered.
Returns:
A Javascript literal for the provided value.
"""
if field_desc.label == descriptor.FieldDescriptor.LABEL_REPEATED:
if value:
return '[%s]' % ', '.join([NonRepeatedValueToString(descriptor,
field_desc, s)
for s in value])
return '[]'
return NonRepeatedValueToString(descriptor, field_desc, value)
def PrintClassFor(descriptor, msg_desc, out):
"""Prints a Javascript class for the given proto message.
This method emits a Javascript class (Closure-style) for the given
proto message by appending lines to out.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
msg_desc: The descriptor for a particular message type.
out: a list of lines to output (without the newline characters), to
which this function will append.
"""
# TODO(johannes): Should we provide access to the default values?
# Those are given in field.default_value for each field.
out.append('/**')
out.append(' * @constructor')
if (msg_desc.name == 'ValidationResult' or
msg_desc.name == 'ValidationError'):
out.append(' * @export')
out.append(' */')
out.append('%s = function() {' % msg_desc.full_name)
for field in msg_desc.fields:
if field.label == descriptor.FieldDescriptor.LABEL_REPEATED:
out.append(' /** @export {%s} */' % FieldTypeFor(descriptor, field))
out.append(' this.%s = [];' % UnderscoreToCamelCase(field.name))
else:
out.append(' /** @export {?%s} */' % FieldTypeFor(descriptor, field))
out.append(' this.%s = null;' % UnderscoreToCamelCase(field.name))
out.append('};')
out.append('')
def PrintEnumFor(enum_desc, out):
"""Prints a Javascript enum for the given enum descriptor.
Args:
enum_desc: The descriptor for a particular enum type.
out: a list of lines to output (without the newline characters), to
which this function will append.
"""
out.append('/**')
out.append(' * @enum {string}')
out.append(' */')
out.append('%s = {' % enum_desc.full_name)
out.append(',\n'.join([" %s: '%s'" % (v.name, v.name)
for v in enum_desc.values]))
out.append('};')
out.append('')
def PrintObject(descriptor, msg, this_id, out):
"""Prints an object, by recursively constructing it.
This routine emits Javascript which will construct an object modeling
the provided message (in practice the ValidatorRules message).
It references the classes and enums emitted by PrintClassFor and PrintEnumFor.
Args:
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
msg: A protocol message instance.
this_id: The id for the object being printed (all variables have the form
o_${num} with ${num} being increasing integers).
out: a list of lines to output (without the newline characters), to
which this function will append.
Returns:
The next object id, that is, next variable available for creating objects.
"""
out.append(' var o_%d = new %s();' % (this_id, msg.DESCRIPTOR.full_name))
next_id = this_id + 1
for (field_desc, field_val) in msg.ListFields():
if field_desc.type == descriptor.FieldDescriptor.TYPE_MESSAGE:
if field_desc.label == descriptor.FieldDescriptor.LABEL_REPEATED:
for val in field_val:
field_id = next_id
next_id = PrintObject(descriptor, val, field_id, out)
out.append(' o_%d.%s.push(o_%d);' % (
this_id, UnderscoreToCamelCase(field_desc.name), field_id))
else:
field_id = next_id
next_id = PrintObject(descriptor, field_val, field_id, out)
out.append(' o_%d.%s = o_%d;' % (
this_id, UnderscoreToCamelCase(field_desc.name), field_id))
else:
out.append(' o_%d.%s = %s;' % (
this_id, UnderscoreToCamelCase(field_desc.name),
ValueToString(descriptor, field_desc, field_val)))
return next_id
def GenerateValidatorGeneratedJs(specfile, validator_pb2, text_format,
descriptor, out):
"""Main method for the code generator.
This method reads the specfile and appends the generated Javascript lines to out.
Args:
specfile: Path to validator.protoascii, the specfile to generate
Javascript from.
validator_pb2: The proto2 Python module generated from validator.proto.
text_format: The text_format module from the protobuf package, e.g.
google.protobuf.text_format.
descriptor: The descriptor module from the protobuf package, e.g.
google.protobuf.descriptor.
out: a list of lines to output (without the newline characters), to
which this function will append.
"""
# First, find the descriptors and enums and generate Javascript
# classes and enums.
msg_desc_by_name = {}
enum_desc_by_name = {}
FindDescriptors(validator_pb2, msg_desc_by_name, enum_desc_by_name)
rules_obj = '%s.RULES' % validator_pb2.DESCRIPTOR.package
all_names = [rules_obj] + msg_desc_by_name.keys() + enum_desc_by_name.keys()
all_names.sort()
out.append('//')
out.append('// Generated by %s - do not edit.' % os.path.basename(__file__))
out.append('//')
out.append('')
for name in all_names:
out.append("goog.provide('%s');" % name)
out.append('')
for name in all_names:
if name in msg_desc_by_name:
PrintClassFor(descriptor, msg_desc_by_name[name], out)
elif name in enum_desc_by_name:
PrintEnumFor(enum_desc_by_name[name], out)
# Read the rules file, validator.protoascii by parsing it as a text
# message of type ValidatorRules.
rules = validator_pb2.ValidatorRules()
text_format.Merge(open(specfile).read(), rules)
out.append('/**')
out.append(' * @return {!%s}' % rules.DESCRIPTOR.full_name)
out.append(' */')
out.append('function createRules() {')
PrintObject(descriptor, rules, 0, out)
out.append(' return o_0;')
out.append('}')
out.append('')
out.append('/**')
out.append(' * @type {!%s}' % rules.DESCRIPTOR.full_name)
out.append(' */')
out.append('%s = createRules();' % rules_obj)
| apache-2.0 |
popazerty/e2-gui | lib/python/Screens/InputBox.py | 14 | 5004 | from enigma import getPrevAsciiCode
from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Components.ActionMap import NumberActionMap
from Components.Label import Label
from Components.Input import Input
from Components.config import config
from Tools.BoundFunction import boundFunction
from Tools.Notifications import AddPopup
from time import time
class InputBox(Screen):
def __init__(self, session, title = "", windowTitle = _("Input"), useableChars = None, **kwargs):
Screen.__init__(self, session)
self["text"] = Label(title)
self["input"] = Input(**kwargs)
self.onShown.append(boundFunction(self.setTitle, windowTitle))
if useableChars is not None:
self["input"].setUseableChars(useableChars)
self["actions"] = NumberActionMap(["WizardActions", "InputBoxActions", "InputAsciiActions", "KeyboardInputActions"],
{
"gotAsciiCode": self.gotAsciiCode,
"ok": self.go,
"back": self.cancel,
"left": self.keyLeft,
"right": self.keyRight,
"home": self.keyHome,
"end": self.keyEnd,
"deleteForward": self.keyDelete,
"deleteBackward": self.keyBackspace,
"tab": self.keyTab,
"toggleOverwrite": self.keyInsert,
"1": self.keyNumberGlobal,
"2": self.keyNumberGlobal,
"3": self.keyNumberGlobal,
"4": self.keyNumberGlobal,
"5": self.keyNumberGlobal,
"6": self.keyNumberGlobal,
"7": self.keyNumberGlobal,
"8": self.keyNumberGlobal,
"9": self.keyNumberGlobal,
"0": self.keyNumberGlobal
}, -1)
if self["input"].type == Input.TEXT:
if config.misc.remotecontrol_text_support.value:
self.onExecBegin.append(self.setKeyboardModeNone)
else:
self.onExecBegin.append(self.setKeyboardModeAscii)
else:
self.onExecBegin.append(self.setKeyboardModeNone)
def gotAsciiCode(self):
self["input"].handleAscii(getPrevAsciiCode())
def keyLeft(self):
self["input"].left()
def keyRight(self):
self["input"].right()
def keyNumberGlobal(self, number):
self["input"].number(number)
def keyDelete(self):
self["input"].delete()
def go(self):
self.close(self["input"].getText())
def cancel(self):
self.close(None)
def keyHome(self):
self["input"].home()
def keyEnd(self):
self["input"].end()
def keyBackspace(self):
self["input"].deleteBackward()
def keyTab(self):
self["input"].tab()
def keyInsert(self):
self["input"].toggleOverwrite()
class PinInput(InputBox):
def __init__(self, session, service="", triesEntry=None, pinList=None, popup=False, simple=True, *args, **kwargs):
if not pinList: pinList = []
InputBox.__init__(self, session = session, text = " ", maxSize = True, type = Input.PIN, *args, **kwargs)
self.waitTime = 15
self.triesEntry = triesEntry
self.pinList = pinList
self["service"] = Label(service)
if service and simple:
self.skinName = "PinInputPopup"
if self.getTries() == 0:
if (self.triesEntry.time.value + (self.waitTime * 60)) > time():
remaining = (self.triesEntry.time.value + (self.waitTime * 60)) - time()
remainingMinutes = int(remaining / 60)
remainingSeconds = int(remaining % 60)
messageText = _("You have to wait %s!") % (str(remainingMinutes) + " " + _("minutes") + ", " + str(remainingSeconds) + " " + _("seconds"))
if service and simple:
AddPopup(messageText, type = MessageBox.TYPE_ERROR, timeout = 3)
self.closePinCancel()
else:
self.onFirstExecBegin.append(boundFunction(self.session.openWithCallback, self.closePinCancel, MessageBox, messageText, MessageBox.TYPE_ERROR, timeout = 3))
else:
self.setTries(3)
self["tries"] = Label("")
self.onShown.append(self.showTries)
def gotAsciiCode(self):
if self["input"].currPos == len(self["input"]) - 1:
InputBox.gotAsciiCode(self)
self.go()
else:
InputBox.gotAsciiCode(self)
def keyNumberGlobal(self, number):
if self["input"].currPos == len(self["input"]) - 1:
InputBox.keyNumberGlobal(self, number)
self.go()
else:
InputBox.keyNumberGlobal(self, number)
def checkPin(self, pin):
if pin is not None and " " not in pin and int(pin) in self.pinList:
return True
return False
def go(self):
self.triesEntry.time.value = int(time())
self.triesEntry.time.save()
if self.checkPin(self["input"].getText()):
self.setTries(3)
self.closePinCorrect()
else:
self.keyHome()
self.decTries()
if self.getTries() == 0:
self.closePinWrong()
else:
pass
def closePinWrong(self, *args):
print "args:", args
self.close(False)
def closePinCorrect(self, *args):
self.setTries(3)
self.close(True)
def closePinCancel(self, *args):
self.close(None)
def cancel(self):
self.closePinCancel()
def getTries(self):
return self.triesEntry.tries.value
def decTries(self):
self.setTries(self.triesEntry.tries.value - 1)
self.showTries()
def setTries(self, tries):
self.triesEntry.tries.value = tries
self.triesEntry.tries.save()
def showTries(self):
self["tries"].setText(_("Tries left:") + " " + str(self.getTries()))
| gpl-2.0 |
DepthDeluxe/ansible | lib/ansible/utils/display.py | 45 | 11885 | # (c) 2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import errno
import fcntl
import getpass
import locale
import logging
import os
import random
import subprocess
import sys
import textwrap
import time
from struct import unpack, pack
from termios import TIOCGWINSZ
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_bytes, to_text
from ansible.utils.color import stringc
try:
# Python 2
input = raw_input
except NameError:
# Python 3, we already have raw_input
pass
logger = None
# TODO: make this a logging callback instead
if C.DEFAULT_LOG_PATH:
path = C.DEFAULT_LOG_PATH
if (os.path.exists(path) and os.access(path, os.W_OK)) or os.access(os.path.dirname(path), os.W_OK):
logging.basicConfig(filename=path, level=logging.DEBUG, format='%(asctime)s %(name)s %(message)s')
mypid = str(os.getpid())
user = getpass.getuser()
logger = logging.getLogger("p=%s u=%s | " % (mypid, user))
else:
print("[WARNING]: log file at %s is not writeable and we cannot create it, aborting\n" % path, file=sys.stderr)
b_COW_PATHS = (
b"/usr/bin/cowsay",
b"/usr/games/cowsay",
b"/usr/local/bin/cowsay", # BSD path for cowsay
b"/opt/local/bin/cowsay", # MacPorts path for cowsay
)
class Display:
def __init__(self, verbosity=0):
self.columns = None
self.verbosity = verbosity
# list of all deprecation messages to prevent duplicate display
self._deprecations = {}
self._warns = {}
self._errors = {}
self.b_cowsay = None
self.noncow = C.ANSIBLE_COW_SELECTION
self.set_cowsay_info()
if self.b_cowsay:
try:
cmd = subprocess.Popen([self.b_cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
self.cows_available = set([to_text(c) for c in out.split()])
if C.ANSIBLE_COW_WHITELIST:
self.cows_available = set(C.ANSIBLE_COW_WHITELIST).intersection(self.cows_available)
except:
# could not execute cowsay for some reason
self.b_cowsay = False
self._set_column_width()
def set_cowsay_info(self):
if not C.ANSIBLE_NOCOWS:
for b_cow_path in b_COW_PATHS:
if os.path.exists(b_cow_path):
self.b_cowsay = b_cow_path
def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False):
""" Display a message to the user
Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.
"""
nocolor = msg
if color:
msg = stringc(msg, color)
if not log_only:
if not msg.endswith(u'\n'):
msg2 = msg + u'\n'
else:
msg2 = msg
msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr))
if sys.version_info >= (3,):
# Convert back to text string on python3
# We first convert to a byte string so that we get rid of
# characters that are invalid in the user's locale
msg2 = to_text(msg2, self._output_encoding(stderr=stderr), errors='replace')
if not stderr:
fileobj = sys.stdout
else:
fileobj = sys.stderr
fileobj.write(msg2)
try:
fileobj.flush()
except IOError as e:
# Ignore EPIPE in case fileobj has been prematurely closed, eg.
# when piping to "head -n1"
if e.errno != errno.EPIPE:
raise
if logger and not screen_only:
msg2 = nocolor.lstrip(u'\n')
msg2 = to_bytes(msg2)
if sys.version_info >= (3,):
# Convert back to text string on python3
# We first convert to a byte string so that we get rid of
# characters that are invalid in the user's locale
msg2 = to_text(msg2, self._output_encoding(stderr=stderr))
if color == C.COLOR_ERROR:
logger.error(msg2)
else:
logger.info(msg2)
def v(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=0)
def vv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=1)
def vvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=2)
def vvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=3)
def vvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=4)
def vvvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=5)
def debug(self, msg):
if C.DEFAULT_DEBUG:
self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color=C.COLOR_DEBUG)
def verbose(self, msg, host=None, caplevel=2):
if self.verbosity > caplevel:
if host is None:
self.display(msg, color=C.COLOR_VERBOSE)
else:
self.display("<%s> %s" % (host, msg), color=C.COLOR_VERBOSE, screen_only=True)
def deprecated(self, msg, version=None, removed=False):
''' used to print out a deprecation message.'''
if not removed and not C.DEPRECATION_WARNINGS:
return
if not removed:
if version:
new_msg = "[DEPRECATION WARNING]: %s.\nThis feature will be removed in version %s." % (msg, version)
else:
new_msg = "[DEPRECATION WARNING]: %s.\nThis feature will be removed in a future release." % (msg)
new_msg = new_msg + " Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.\n\n"
else:
raise AnsibleError("[DEPRECATED]: %s.\nPlease update your playbooks." % msg)
wrapped = textwrap.wrap(new_msg, self.columns, replace_whitespace=False, drop_whitespace=False)
new_msg = "\n".join(wrapped) + "\n"
if new_msg not in self._deprecations:
self.display(new_msg.strip(), color=C.COLOR_DEPRECATE, stderr=True)
self._deprecations[new_msg] = 1
def warning(self, msg, formatted=False):
if not formatted:
new_msg = "\n[WARNING]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = "\n".join(wrapped) + "\n"
else:
new_msg = "\n[WARNING]: \n%s" % msg
if new_msg not in self._warns:
self.display(new_msg, color=C.COLOR_WARN, stderr=True)
self._warns[new_msg] = 1
def system_warning(self, msg):
if C.SYSTEM_WARNINGS:
self.warning(msg)
def banner(self, msg, color=None, cows=True):
'''
Prints a header-looking line with cowsay or stars with length depending on terminal width (3 minimum)
'''
if self.b_cowsay and cows:
try:
self.banner_cowsay(msg)
return
except OSError:
self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.")
msg = msg.strip()
star_len = self.columns - len(msg)
if star_len <= 3:
star_len = 3
stars = u"*" * star_len
self.display(u"\n%s %s" % (msg, stars), color=color)
def banner_cowsay(self, msg, color=None):
if u": [" in msg:
msg = msg.replace(u"[", u"")
if msg.endswith(u"]"):
msg = msg[:-1]
runcmd = [self.b_cowsay, b"-W", b"60"]
if self.noncow:
thecow = self.noncow
if thecow == 'random':
thecow = random.choice(list(self.cows_available))
runcmd.append(b'-f')
runcmd.append(to_bytes(thecow))
runcmd.append(to_bytes(msg))
cmd = subprocess.Popen(runcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
self.display(u"%s\n" % to_text(out), color=color)
def error(self, msg, wrap_text=True):
if wrap_text:
new_msg = u"\n[ERROR]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = u"\n".join(wrapped) + u"\n"
else:
new_msg = u"ERROR! %s" % msg
if new_msg not in self._errors:
self.display(new_msg, color=C.COLOR_ERROR, stderr=True)
self._errors[new_msg] = 1
@staticmethod
def prompt(msg, private=False):
prompt_string = to_bytes(msg, encoding=Display._output_encoding())
if sys.version_info >= (3,):
# Convert back into text on python3. We do this double conversion
# to get rid of characters that are illegal in the user's locale
prompt_string = to_text(prompt_string)
if private:
return getpass.getpass(msg)
else:
return input(prompt_string)
def do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
result = None
if sys.__stdin__.isatty():
do_prompt = self.prompt
if prompt and default is not None:
msg = "%s [%s]: " % (prompt, default)
elif prompt:
msg = "%s: " % prompt
else:
msg = 'input for %s: ' % varname
if confirm:
while True:
result = do_prompt(msg, private)
second = do_prompt("confirm " + msg, private)
if result == second:
break
self.display("***** VALUES ENTERED DO NOT MATCH ****")
else:
result = do_prompt(msg, private)
else:
result = None
self.warning("Not prompting as we are not in interactive mode")
# if result is false and default is not None
if not result and default is not None:
result = default
if encrypt:
# Circular import because encrypt needs a display class
from ansible.utils.encrypt import do_encrypt
result = do_encrypt(result, encrypt, salt_size, salt)
# handle utf-8 chars
result = to_text(result, errors='surrogate_or_strict')
return result
@staticmethod
def _output_encoding(stderr=False):
encoding = locale.getpreferredencoding()
# https://bugs.python.org/issue6202
# Python2 hardcodes an obsolete value on Mac. Use MacOSX defaults
# instead.
if encoding in ('mac-roman',):
encoding = 'utf-8'
return encoding
def _set_column_width(self):
if os.isatty(0):
tty_size = unpack('HHHH', fcntl.ioctl(0, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[1]
else:
tty_size = 0
self.columns = max(79, tty_size - 1)
| gpl-3.0 |
wakermahmud/sync-engine | inbox/models/secret.py | 6 | 1400 | from sqlalchemy import Column, Enum, Integer
from sqlalchemy.types import BLOB
from sqlalchemy.orm import validates
from inbox.models.base import MailSyncBase
from inbox.security.oracles import get_encryption_oracle, get_decryption_oracle
class Secret(MailSyncBase):
"""Simple local secrets table."""
_secret = Column(BLOB, nullable=False)
# Type of secret
type = Column(Enum('password', 'token'), nullable=False)
# Scheme used
encryption_scheme = Column(Integer, server_default='0', nullable=False)
@property
def secret(self):
with get_decryption_oracle('SECRET_ENCRYPTION_KEY') as d_oracle:
return d_oracle.decrypt(
self._secret,
encryption_scheme=self.encryption_scheme)
@secret.setter
def secret(self, plaintext):
"""
The secret must be a byte sequence.
The type must be specified as 'password'/'token'.
"""
if not isinstance(plaintext, bytes):
raise TypeError('Invalid secret')
with get_encryption_oracle('SECRET_ENCRYPTION_KEY') as e_oracle:
self._secret, self.encryption_scheme = e_oracle.encrypt(plaintext)
@validates('type')
def validate_type(self, k, type):
if type != 'password' and type != 'token':
raise TypeError('Invalid secret type: must be password or token')
return type
| agpl-3.0 |
z1gm4/desarrollo_web_udp | env/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/util/response.py | 199 | 2167 | from __future__ import absolute_import
from ..packages.six.moves import http_client as httplib
from ..exceptions import HeaderParsingError
def is_fp_closed(obj):
"""
Checks whether a given file-like object is closed.
:param obj:
The file-like object to check.
"""
try:
# Check via the official file-like-object way.
return obj.closed
except AttributeError:
pass
try:
# Check if the object is a container for another file-like object that
# gets released on exhaustion (e.g. HTTPResponse).
return obj.fp is None
except AttributeError:
pass
raise ValueError("Unable to determine whether fp is closed.")
def assert_header_parsing(headers):
"""
Asserts whether all headers have been successfully parsed.
Extracts encountered errors from the result of parsing headers.
Only works on Python 3.
:param headers: Headers to verify.
:type headers: `httplib.HTTPMessage`.
:raises urllib3.exceptions.HeaderParsingError:
If parsing errors are found.
"""
# This will fail silently if we pass in the wrong kind of parameter.
# To make debugging easier add an explicit check.
if not isinstance(headers, httplib.HTTPMessage):
raise TypeError('expected httplib.Message, got {0}.'.format(
type(headers)))
defects = getattr(headers, 'defects', None)
get_payload = getattr(headers, 'get_payload', None)
unparsed_data = None
if get_payload: # Platform-specific: Python 3.
unparsed_data = get_payload()
if defects or unparsed_data:
raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
def is_response_to_head(response):
"""
Checks whether the request of a response has been a HEAD request.
Handles the quirks of AppEngine.
:param conn:
:type conn: :class:`httplib.HTTPResponse`
"""
# FIXME: Can we do this somehow without accessing private httplib _method?
method = response._method
if isinstance(method, int): # Platform-specific: Appengine
return method == 3
return method.upper() == 'HEAD'
| gpl-3.0 |
codeworldprodigy/lab4 | lib/jinja2/defaults.py | 659 | 1068 | # -*- coding: utf-8 -*-
"""
jinja2.defaults
~~~~~~~~~~~~~~~
Jinja default filters and tags.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2._compat import range_type
from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner
# defaults for the parser / lexer
BLOCK_START_STRING = '{%'
BLOCK_END_STRING = '%}'
VARIABLE_START_STRING = '{{'
VARIABLE_END_STRING = '}}'
COMMENT_START_STRING = '{#'
COMMENT_END_STRING = '#}'
LINE_STATEMENT_PREFIX = None
LINE_COMMENT_PREFIX = None
TRIM_BLOCKS = False
LSTRIP_BLOCKS = False
NEWLINE_SEQUENCE = '\n'
KEEP_TRAILING_NEWLINE = False
# default filters, tests and namespace
from jinja2.filters import FILTERS as DEFAULT_FILTERS
from jinja2.tests import TESTS as DEFAULT_TESTS
DEFAULT_NAMESPACE = {
'range': range_type,
'dict': lambda **kw: kw,
'lipsum': generate_lorem_ipsum,
'cycler': Cycler,
'joiner': Joiner
}
# export all constants
__all__ = tuple(x for x in locals().keys() if x.isupper())
| apache-2.0 |
CallaJun/hackprince | indico/networkx/algorithms/components/tests/test_strongly_connected.py | 11 | 5264 | #!/usr/bin/env python
from nose.tools import *
import networkx as nx
from networkx import NetworkXNotImplemented
class TestStronglyConnected:
def setUp(self):
self.gc=[]
G=nx.DiGraph()
G.add_edges_from([(1,2),(2,3),(2,8),(3,4),(3,7),
(4,5),(5,3),(5,6),(7,4),(7,6),(8,1),(8,7)])
C=[[3, 4, 5, 7], [1, 2, 8], [6]]
self.gc.append((G,C))
G= nx.DiGraph()
G.add_edges_from([(1,2),(1,3),(1,4),(4,2),(3,4),(2,3)])
C = [[2, 3, 4],[1]]
self.gc.append((G,C))
G = nx.DiGraph()
G.add_edges_from([(1,2),(2,3),(3,2),(2,1)])
C = [[1, 2, 3]]
self.gc.append((G,C))
# Eppstein's tests
G = nx.DiGraph({ 0:[1],1:[2,3],2:[4,5],3:[4,5],4:[6],5:[],6:[]})
C = [[0],[1],[2],[3],[4],[5],[6]]
self.gc.append((G,C))
G = nx.DiGraph({0:[1],1:[2,3,4],2:[0,3],3:[4],4:[3]})
C = [[0,1,2],[3,4]]
self.gc.append((G,C))
def test_tarjan(self):
scc=nx.strongly_connected_components
for G,C in self.gc:
assert_equal(sorted([sorted(g) for g in scc(G)]),sorted(C))
def test_tarjan_recursive(self):
scc=nx.strongly_connected_components_recursive
for G,C in self.gc:
assert_equal(sorted([sorted(g) for g in scc(G)]),sorted(C))
def test_kosaraju(self):
scc=nx.kosaraju_strongly_connected_components
for G,C in self.gc:
assert_equal(sorted([sorted(g) for g in scc(G)]),sorted(C))
def test_number_strongly_connected_components(self):
ncc=nx.number_strongly_connected_components
for G,C in self.gc:
assert_equal(ncc(G),len(C))
def test_is_strongly_connected(self):
for G,C in self.gc:
if len(C)==1:
assert_true(nx.is_strongly_connected(G))
else:
assert_false(nx.is_strongly_connected(G))
def test_strongly_connected_component_subgraphs(self):
scc=nx.strongly_connected_component_subgraphs
for G,C in self.gc:
assert_equal(sorted([sorted(g.nodes()) for g in scc(G)]),sorted(C))
G,C=self.gc[0]
G.add_edge(1,2,eattr='red')
G.node[1]['nattr']='blue'
G.graph['gattr']='green'
sgs=sorted(list(scc(G)), key=len, reverse=False)[1]
assert_equal(sgs[1][2]['eattr'],'red')
assert_equal(sgs.node[1]['nattr'],'blue')
assert_equal(sgs.graph['gattr'],'green')
sgs[1][2]['eattr']='blue'
assert_equal(G[1][2]['eattr'],'red')
assert_equal(sgs[1][2]['eattr'],'blue')
def test_contract_scc1(self):
G = nx.DiGraph()
G.add_edges_from([(1,2),(2,3),(2,11),(2,12),(3,4),(4,3),(4,5),
(5,6),(6,5),(6,7),(7,8),(7,9),(7,10),(8,9),
(9,7),(10,6),(11,2),(11,4),(11,6),(12,6),(12,11)])
scc = list(nx.strongly_connected_components(G))
cG = nx.condensation(G, scc)
# DAG
assert_true(nx.is_directed_acyclic_graph(cG))
# # nodes
assert_equal(sorted(cG.nodes()),[0,1,2,3])
# # edges
mapping={}
for i,component in enumerate(scc):
for n in component:
mapping[n] = i
edge=(mapping[2],mapping[3])
assert_true(cG.has_edge(*edge))
edge=(mapping[2],mapping[5])
assert_true(cG.has_edge(*edge))
edge=(mapping[3],mapping[5])
assert_true(cG.has_edge(*edge))
def test_contract_scc_isolate(self):
# Bug found and fixed in [1687].
G = nx.DiGraph()
G.add_edge(1,2)
G.add_edge(2,1)
scc = list(nx.strongly_connected_components(G))
cG = nx.condensation(G, scc)
assert_equal(cG.nodes(),[0])
assert_equal(cG.edges(),[])
def test_contract_scc_edge(self):
G = nx.DiGraph()
G.add_edge(1,2)
G.add_edge(2,1)
G.add_edge(2,3)
G.add_edge(3,4)
G.add_edge(4,3)
scc = list(nx.strongly_connected_components(G))
cG = nx.condensation(G, scc)
assert_equal(cG.nodes(),[0,1])
if 1 in scc[0]:
edge = (0,1)
else:
edge = (1,0)
assert_equal(cG.edges(),[edge])
def test_condensation_mapping_and_members(self):
G, C = self.gc[1]
cG = nx.condensation(G)
mapping = cG.graph['mapping']
assert_true(all(n in G for n in mapping))
assert_true(all(0 == cN for n, cN in mapping.items() if n in C[0]))
assert_true(all(1 == cN for n, cN in mapping.items() if n in C[1]))
for n, d in cG.nodes(data=True):
assert_equal(C[n], cG.node[n]['members'])
def test_connected_raise(self):
G=nx.Graph()
assert_raises(NetworkXNotImplemented,nx.strongly_connected_components,G)
assert_raises(NetworkXNotImplemented,nx.kosaraju_strongly_connected_components,G)
assert_raises(NetworkXNotImplemented,nx.strongly_connected_components_recursive,G)
assert_raises(NetworkXNotImplemented,nx.strongly_connected_component_subgraphs,G)
assert_raises(NetworkXNotImplemented,nx.is_strongly_connected,G)
assert_raises(NetworkXNotImplemented,nx.condensation,G)
| lgpl-3.0 |
Ross-cz/shaka-player | third_party/gjslint/closure_linter-2.3.13/closure_linter/not_strict_test.py | 129 | 2318 | #!/usr/bin/env python
#
# Copyright 2011 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gjslint --nostrict.
Tests errors that can be thrown by gjslint when not in strict mode.
"""
import os
import sys
import unittest
import gflags as flags
import unittest as googletest
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import filetestcase
_RESOURCE_PREFIX = 'closure_linter/testdata'
flags.FLAGS.strict = False
flags.FLAGS.custom_jsdoc_tags = ('customtag', 'requires')
flags.FLAGS.closurized_namespaces = ('goog', 'dummy')
flags.FLAGS.limited_doc_files = ('externs.js', 'dummy.js',
'limited_doc_checks.js')
# List of files under testdata to test.
# We need to list files explicitly since pyglib can't list directories.
_TEST_FILES = [
'not_strict.js'
]
class GJsLintTestSuite(unittest.TestSuite):
"""Test suite to run a GJsLintTest for each of several files.
If sys.argv[1:] is non-empty, it is interpreted as a list of filenames in
testdata to test. Otherwise, _TEST_FILES is used.
"""
def __init__(self, tests=()):
unittest.TestSuite.__init__(self, tests)
argv = sys.argv and sys.argv[1:] or []
if argv:
test_files = argv
else:
test_files = _TEST_FILES
for test_file in test_files:
resource_path = os.path.join(_RESOURCE_PREFIX, test_file)
self.addTest(filetestcase.AnnotatedFileTestCase(resource_path,
runner.Run,
errors.ByName))
if __name__ == '__main__':
# Don't let main parse args; it happens in the TestSuite.
googletest.main(argv=sys.argv[0:1], defaultTest='GJsLintTestSuite')
| apache-2.0 |
sajeeshcs/nested_quota_final | nova/cells/rpc_driver.py | 63 | 7123 | # Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Cells RPC Communication Driver
"""
from oslo_config import cfg
import oslo_messaging as messaging
from nova.cells import driver
from nova import rpc
cell_rpc_driver_opts = [
cfg.StrOpt('rpc_driver_queue_base',
default='cells.intercell',
help="Base queue name to use when communicating between "
"cells. Various topics by message type will be "
"appended to this.")]
CONF = cfg.CONF
CONF.register_opts(cell_rpc_driver_opts, group='cells')
CONF.import_opt('call_timeout', 'nova.cells.opts', group='cells')
rpcapi_cap_opt = cfg.StrOpt('intercell',
help='Set a version cap for messages sent between cells services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
class CellsRPCDriver(driver.BaseCellsDriver):
"""Driver for cell<->cell communication via RPC. This is used to
setup the RPC consumers as well as to send a message to another cell.
One instance of this class will be created for every neighbor cell
that we find in the DB and it will be associated with the cell in
its CellState.
One instance is also created by the cells manager for setting up
the consumers.
"""
def __init__(self, *args, **kwargs):
super(CellsRPCDriver, self).__init__(*args, **kwargs)
self.rpc_servers = []
self.intercell_rpcapi = InterCellRPCAPI()
def start_servers(self, msg_runner):
"""Start RPC servers.
Start up 2 separate servers for handling inter-cell
communication via RPC. Both handle the same types of
messages, but requests/replies are separated to solve
potential deadlocks. (If we used the same queue for both,
it's possible to exhaust the RPC thread pool while we wait
for replies.. such that we'd never consume a reply.)
"""
topic_base = CONF.cells.rpc_driver_queue_base
proxy_manager = InterCellRPCDispatcher(msg_runner)
for msg_type in msg_runner.get_message_types():
target = messaging.Target(topic='%s.%s' % (topic_base, msg_type),
server=CONF.host)
# NOTE(comstud): We do not need to use the object serializer
# on this because object serialization is taken care for us in
# the nova.cells.messaging module.
server = rpc.get_server(target, endpoints=[proxy_manager])
server.start()
self.rpc_servers.append(server)
def stop_servers(self):
"""Stop RPC servers.
NOTE: Currently there's no hooks when stopping services
to have managers cleanup, so this is not currently called.
"""
for server in self.rpc_servers:
server.stop()
def send_message_to_cell(self, cell_state, message):
"""Use the IntercellRPCAPI to send a message to a cell."""
self.intercell_rpcapi.send_message_to_cell(cell_state, message)
class InterCellRPCAPI(object):
"""Client side of the Cell<->Cell RPC API.
The CellsRPCDriver uses this to make calls to another cell.
API version history:
1.0 - Initial version.
... Grizzly supports message version 1.0. So, any changes to existing
methods in 2.x after that point should be done such that they can
handle the version_cap being set to 1.0.
"""
VERSION_ALIASES = {
'grizzly': '1.0',
}
def __init__(self):
super(InterCellRPCAPI, self).__init__()
self.version_cap = (
self.VERSION_ALIASES.get(CONF.upgrade_levels.intercell,
CONF.upgrade_levels.intercell))
self.transports = {}
def _get_client(self, next_hop, topic):
"""Turn the DB information for a cell into a messaging.RPCClient."""
transport = self._get_transport(next_hop)
target = messaging.Target(topic=topic, version='1.0')
serializer = rpc.RequestContextSerializer(None)
return messaging.RPCClient(transport,
target,
version_cap=self.version_cap,
serializer=serializer)
def _get_transport(self, next_hop):
"""NOTE(belliott) Each Transport object contains connection pool
state. Maintain references to them to avoid continual reconnects
to the message broker.
"""
transport_url = next_hop.db_info['transport_url']
if transport_url not in self.transports:
transport = messaging.get_transport(cfg.CONF, transport_url,
rpc.TRANSPORT_ALIASES)
self.transports[transport_url] = transport
else:
transport = self.transports[transport_url]
return transport
def send_message_to_cell(self, cell_state, message):
"""Send a message to another cell by JSON-ifying the message and
making an RPC cast to 'process_message'. If the message says to
fanout, do it. The topic that is used will be
'CONF.rpc_driver_queue_base.<message_type>'.
"""
topic_base = CONF.cells.rpc_driver_queue_base
topic = '%s.%s' % (topic_base, message.message_type)
cctxt = self._get_client(cell_state, topic)
if message.fanout:
cctxt = cctxt.prepare(fanout=message.fanout)
return cctxt.cast(message.ctxt, 'process_message',
message=message.to_json())
class InterCellRPCDispatcher(object):
"""RPC Dispatcher to handle messages received from other cells.
All messages received here have come from a sibling cell. Depending
on the ultimate target and type of message, we may process the message
in this cell, relay the message to another sibling cell, or both. This
logic is defined by the message class in the nova.cells.messaging module.
"""
target = messaging.Target(version='1.0')
def __init__(self, msg_runner):
"""Init the Intercell RPC Dispatcher."""
self.msg_runner = msg_runner
def process_message(self, _ctxt, message):
"""We received a message from another cell. Use the MessageRunner
to turn this from JSON back into an instance of the correct
Message class. Then process it!
"""
message = self.msg_runner.message_from_json(message)
message.process()
| apache-2.0 |
12AngryMen/votca-scripts | xtp/xtp_sendjobstocluster.py | 2 | 5551 | #!/usr/bin/env python
from __tools__ import MyParser
from __tools__ import XmlParser
from __tools__ import XmlWriter
from __tools__ import make_sure_path_exists
from __tools__ import addsuffixtofile
from __cluster__ import write_cluster_batch
from __xtpJobfile__ import splittjobfile
from __xtpJobfile__ import mergejobfiles
from __xtpJobfile__ import infojobfile
from __xtpJobfile__ import resetjobfile
import lxml.etree as lxml
import subprocess as sp
import os
import sys
from __tools__ import cd
parser=MyParser(description="Environment to split a jobfile into many and submit to cluster")
parser.add_argument("--options","-o",type=str,required=True,help="optionfile")
parser.add_argument("--submit",action='store_const', const=1, default=0,help="Submit to cluster")
parser.add_argument("--setup",action='store_const', const=1, default=0,help="Setup")
parser.add_argument("--merge",action='store_const', const=1, default=0,help="Merge jobfiles")
parser.add_argument("--info",action='store_const', const=1, default=0,help="Display info about each jobfile")
parser.add_argument("--reset",type=str, nargs="+",default=False,help="Reset FAILED and or ASSIGNED to AVAILABLE")
parser.add_argument("--exclude",type=int, nargs="+", default=False,help="Exclude certain jobs from action,give the numbers of the jobs")
parser.add_argument("--include",type=int, nargs="+", default=False,help="Limit action to only the jobs, give the numbers of the jobs")
args=parser.parse_args()
if args.exclude!=False and args.include!=False:
print "ERROR: Excluding and Including at the same time does not work. Choose different options!"
sys.exit()
root=XmlParser(args.options)
queue=root.find("queue").text
procs=int(root.find("procs").text)
tag=root.find("tag").text
jobfile=root.find("jobfile").text
calculator=root.find("calculator").text
optionfile=root.find("optfile").text
sql=root.find("sqlfile").text
threads=int(root.find("threads").text)
cache=int(root.find("cache").text)
rsync=(root.find("rsync").text)
numberofjobs=int(root.find("clusterjobs").text)
workdir=root.find("workdir").text
modules=root.find("modules").text
source=root.find("source").text
if(root.find("xml_optionstring")!=None):
optionstring=root.find("xml_optionstring").text
else:
optionstring=calculator
modules=modules.split()
source=source.split()
if len(source)==0:
source=None
if len(modules)==0:
modules=None
if rsync=="true" or rsync=="1" or rsync=="True":
rsync=True
elif rsync=="false" or rsync=="0" or rsync=="False":
rsync=False
options=XmlParser(optionfile,entry=optionstring)
jobfiles=[]
optionfiles=[]
submitfiles=[]
logfiles=[]
tags=[]
currentdir=os.getcwd()
workdir=os.path.join(currentdir,workdir)
rangejobs=range(numberofjobs)
if args.include!=False:
rangejobs=args.include
print "Only working on jobs {}".format(" ".join(map(str,rangejobs)))
if args.exclude!=False:
temp=[]
for i in rangejobs:
if i in args.exclude:
print "Skipping job {}".format(i)
continue
else:
temp.append(i)
rangejobs=temp
for i in rangejobs:
jobfiles.append(os.path.join(workdir,addsuffixtofile(jobfile,i)))
optionfiles.append(os.path.join(workdir,addsuffixtofile(optionfile,i)))
submitfiles.append(os.path.join(workdir,"xtp_batch_{}.sh".format(i)))
logfiles.append(os.path.join(workdir,"log_batch_{}.txt".format(i)))
tags.append("{}_{}".format(tag,i))
if args.setup:
print "Setting up directory {}".format(workdir)
make_sure_path_exists(workdir)
splittjobfile(jobfile,jobfiles)
for i,optfile,jfile,subfile,logfile,tag in zip(rangejobs,optionfiles,jobfiles,submitfiles,logfiles,tags):
root=lxml.Element("options")
if options.find("job_file")!=None:
options.find("job_file").text=jfile
elif options.find("jobcontrol")!=None:
jobcontrol=options.find("jobcontrol");
jobcontrol.find("job_file").text=jfile
elif options.find("control")!=None:
control=options.find("control");
control.find("job_file").text=jfile
else:
print "Could not find a jobfile in xml options file. Exiting..."
sys.exit()
root.append(options)
XmlWriter(root,optfile)
if rsync!=False:
execdir=None
else:
execdir=currentdir
command="xtp_parallel -e {} -o {} -f {} -s 0 -t {} -c {} > {}".format(calculator,optfile,sql,threads,cache,logfile)
write_cluster_batch(command,tag,outfile=subfile,outlog="{}.log".format(i),errlog="{}.err".format(i),queue=queue,procs=procs,module=modules,source=source,execdir=execdir,rsync=rsync)
if args.submit:
for submitfile in submitfiles:
with cd(currentdir):
sp.call("qsub {}".format(os.path.join(workdir,submitfile)),shell=True)
if args.merge:
"Merging files into {}".format(jobfile)
mergejobfiles(jobfiles,jobfile)
if args.info:
total=0
complete=0
available=0
assigned=0
failed=0
print "{:^18}|{:^12}|{:^12}|{:^12}|{:^12}|{:^12}".format("Jobfile","TOTAL","COMPLETE","AVAILABLE","ASSIGNED","FAILED")
print '-' * 83
for jobfile in jobfiles:
t,c,ava,ass,f=infojobfile(jobfile)
total+=t
complete+=c
available+=ava
assigned+=ass
failed+=f
print "{:^18}|{:^12}|{:^12}|{:^12}|{:^12}|{:^12}".format(os.path.basename(jobfile),t,c,ava,ass,f)
print '-' * 83
print "{:^18}|{:^12}|{:^12}|{:^12}|{:^12}|{:^12}".format("SUM",total,complete,available,assigned,failed)
if args.reset!=False:
failed=False
assigned=False
complete=False
if "FAILED" in args.reset:
failed=True
if "ASSIGNED" in args.reset:
assigned=True
if "COMPLETE" in args.reset:
complete=True
for jobfile in jobfiles:
resetjobfile(jobfile,failed=failed,assigned=assigned,complete=complete)
| apache-2.0 |
alexanderturner/ansible | lib/ansible/playbook/conditional.py | 12 | 9210 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import re
from jinja2.compiler import generate
from jinja2.exceptions import UndefinedError
from ansible.compat.six import text_type
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.playbook.attribute import FieldAttribute
from ansible.template import Templar
from ansible.module_utils._text import to_native
DEFINED_REGEX = re.compile(r'(hostvars\[.+\]|[\w_]+)\s+(not\s+is|is|is\s+not)\s+(defined|undefined)')
LOOKUP_REGEX = re.compile(r'lookup\s*\(')
VALID_VAR_REGEX = re.compile("^[_A-Za-z][_a-zA-Z0-9]*$")
class Conditional:
'''
This is a mix-in class, to be used with Base to allow the object
to be run conditionally when a condition is met or skipped.
'''
_when = FieldAttribute(isa='list', default=[])
def __init__(self, loader=None):
# when used directly, this class needs a loader, but we want to
# make sure we don't trample on the existing one if this class
# is used as a mix-in with a playbook base class
if not hasattr(self, '_loader'):
if loader is None:
raise AnsibleError("a loader must be specified when using Conditional() directly")
else:
self._loader = loader
super(Conditional, self).__init__()
def _validate_when(self, attr, name, value):
if not isinstance(value, list):
setattr(self, name, [ value ])
def _get_attr_when(self):
'''
Override for the 'tags' getattr fetcher, used from Base.
'''
when = self._attributes['when']
if when is None:
when = []
if hasattr(self, '_get_parent_attribute'):
when = self._get_parent_attribute('when', extend=True, prepend=True)
return when
def extract_defined_undefined(self, conditional):
results = []
cond = conditional
m = DEFINED_REGEX.search(cond)
while m:
results.append(m.groups())
cond = cond[m.end():]
m = DEFINED_REGEX.search(cond)
return results
def evaluate_conditional(self, templar, all_vars):
'''
Loops through the conditionals set on this object, returning
False if any of them evaluate as such.
'''
# since this is a mix-in, it may not have an underlying datastructure
# associated with it, so we pull it out now in case we need it for
# error reporting below
ds = None
if hasattr(self, '_ds'):
ds = getattr(self, '_ds')
try:
# this allows for direct boolean assignments to conditionals "when: False"
if isinstance(self.when, bool):
return self.when
for conditional in self.when:
if not self._check_conditional(conditional, templar, all_vars):
return False
except Exception as e:
raise AnsibleError("The conditional check '%s' failed. The error was: %s" % (to_native(conditional), to_native(e)), obj=ds)
return True
def _check_conditional(self, conditional, templar, all_vars):
'''
This method does the low-level evaluation of each conditional
set on this object, using jinja2 to wrap the conditionals for
evaluation.
'''
original = conditional
if conditional is None or conditional == '':
return True
# pull the "bare" var out, which allows for nested conditionals
# and things like:
# - assert:
# that:
# - item
# with_items:
# - 1 == 1
if conditional in all_vars and VALID_VAR_REGEX.match(conditional):
conditional = all_vars[conditional]
# make sure the templar is using the variables specified with this method
templar.set_available_variables(variables=all_vars)
try:
# if the conditional is "unsafe", disable lookups
disable_lookups = hasattr(conditional, '__UNSAFE__')
conditional = templar.template(conditional, disable_lookups=disable_lookups)
if not isinstance(conditional, text_type) or conditional == "":
return conditional
# update the lookups flag, as the string returned above may now be unsafe
# and we don't want future templating calls to do unsafe things
disable_lookups |= hasattr(conditional, '__UNSAFE__')
# now we generated the "presented" string, which is a jinja2 if/else block
# used to evaluate the conditional. First, we do some low-level jinja2 parsing
# involving the AST format of the statement to ensure we don't do anything
# unsafe (using the disable_lookup flag above)
e = templar.environment.overlay()
e.filters.update(templar._get_filters())
e.tests.update(templar._get_tests())
presented = "{%% if %s %%} True {%% else %%} False {%% endif %%}" % conditional
res = e._parse(presented, None, None)
res = generate(res, e, None, None)
parsed = ast.parse(res, mode='exec')
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
if isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Str):
# calling things with a dunder is generally bad at this point...
if inside_call and disable_lookups and node.s.startswith("__"):
raise AnsibleError("Invalid access found in the presented conditional: '%s'" % conditional)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(child_node, inside_call=inside_call)
cnv = CleansingNodeVisitor()
cnv.visit(parsed)
            # and finally we template the presented string and look at the resulting string
val = templar.template(presented, disable_lookups=disable_lookups).strip()
if val == "True":
return True
elif val == "False":
return False
else:
raise AnsibleError("unable to evaluate conditional: %s" % original)
except (AnsibleUndefinedVariable, UndefinedError) as e:
# the templating failed, meaning most likely a variable was undefined. If we happened to be
# looking for an undefined variable, return True, otherwise fail
try:
# first we extract the variable name from the error message
var_name = re.compile(r"'(hostvars\[.+\]|[\w_]+)' is undefined").search(str(e)).groups()[0]
# next we extract all defined/undefined tests from the conditional string
def_undef = self.extract_defined_undefined(conditional)
# then we loop through these, comparing the error variable name against
# each def/undef test we found above. If there is a match, we determine
# whether the logic/state mean the variable should exist or not and return
# the corresponding True/False
for (du_var, logic, state) in def_undef:
# when we compare the var names, normalize quotes because something
# like hostvars['foo'] may be tested against hostvars["foo"]
if var_name.replace("'", '"') == du_var.replace("'", '"'):
                        # whether the variable should exist is an xor test between a negation
                        # in the logic portion and the state (defined or undefined)
should_exist = ('not' in logic) != (state == 'defined')
if should_exist:
return False
else:
return True
# as nothing above matched the failed var name, re-raise here to
# trigger the AnsibleUndefinedVariable exception again below
raise
except Exception as new_e:
raise AnsibleUndefinedVariable("error while evaluating conditional (%s): %s" % (original, e))
| gpl-3.0 |
rjoudrey/volatility | volatility/win32/lsasecrets.py | 44 | 4501 | # Volatility
# Copyright (c) 2008-2013 Volatility Foundation
# Copyright (c) 2008 Brendan Dolan-Gavitt <[email protected]>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
#pylint: disable-msg=C0111
"""
@author: Brendan Dolan-Gavitt
@license: GNU General Public License 2.0
@contact: [email protected]
"""
import struct
import volatility.win32.rawreg as rawreg
import volatility.win32.hive as hive
import volatility.win32.hashdump as hashdump
from Crypto.Hash import MD5
from Crypto.Cipher import ARC4, DES
def get_lsa_key(secaddr, bootkey):
if not bootkey:
return None
root = rawreg.get_root(secaddr)
if not root:
return None
enc_reg_key = rawreg.open_key(root, ["Policy", "PolSecretEncryptionKey"])
if not enc_reg_key:
return None
enc_reg_value = enc_reg_key.ValueList.List.dereference()[0]
if not enc_reg_value:
return None
obf_lsa_key = secaddr.read(enc_reg_value.Data,
enc_reg_value.DataLength)
if not obf_lsa_key:
return None
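    # derive an RC4 key from the boot key: hash the boot key with MD5, mix in
    # 1000 rounds over bytes 60:76 of the obfuscated blob, then RC4-decrypt
    # bytes 12:60 and keep 16 bytes of the result as the LSA key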
md5 = MD5.new()
md5.update(bootkey)
for _i in range(1000):
md5.update(obf_lsa_key[60:76])
rc4key = md5.digest()
rc4 = ARC4.new(rc4key)
lsa_key = rc4.decrypt(obf_lsa_key[12:60])
return lsa_key[0x10:0x20]
def decrypt_secret(secret, key):
"""Python implementation of SystemFunction005.
Decrypts a block of data with DES using given key.
Note that key can be longer than 7 bytes."""
decrypted_data = ''
j = 0 # key index
for i in range(0, len(secret), 8):
enc_block = secret[i:i + 8]
block_key = key[j:j + 7]
des_key = hashdump.str_to_key(block_key)
des = DES.new(des_key, DES.MODE_ECB)
decrypted_data += des.decrypt(enc_block)
j += 7
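        # if fewer than 7 key bytes remain, restart the key index so the key
        # material wraps around (mirroring SystemFunction005's behaviour)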
if len(key[j:j + 7]) < 7:
j = len(key[j:j + 7])
(dec_data_len,) = struct.unpack("<L", decrypted_data[:4])
return decrypted_data[8:8 + dec_data_len]
def get_secret_by_name(secaddr, name, lsakey):
root = rawreg.get_root(secaddr)
if not root:
return None
enc_secret_key = rawreg.open_key(root, ["Policy", "Secrets", name, "CurrVal"])
if not enc_secret_key:
return None
enc_secret_value = enc_secret_key.ValueList.List.dereference()[0]
if not enc_secret_value:
return None
enc_secret = secaddr.read(enc_secret_value.Data,
enc_secret_value.DataLength)
if not enc_secret:
return None
return decrypt_secret(enc_secret[0xC:], lsakey)
def get_secrets(sysaddr, secaddr):
root = rawreg.get_root(secaddr)
if not root:
return None
bootkey = hashdump.get_bootkey(sysaddr)
lsakey = get_lsa_key(secaddr, bootkey)
if not bootkey or not lsakey:
return None
secrets_key = rawreg.open_key(root, ["Policy", "Secrets"])
if not secrets_key:
return None
secrets = {}
for key in rawreg.subkeys(secrets_key):
sec_val_key = rawreg.open_key(key, ["CurrVal"])
if not sec_val_key:
continue
enc_secret_value = sec_val_key.ValueList.List.dereference()[0]
if not enc_secret_value:
continue
enc_secret = secaddr.read(enc_secret_value.Data,
enc_secret_value.DataLength)
if not enc_secret:
continue
secret = decrypt_secret(enc_secret[0xC:], lsakey)
secrets[key.Name] = secret
return secrets
def get_memory_secrets(addr_space, config, syshive, sechive):
sysaddr = hive.HiveAddressSpace(addr_space, config, syshive)
secaddr = hive.HiveAddressSpace(addr_space, config, sechive)
return get_secrets(sysaddr, secaddr)
def get_file_secrets(sysfile, secfile):
sysaddr = hive.HiveFileAddressSpace(sysfile)
secaddr = hive.HiveFileAddressSpace(secfile)
return get_secrets(sysaddr, secaddr)
| gpl-2.0 |
rantonmattei/garcon | tests/test_decider.py | 1 | 1045 | from unittest.mock import MagicMock
import boto.swf.layer2 as swf
from boto.swf import layer1
import pytest
from garcon import activity
from garcon import decider
def mock(monkeypatch):
for base in [swf.Decider, swf.WorkflowType, swf.ActivityType, swf.Domain]:
monkeypatch.setattr(base, '__init__', MagicMock(return_value=None))
if base is not swf.Decider:
monkeypatch.setattr(base, 'register', MagicMock())
def test_create_decider(monkeypatch):
"""Create a decider and check the behavior of the registration.
"""
mock(monkeypatch)
from tests.fixtures.flows import example
d = decider.DeciderWorker(example)
assert len(d.activities) == 4
assert d.flow
assert d.domain
monkeypatch.setattr(decider.DeciderWorker, 'register', MagicMock())
d = decider.DeciderWorker(example)
assert d.register.called
monkeypatch.setattr(decider.DeciderWorker, 'register', MagicMock())
dec = decider.DeciderWorker(example, register=False)
assert not dec.register.called
| mit |
jferreir/mbed | workspace_tools/host_tests/mbedrpc.py | 111 | 7047 | # mbedRPC.py - mbed RPC interface for Python
#
##Copyright (c) 2010 ARM Ltd
##
##Permission is hereby granted, free of charge, to any person obtaining a copy
##of this software and associated documentation files (the "Software"), to deal
##in the Software without restriction, including without limitation the rights
##to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
##copies of the Software, and to permit persons to whom the Software is
##furnished to do so, subject to the following conditions:
##
##The above copyright notice and this permission notice shall be included in
##all copies or substantial portions of the Software.
##
##THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
##IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
##FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
##AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
##LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
##OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
##THE SOFTWARE.
#
# Example:
# >from mbedRPC import*
# >mbed = SerialRPC("COM5",9600)
# >myled = DigitalOut(mbed,"myled") <--- Where the text in quotations matches your RPC pin definition's second parameter, in this case it could be RpcDigitalOut myled(LED1,"myled");
# >myled.write(1)
# >
import re
import serial, urllib2, time
# mbed super class
class mbed:
def __init__(self):
print("This will work as a demo but no transport mechanism has been selected")
def rpc(self, name, method, args):
print("Superclass method not overridden")
# Transport mechanisms, derived from mbed
class SerialRPC(mbed):
def __init__(self, port, baud):
self.ser = serial.Serial(port)
self.ser.setBaudrate(baud)
def rpc(self, name, method, args):
# creates the command to be sent serially - /name/method arg1 arg2 arg3 ... argN
str = "/" + name + "/" + method + " " + " ".join(args) + "\n"
# prints the command being executed
print str
# writes the command to serial
self.ser.write(str)
        # reads the response line and strips trailing whitespace
ret_val = self.ser.readline().strip()
return ret_val
class HTTPRPC(mbed):
def __init__(self, ip):
self.host = "http://" + ip
def rpc(self, name, method, args):
response = urllib2.urlopen(self.host + "/rpc/" + name + "/" + method + "%20" + "%20".join(args))
return response.read().strip()
# generic mbed interface super class
class mbed_interface():
# initialize an mbed interface with a transport mechanism and pin name
def __init__(self, this_mbed, mpin):
self.mbed = this_mbed
if isinstance(mpin, str):
self.name = mpin
def __del__(self):
r = self.mbed.rpc(self.name, "delete", [])
def new(self, class_name, name, pin1, pin2 = "", pin3 = ""):
args = [arg for arg in [pin1,pin2,pin3,name] if arg != ""]
r = self.mbed.rpc(class_name, "new", args)
# generic read
def read(self):
r = self.mbed.rpc(self.name, "read", [])
return int(r)
# for classes that need write functionality - inherits from the generic reading interface
class mbed_interface_write(mbed_interface):
def __init__(self, this_mbed, mpin):
mbed_interface.__init__(self, this_mbed, mpin)
# generic write
def write(self, value):
r = self.mbed.rpc(self.name, "write", [str(value)])
# mbed interfaces
class DigitalOut(mbed_interface_write):
def __init__(self, this_mbed, mpin):
mbed_interface_write.__init__(self, this_mbed, mpin)
class AnalogIn(mbed_interface):
def __init__(self, this_mbed, mpin):
mbed_interface.__init__(self, this_mbed, mpin)
def read_u16(self):
r = self.mbed.rpc(self.name, "read_u16", [])
return int(r)
class AnalogOut(mbed_interface_write):
def __init__(self, this_mbed, mpin):
mbed_interface_write.__init__(self, this_mbed, mpin)
def write_u16(self, value):
self.mbed.rpc(self.name, "write_u16", [str(value)])
def read(self):
r = self.mbed.rpc(self.name, "read", [])
return float(r)
class DigitalIn(mbed_interface):
def __init__(self, this_mbed, mpin):
mbed_interface.__init__(self, this_mbed, mpin)
class PwmOut(mbed_interface_write):
def __init__(self, this_mbed, mpin):
mbed_interface_write.__init__(self, this_mbed, mpin)
def read(self):
r = self.mbed.rpc(self.name, "read", [])
return r
def period(self, value):
self.mbed.rpc(self.name, "period", [str(value)])
def period_ms(self, value):
self.mbed.rpc(self.name, "period_ms", [str(value)])
def period_us(self, value):
self.mbed.rpc(self.name, "period_us", [str(value)])
def pulsewidth(self, value):
self.mbed.rpc(self.name, "pulsewidth", [str(value)])
def pulsewidth_ms(self, value):
self.mbed.rpc(self.name, "pulsewidth_ms", [str(value)])
def pulsewidth_us(self, value):
self.mbed.rpc(self.name, "pulsewidth_us", [str(value)])
class RPCFunction(mbed_interface):
def __init__(self, this_mbed, name):
mbed_interface.__init__(self, this_mbed, name)
def run(self, input):
r = self.mbed.rpc(self.name, "run", [input])
return r
class RPCVariable(mbed_interface_write):
def __init__(self, this_mbed, name):
mbed_interface_write.__init__(self, this_mbed, name)
def read(self):
r = self.mbed.rpc(self.name, "read", [])
return r
class Timer(mbed_interface):
def __init__(self, this_mbed, name):
mbed_interface.__init__(self, this_mbed, name)
def start(self):
r = self.mbed.rpc(self.name, "start", [])
def stop(self):
r = self.mbed.rpc(self.name, "stop", [])
def reset(self):
r = self.mbed.rpc(self.name, "reset", [])
def read(self):
r = self.mbed.rpc(self.name, "read", [])
return float(re.search('\d+\.*\d*', r).group(0))
def read_ms(self):
r = self.mbed.rpc(self.name, "read_ms", [])
return float(re.search('\d+\.*\d*', r).group(0))
def read_us(self):
r = self.mbed.rpc(self.name, "read_us", [])
return float(re.search('\d+\.*\d*', r).group(0))
# Serial
class Serial():
def __init__(self, this_mbed, tx, rx=""):
self.mbed = this_mbed
if isinstance(tx, str):
self.name = tx
def __del__(self):
r = self.mbed.rpc(self.name, "delete", [])
def baud(self, value):
r = self.mbed.rpc(self.name, "baud", [str(value)])
def putc(self, value):
r = self.mbed.rpc(self.name, "putc", [str(value)])
def puts(self, value):
r = self.mbed.rpc(self.name, "puts", ["\"" + str(value) + "\""])
def getc(self):
r = self.mbed.rpc(self.name, "getc", [])
return int(r)
def wait(s):
time.sleep(s)
| apache-2.0 |
moio/spacewalk | backend/upload_server/handlers/package_push/package_push.py | 1 | 5519 | #
# Code that drops files on the filesystem (/PKG-UPLOAD)
#
#
# Copyright (c) 2008--2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
#
import os
import base64
from rhn import rpclib
from spacewalk.common import apache, rhnFlags
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnException import rhnFault
from spacewalk.server import rhnPackageUpload, rhnSQL, basePackageUpload
class PackagePush(basePackageUpload.BasePackageUpload):
def __init__(self, req):
basePackageUpload.BasePackageUpload.__init__(self, req)
self.required_fields.extend([
'Auth',
'Force',
])
self.null_org = None
# Default packaging is rpm
self.packaging = 'rpm'
self.username = None
self.password = None
self.force = None
self.rel_package_path = None
self.org_id = None
self.package_path = None
def headerParserHandler(self, req):
ret = basePackageUpload.BasePackageUpload.headerParserHandler(self, req)
# Optional headers
maps = [['Null-Org', 'null_org'], ['Packaging', 'packaging']]
for hn, sn in maps:
header_name = "%s-%s" % (self.header_prefix, hn)
if req.headers_in.has_key(header_name):
setattr(self, sn, req.headers_in[header_name])
if ret != apache.OK:
return ret
if CFG.SEND_MESSAGE_TO_ALL:
rhnSQL.closeDB()
log_debug(1, "send_message_to_all is set")
rhnFlags.set("apache-return-code", apache.HTTP_NOT_FOUND)
try:
outage_message = open(CFG.MESSAGE_TO_ALL).read()
except IOError:
log_error("Missing outage message file")
outage_message = "Outage mode"
raise rhnFault(20001, outage_message, explain=0)
# Init the database connection
rhnSQL.initDB()
use_session = 0
if self.field_data.has_key('Auth-Session'):
session_token = self.field_data['Auth-Session']
use_session = 1
else:
encoded_auth_token = self.field_data['Auth']
if not use_session:
auth_token = self.get_auth_token(encoded_auth_token)
if len(auth_token) < 2:
log_debug(3, auth_token)
                raise rhnFault(105, "Unable to authenticate")
self.username, self.password = auth_token[:2]
force = self.field_data['Force']
force = int(force)
log_debug(1, "Username", self.username, "Force", force)
if use_session:
self.org_id, self.force = rhnPackageUpload.authenticate_session(session_token,
force=force, null_org=self.null_org)
else:
# We don't push to any channels
self.org_id, self.force = rhnPackageUpload.authenticate(self.username,
self.password, force=force, null_org=self.null_org)
return apache.OK
def handler(self, req):
ret = basePackageUpload.BasePackageUpload.handler(self, req)
if ret != apache.OK:
return ret
a_pkg = rhnPackageUpload.save_uploaded_package(req,
(self.package_name, None, self.package_version,
self.package_release, self.package_arch),
str(self.org_id),
self.packaging,
self.file_checksum_type, self.file_checksum)
self.rel_package_path = rhnPackageUpload.relative_path_from_header(
a_pkg.header, org_id=self.org_id,
checksum_type=a_pkg.checksum_type, checksum=a_pkg.checksum)
self.package_path = os.path.join(CFG.MOUNT_POINT,
self.rel_package_path)
package_dict, diff_level = rhnPackageUpload.push_package(a_pkg,
force=self.force,
relative_path=self.rel_package_path, org_id=self.org_id)
if diff_level:
return self._send_package_diff(req, diff_level, package_dict)
# Everything went fine
rhnSQL.commit()
reply = "All OK"
req.headers_out['Content-Length'] = str(len(reply))
req.send_http_header()
req.write(reply)
log_debug(2, "Returning with OK")
return apache.OK
@staticmethod
def _send_package_diff(req, diff_level, diff):
args = {
'level' : diff_level,
'diff' : diff,
}
reply = rpclib.xmlrpclib.dumps((args, ))
ret_stat = apache.HTTP_BAD_REQUEST
req.status = ret_stat
req.err_headers_out['Content-Length'] = str(len(reply))
req.send_http_header()
req.write(reply)
return apache.OK
@staticmethod
def get_auth_token(value):
s = ''.join(map(lambda x: x.strip(), value.split(',')))
arr = map(base64.decodestring, s.split(':'))
return arr
| gpl-2.0 |
bert9bert/statsmodels | statsmodels/sandbox/distributions/tests/__init__.py | 219 | 6354 | '''
Econometrics for a Datarich Environment
=======================================
Introduction
------------
In many cases we are performing statistical analysis when many observed variables are
available, when we are in a data rich environment. Machine learning has a wide variety
of tools for dimension reduction and penalization when there are many variables compared
to the number of observations. Chemometrics has a long tradition of using Partial Least
Squares, NIPALS and similar in these cases. In econometrics the same problem shows up
when there are either many possible regressors, many (weak) instruments or when there are
a large number of moment conditions in GMM.
This section is intended to collect some models and tools in this area that are relevant
for statistical analysis and econometrics.
Covariance Matrices
===================
Several methods are available to reduce the small sample noise in estimated covariance
matrices with many variables.
Some applications:
weighting matrix with many moments,
covariance matrix for portfolio choice
Dimension Reduction
===================
Principal Component and Partial Least Squares try to extract the important low dimensional
factors from the data with many variables.
Regression with many regressors
===============================
Factor models, selection of regressors and shrinkage and penalization are used to improve
the statistical properties, when the presence of too many regressors leads to over-fitting
and too noisy small sample estimators and statistics.
Regression with many moments or many instruments
================================================
The same tools apply and can be used in these two cases.
e.g. Tychonov regularization of weighting matrix in GMM, similar to Ridge regression, the
weighting matrix can be shrunk towards the identity matrix.
Simplest case will be part of GMM. I don't know how much will be standalone
functions.
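As a rough numerical illustration (not part of any planned interface here; `lambd` is
just an illustrative tuning parameter), shrinking an estimated weighting matrix towards
the identity matrix could look like:

    import numpy as np

    def shrink_to_identity(w, lambd=0.1):
        # convex combination of the estimated weighting matrix and the identity;
        # lambd=0 keeps w unchanged, lambd=1 gives the identity matrix
        w = np.asarray(w, dtype=float)
        return (1 - lambd) * w + lambd * np.eye(w.shape[0])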
Intended Content
================
PLS
---
what should be available in class?
Factormodel and supporting helper functions
-------------------------------------------
PCA based
~~~~~~~~~
First version based on PCA, following Stock/Watson and Bai/Ng, and recent papers on the
selection of the number of factors. Not sure about the approach of Forni et al.
Basic support of this needs additional results for PCA, error covariance matrix
of data on reduced factors, required for criteria in Bai/Ng.
Selection criteria based on eigenvalue cutoffs.
Paper on PCA and structural breaks. Could add additional results during
find_nfact to test for parameter stability. I haven't read the paper yet.
Idea: for forecasting, use up to h-step ahead endogenous variables to directly
get the forecasts.
Asymptotic results and distribution: not too much idea yet.
Standard OLS results are conditional on factors, paper by Haerdle (abstract
seems to suggest that this is ok, Park 2009).
Simulation: add function to simulate DGP of Bai/Ng and recent extension.
Sensitivity of selection criteria to heteroscedasticity and autocorrelation.
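A minimal sketch of the kind of criterion meant here (a Bai/Ng style IC; the function
name and the exact penalty term are only illustrative, not a committed implementation):

    import numpy as np

    def ic_number_of_factors(x, kmax=10):
        # x is (T, N); factors are estimated by PCA on the sample covariance
        x = np.asarray(x, dtype=float)
        x = x - x.mean(0)
        t, n = x.shape
        _, evecs = np.linalg.eigh(np.cov(x, rowvar=False))
        ic = []
        for k in range(1, kmax + 1):
            lam = evecs[:, -k:]                # loadings of the k largest eigenvalues
            resid = x - x.dot(lam).dot(lam.T)  # data minus its projection on k factors
            v = (resid ** 2).mean()            # V(k): average squared residual
            penalty = k * (n + t) / float(n * t) * np.log(min(n, t))
            ic.append(np.log(v) + penalty)
        return int(np.argmin(ic)) + 1          # number of factors minimizing the IC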
Bai, J. & Ng, S., 2002. Determining the Number of Factors in
Approximate Factor Models. Econometrica, 70(1), pp.191-221.
Kapetanios, G., 2010. A Testing Procedure for Determining the Number
of Factors in Approximate Factor Models With Large Datasets. Journal
of Business and Economic Statistics, 28(3), pp.397-409.
Onatski, A., 2010. Determining the Number of Factors from Empirical
Distribution of Eigenvalues. Review of Economics and Statistics,
92(4), pp.1004-1016.
Alessi, L., Barigozzi, M. & Capasso, M., 2010. Improved penalization
for determining the number of factors in approximate factor models.
Statistics & Probability Letters, 80(23-24), pp.1806-1813.
Breitung, J. & Eickmeier, S., Testing for structural breaks in dynamic
factor models. Journal of Econometrics, In Press, Accepted Manuscript.
Available at:
http://www.sciencedirect.com/science/article/B6VC0-51G3W92-1/2/f45ce2332443374fd770e42e5a68ddb4
[Accessed November 15, 2010].
Croux, C., Renault, E. & Werker, B., 2004. Dynamic factor models.
Journal of Econometrics, 119(2), pp.223-230.
Forni, M. et al., 2009. Opening the Black Box: Structural Factor
Models with Large Cross Sections. Econometric Theory, 25(05),
pp.1319-1347.
Forni, M. et al., 2000. The Generalized Dynamic-Factor Model:
Identification and Estimation. Review of Economics and Statistics,
82(4), pp.540-554.
Forni, M. & Lippi, M., The general dynamic factor model: One-sided
representation results. Journal of Econometrics, In Press, Accepted
Manuscript. Available at:
http://www.sciencedirect.com/science/article/B6VC0-51FNPJN-1/2/4fcdd0cfb66e3050ff5d19bf2752ed19
[Accessed November 15, 2010].
Park, B.U. et al., 2009. Time Series Modelling With Semiparametric
Factor Dynamics. Journal of the American Statistical Association,
104(485), pp.284-298.
other factor algorithm
~~~~~~~~~~~~~~~~~~~~~~
PLS should fit in reasonably well.
Bai/Ng have a recent paper, where they compare LASSO, PCA, and similar, individual
and in combination.
Check how much we can use scikits.learn for this.
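For orientation only, a two-block PLS fit through scikit-learn (the module path below is
the one in current scikit-learn releases; whether the older scikits.learn package exposes
the same interface is left open here):

    import numpy as np
    from sklearn.cross_decomposition import PLSRegression

    x = np.random.randn(100, 20)                     # many regressors
    y = x[:, :3].sum(axis=1) + np.random.randn(100)  # depends on only a few of them
    pls = PLSRegression(n_components=2)
    pls.fit(x, y)
    y_hat = pls.predict(x)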
miscellaneous
~~~~~~~~~~~~~
Time series modeling of factors for prediction, ARMA, VARMA.
SUR and correlation structure
What about sandwich estimation, robust covariance matrices?
Similarity to Factor-Garch and Go-Garch
Updating: incremental PCA, ...?
TODO next
=========
MVOLS : OLS with multivariate endogenous and identical exogenous variables.
rewrite and expand current varma_process.VAR
PCA : write a class after all, and/or adjust the current donated class
and keep adding required statistics, e.g.
residual variance, projection of X on k-factors, ... updating ?
FactorModelUnivariate : started, does basic principal component regression,
based on standard information criteria, not Bai/Ng adjusted
FactorModelMultivariate : follow pattern for univariate version and use
MVOLS
'''
| bsd-3-clause |
crayzeewulf/android-quill | jni/libhpdf-2.3.0RC2/if/python/demo/outline_demo_jp.py | 32 | 3955 | ###
## * << Haru Free PDF Library 2.0.0 >> -- outline_demo_jp.c
## *
## * Copyright (c) 1999-2006 Takeshi Kanno <[email protected]>
## *
## * Permission to use, copy, modify, distribute and sell this software
## * and its documentation for any purpose is hereby granted without fee,
## * provided that the above copyright notice appear in all copies and
## * that both that copyright notice and this permission notice appear
## * in supporting documentation.
## * It is provided "as is" without express or implied warranty.
## *
##
## port to python by Li Jun
## http://groups.google.com/group/pythoncia
import os, sys
from ctypes import *
up=2
def setlibpath(up):
import sys
path=os.path.normpath(os.path.split(os.path.realpath(__file__))[0]+'\..'*up)
if path not in sys.path:
sys.path.append(path)
setlibpath(up)
from haru import *
from haru.c_func import *
from haru.hpdf_errorcode import *
for i in dir():
if 'CreateOutLine' in i:
print i
@HPDF_Error_Handler(None, HPDF_UINT, HPDF_UINT, c_void_p)
def error_handler (error_no, detail_no, user_data):
global pdf
printf ("ERROR: %s, detail_no=%u\n", error_detail[error_no],
detail_no)
HPDF_Free (pdf)
sys.exit(1)
def print_page (page, page_num):
HPDF_Page_SetWidth (page, 200)
HPDF_Page_SetHeight (page, 300)
HPDF_Page_BeginText (page)
HPDF_Page_MoveTextPos (page, 50, 250)
buf="Page:%d" % page_num
HPDF_Page_ShowText (page, buf)
HPDF_Page_EndText (page)
def main():
global pdf
page=[None for i in range(4)]
outline=[None for i in range(4)]
fname=os.path.realpath(sys.argv[0])
fname=fname[:fname.rfind('.')]+'.pdf'
try:
f = open ("mbtext/sjis.txt", "rb")
except:
printf ("error: cannot open 'mbtext/sjis.txt'\n")
return 1
SAMP_TXT=f.read(2048)
f.close ()
pdf = HPDF_New (error_handler, NULL)
if (not pdf):
printf ("error: cannot create PdfDoc object\n")
return 1
# declaration for using Japanese encoding.
HPDF_UseJPEncodings (pdf)
# create default-font
font = HPDF_GetFont (pdf, "Helvetica", NULL)
# Set page mode to use outlines.
HPDF_SetPageMode(pdf, HPDF_PAGE_MODE_USE_OUTLINE)
# Add 3 pages to the document.
page[0] = HPDF_AddPage (pdf)
HPDF_Page_SetFontAndSize (page[0], font, 20)
print_page(page[0], 1)
page[1] = HPDF_AddPage (pdf)
HPDF_Page_SetFontAndSize (page[1], font, 20)
print_page(page[1], 2)
page[2] = HPDF_AddPage (pdf)
HPDF_Page_SetFontAndSize (page[2], font, 20)
print_page(page[2], 3)
# create outline root.
root = HPDF_CreateOutLine (pdf, NULL, "OutlineRoot", NULL)
HPDF_Outline_SetOpened (root, HPDF_TRUE)
outline[0] = HPDF_CreateOutLine (pdf, root, "page1", NULL)
outline[1] = HPDF_CreateOutLine (pdf, root, "page2", NULL)
# create outline with test which is encoding
outline[2] = HPDF_CreateOutLine (pdf, root, SAMP_TXT,
HPDF_GetEncoder (pdf, "90ms-RKSJ-H"))
# create destination objects on each pages
# and link it to outline items.
dst = HPDF_Page_CreateDestination (page[0])
HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[0]), 1)
HPDF_Outline_SetDestination(outline[0], dst)
# HPDF_Catalog_SetOpenAction(dst)
dst = HPDF_Page_CreateDestination (page[1])
HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[1]), 1)
HPDF_Outline_SetDestination(outline[1], dst)
dst = HPDF_Page_CreateDestination (page[2])
HPDF_Destination_SetXYZ(dst, 0, HPDF_Page_GetHeight(page[2]), 1)
HPDF_Outline_SetDestination(outline[2], dst)
# save the document to a file
HPDF_SaveToFile (pdf, fname)
# clean up
HPDF_Free (pdf)
return 0
main() | gpl-3.0 |
mjfarmer/scada_py | env/lib/python2.7/site-packages/pip/download.py | 45 | 22491 | import cgi
import email.utils
import hashlib
import getpass
import mimetypes
import os
import platform
import re
import shutil
import sys
import tempfile
import pip
from pip.backwardcompat import urllib, urlparse, raw_input
from pip.exceptions import InstallationError, HashMismatch
from pip.util import (splitext, rmtree, format_size, display_path,
backup_dir, ask_path_exists, unpack_file,
create_download_cache_folder, cache_download)
from pip.vcs import vcs
from pip.log import logger
import requests, six
from requests.adapters import BaseAdapter
from requests.auth import AuthBase, HTTPBasicAuth
from requests.compat import IncompleteRead
from requests.exceptions import InvalidURL, ChunkedEncodingError
from requests.models import Response
from requests.structures import CaseInsensitiveDict
__all__ = ['get_file_content',
'is_url', 'url_to_path', 'path_to_url',
'is_archive_file', 'unpack_vcs_link',
'unpack_file_url', 'is_vcs_url', 'is_file_url', 'unpack_http_url']
def user_agent():
"""Return a string representing the user agent."""
_implementation = platform.python_implementation()
if _implementation == 'CPython':
_implementation_version = platform.python_version()
elif _implementation == 'PyPy':
_implementation_version = '%s.%s.%s' % (sys.pypy_version_info.major,
sys.pypy_version_info.minor,
sys.pypy_version_info.micro)
if sys.pypy_version_info.releaselevel != 'final':
_implementation_version = ''.join([
_implementation_version,
sys.pypy_version_info.releaselevel,
])
elif _implementation == 'Jython':
_implementation_version = platform.python_version() # Complete Guess
elif _implementation == 'IronPython':
_implementation_version = platform.python_version() # Complete Guess
else:
_implementation_version = 'Unknown'
try:
p_system = platform.system()
p_release = platform.release()
except IOError:
p_system = 'Unknown'
p_release = 'Unknown'
return " ".join(['pip/%s' % pip.__version__,
'%s/%s' % (_implementation, _implementation_version),
'%s/%s' % (p_system, p_release)])
class MultiDomainBasicAuth(AuthBase):
def __init__(self, prompting=True):
self.prompting = prompting
self.passwords = {}
def __call__(self, req):
parsed = urlparse.urlparse(req.url)
# Get the netloc without any embedded credentials
netloc = parsed.netloc.split("@", 1)[-1]
# Set the url of the request to the url without any credentials
req.url = urlparse.urlunparse(parsed[:1] + (netloc,) + parsed[2:])
# Use any stored credentials that we have for this netloc
username, password = self.passwords.get(netloc, (None, None))
# Extract credentials embedded in the url if we have none stored
if username is None:
username, password = self.parse_credentials(parsed.netloc)
if username or password:
# Store the username and password
self.passwords[netloc] = (username, password)
# Send the basic auth with this request
req = HTTPBasicAuth(username or "", password or "")(req)
# Attach a hook to handle 401 responses
req.register_hook("response", self.handle_401)
return req
def handle_401(self, resp, **kwargs):
# We only care about 401 responses, anything else we want to just
# pass through the actual response
if resp.status_code != 401:
return resp
        # We are not able to prompt the user so simply return the response
if not self.prompting:
return resp
parsed = urlparse.urlparse(resp.url)
# Prompt the user for a new username and password
username = raw_input("User for %s: " % parsed.netloc)
password = getpass.getpass("Password: ")
# Store the new username and password to use for future requests
if username or password:
self.passwords[parsed.netloc] = (username, password)
# Consume content and release the original connection to allow our new
# request to reuse the same one.
resp.content
resp.raw.release_conn()
# Add our new username and password to the request
req = HTTPBasicAuth(username or "", password or "")(resp.request)
# Send our new request
new_resp = resp.connection.send(req, **kwargs)
new_resp.history.append(resp)
return new_resp
def parse_credentials(self, netloc):
if "@" in netloc:
userinfo = netloc.rsplit("@", 1)[0]
if ":" in userinfo:
return userinfo.split(":", 1)
return userinfo, None
return None, None
class LocalFSResponse(object):
def __init__(self, fileobj):
self.fileobj = fileobj
def __getattr__(self, name):
return getattr(self.fileobj, name)
def read(self, amt=None, decode_content=None, cache_content=False):
return self.fileobj.read(amt)
# Insert Hacks to Make Cookie Jar work w/ Requests
@property
def _original_response(self):
class FakeMessage(object):
def getheaders(self, header):
return []
def get_all(self, header, default):
return []
class FakeResponse(object):
@property
def msg(self):
return FakeMessage()
return FakeResponse()
class LocalFSAdapter(BaseAdapter):
def send(self, request, stream=None, timeout=None, verify=None, cert=None,
proxies=None):
parsed_url = urlparse.urlparse(request.url)
# We only work for requests with a host of localhost
if parsed_url.netloc.lower() != "localhost":
raise InvalidURL("Invalid URL %r: Only localhost is allowed" %
request.url)
real_url = urlparse.urlunparse(parsed_url[:1] + ("",) + parsed_url[2:])
pathname = url_to_path(real_url)
resp = Response()
resp.status_code = 200
resp.url = real_url
stats = os.stat(pathname)
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
resp.headers = CaseInsensitiveDict({
"Content-Type": mimetypes.guess_type(pathname)[0] or "text/plain",
"Content-Length": stats.st_size,
"Last-Modified": modified,
})
resp.raw = LocalFSResponse(open(pathname, "rb"))
resp.close = resp.raw.close
return resp
def close(self):
pass
class PipSession(requests.Session):
timeout = None
def __init__(self, *args, **kwargs):
super(PipSession, self).__init__(*args, **kwargs)
# Attach our User Agent to the request
self.headers["User-Agent"] = user_agent()
# Attach our Authentication handler to the session
self.auth = MultiDomainBasicAuth()
# Enable file:// urls
self.mount("file://", LocalFSAdapter())
def request(self, method, url, *args, **kwargs):
# Make file:// urls not fail due to lack of a hostname
parsed = urlparse.urlparse(url)
if parsed.scheme == "file":
url = urlparse.urlunparse(parsed[:1] + ("localhost",) + parsed[2:])
# Allow setting a default timeout on a session
kwargs.setdefault("timeout", self.timeout)
# Dispatch the actual request
return super(PipSession, self).request(method, url, *args, **kwargs)
def get_file_content(url, comes_from=None, session=None):
"""Gets the content of a file; it may be a filename, file: URL, or
http: URL. Returns (location, content). Content is unicode."""
if session is None:
session = PipSession()
match = _scheme_re.search(url)
if match:
scheme = match.group(1).lower()
if (scheme == 'file' and comes_from
and comes_from.startswith('http')):
raise InstallationError(
'Requirements file %s references URL %s, which is local'
% (comes_from, url))
if scheme == 'file':
path = url.split(':', 1)[1]
path = path.replace('\\', '/')
match = _url_slash_drive_re.match(path)
if match:
path = match.group(1) + ':' + path.split('|', 1)[1]
path = urllib.unquote(path)
if path.startswith('/'):
path = '/' + path.lstrip('/')
url = path
else:
## FIXME: catch some errors
resp = session.get(url)
resp.raise_for_status()
if six.PY3:
return resp.url, resp.text
else:
return resp.url, resp.content
try:
f = open(url)
content = f.read()
except IOError:
e = sys.exc_info()[1]
raise InstallationError('Could not open requirements file: %s' % str(e))
else:
f.close()
return url, content
_scheme_re = re.compile(r'^(http|https|file):', re.I)
_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I)
def is_url(name):
"""Returns true if the name looks like a URL"""
if ':' not in name:
return False
scheme = name.split(':', 1)[0].lower()
return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes
def url_to_path(url):
"""
Convert a file: URL to a path.
"""
assert url.startswith('file:'), (
"You can only turn file: urls into filenames (not %r)" % url)
path = url[len('file:'):].lstrip('/')
path = urllib.unquote(path)
if _url_drive_re.match(path):
path = path[0] + ':' + path[2:]
else:
path = '/' + path
return path
_drive_re = re.compile('^([a-z]):', re.I)
_url_drive_re = re.compile('^([a-z])[:|]', re.I)
def path_to_url(path):
"""
Convert a path to a file: URL. The path will be made absolute and have
quoted path parts.
"""
path = os.path.normpath(os.path.abspath(path))
drive, path = os.path.splitdrive(path)
filepath = path.split(os.path.sep)
url = '/'.join([urllib.quote(part) for part in filepath])
if not drive:
url = url.lstrip('/')
return 'file:///' + drive + url
def is_archive_file(name):
"""Return True if `name` is a considered as an archive file."""
archives = ('.zip', '.tar.gz', '.tar.bz2', '.tgz', '.tar', '.pybundle',
'.whl')
ext = splitext(name)[1].lower()
if ext in archives:
return True
return False
def unpack_vcs_link(link, location, only_download=False):
vcs_backend = _get_used_vcs_backend(link)
if only_download:
vcs_backend.export(location)
else:
vcs_backend.unpack(location)
def _get_used_vcs_backend(link):
for backend in vcs.backends:
if link.scheme in backend.schemes:
vcs_backend = backend(link.url)
return vcs_backend
def is_vcs_url(link):
return bool(_get_used_vcs_backend(link))
def is_file_url(link):
return link.url.lower().startswith('file:')
def _check_hash(download_hash, link):
if download_hash.digest_size != hashlib.new(link.hash_name).digest_size:
logger.fatal("Hash digest size of the package %d (%s) doesn't match the expected hash name %s!"
% (download_hash.digest_size, link, link.hash_name))
raise HashMismatch('Hash name mismatch for package %s' % link)
if download_hash.hexdigest() != link.hash:
logger.fatal("Hash of the package %s (%s) doesn't match the expected hash %s!"
% (link, download_hash.hexdigest(), link.hash))
raise HashMismatch('Bad %s hash for package %s' % (link.hash_name, link))
def _get_hash_from_file(target_file, link):
try:
download_hash = hashlib.new(link.hash_name)
except (ValueError, TypeError):
logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
return None
fp = open(target_file, 'rb')
while True:
chunk = fp.read(4096)
if not chunk:
break
download_hash.update(chunk)
fp.close()
return download_hash
def _download_url(resp, link, temp_location):
fp = open(temp_location, 'wb')
download_hash = None
if link.hash and link.hash_name:
try:
download_hash = hashlib.new(link.hash_name)
except ValueError:
logger.warn("Unsupported hash name %s for package %s" % (link.hash_name, link))
try:
total_length = int(resp.headers['content-length'])
except (ValueError, KeyError, TypeError):
total_length = 0
downloaded = 0
show_progress = total_length > 40 * 1000 or not total_length
show_url = link.show_url
try:
if show_progress:
## FIXME: the URL can get really long in this message:
if total_length:
logger.start_progress('Downloading %s (%s): ' % (show_url, format_size(total_length)))
else:
logger.start_progress('Downloading %s (unknown size): ' % show_url)
else:
logger.notify('Downloading %s' % show_url)
logger.info('Downloading from URL %s' % link)
def resp_read(chunk_size):
try:
# Special case for urllib3.
try:
for chunk in resp.raw.stream(
chunk_size, decode_content=False):
yield chunk
except IncompleteRead as e:
raise ChunkedEncodingError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = resp.raw.read(chunk_size)
if not chunk:
break
yield chunk
for chunk in resp_read(4096):
downloaded += len(chunk)
if show_progress:
if not total_length:
logger.show_progress('%s' % format_size(downloaded))
else:
logger.show_progress('%3i%% %s' % (100 * downloaded / total_length, format_size(downloaded)))
if download_hash is not None:
download_hash.update(chunk)
fp.write(chunk)
fp.close()
finally:
if show_progress:
logger.end_progress('%s downloaded' % format_size(downloaded))
return download_hash
def _copy_file(filename, location, content_type, link):
copy = True
download_location = os.path.join(location, link.filename)
if os.path.exists(download_location):
response = ask_path_exists(
'The file %s exists. (i)gnore, (w)ipe, (b)ackup ' %
display_path(download_location), ('i', 'w', 'b'))
if response == 'i':
copy = False
elif response == 'w':
logger.warn('Deleting %s' % display_path(download_location))
os.remove(download_location)
elif response == 'b':
dest_file = backup_dir(download_location)
logger.warn('Backing up %s to %s'
% (display_path(download_location), display_path(dest_file)))
shutil.move(download_location, dest_file)
if copy:
shutil.copy(filename, download_location)
logger.notify('Saved %s' % display_path(download_location))
def unpack_http_url(link, location, download_cache, download_dir=None,
session=None):
if session is None:
session = PipSession()
temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
temp_location = None
target_url = link.url.split('#', 1)[0]
already_cached = False
cache_file = None
cache_content_type_file = None
download_hash = None
# If a download cache is specified, is the file cached there?
if download_cache:
cache_file = os.path.join(download_cache,
urllib.quote(target_url, ''))
cache_content_type_file = cache_file + '.content-type'
already_cached = (
os.path.exists(cache_file) and
os.path.exists(cache_content_type_file)
)
if not os.path.isdir(download_cache):
create_download_cache_folder(download_cache)
# If a download dir is specified, is the file already downloaded there?
already_downloaded = None
if download_dir:
already_downloaded = os.path.join(download_dir, link.filename)
if not os.path.exists(already_downloaded):
already_downloaded = None
# If already downloaded, does it's hash match?
if already_downloaded:
temp_location = already_downloaded
content_type = mimetypes.guess_type(already_downloaded)[0]
logger.notify('File was already downloaded %s' % already_downloaded)
if link.hash:
download_hash = _get_hash_from_file(temp_location, link)
try:
_check_hash(download_hash, link)
except HashMismatch:
logger.warn(
'Previously-downloaded file %s has bad hash, '
're-downloading.' % temp_location
)
temp_location = None
os.unlink(already_downloaded)
already_downloaded = None
# If not a valid download, let's confirm the cached file is valid
if already_cached and not temp_location:
with open(cache_content_type_file) as fp:
content_type = fp.read().strip()
temp_location = cache_file
logger.notify('Using download cache from %s' % cache_file)
if link.hash and link.hash_name:
download_hash = _get_hash_from_file(cache_file, link)
try:
_check_hash(download_hash, link)
except HashMismatch:
logger.warn(
'Cached file %s has bad hash, '
're-downloading.' % temp_location
)
temp_location = None
os.unlink(cache_file)
os.unlink(cache_content_type_file)
already_cached = False
# We don't have either a cached or a downloaded copy
# let's download to a tmp dir
if not temp_location:
try:
resp = session.get(target_url, stream=True)
resp.raise_for_status()
except requests.HTTPError as exc:
logger.fatal("HTTP error %s while getting %s" %
(exc.response.status_code, link))
raise
content_type = resp.headers.get('content-type', '')
filename = link.filename # fallback
# Have a look at the Content-Disposition header for a better guess
content_disposition = resp.headers.get('content-disposition')
if content_disposition:
type, params = cgi.parse_header(content_disposition)
# We use ``or`` here because we don't want to use an "empty" value
# from the filename param.
filename = params.get('filename') or filename
ext = splitext(filename)[1]
if not ext:
ext = mimetypes.guess_extension(content_type)
if ext:
filename += ext
if not ext and link.url != resp.url:
ext = os.path.splitext(resp.url)[1]
if ext:
filename += ext
temp_location = os.path.join(temp_dir, filename)
download_hash = _download_url(resp, link, temp_location)
if link.hash and link.hash_name:
_check_hash(download_hash, link)
# a download dir is specified; let's copy the archive there
if download_dir and not already_downloaded:
_copy_file(temp_location, download_dir, content_type, link)
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(temp_location, location, content_type, link)
# if using a download cache, cache it, if needed
if cache_file and not already_cached:
cache_download(cache_file, temp_location, content_type)
if not (already_cached or already_downloaded):
os.unlink(temp_location)
os.rmdir(temp_dir)
def unpack_file_url(link, location, download_dir=None):
link_path = url_to_path(link.url_without_fragment)
already_downloaded = False
# If it's a url to a local directory
if os.path.isdir(link_path):
if os.path.isdir(location):
rmtree(location)
shutil.copytree(link_path, location, symlinks=True)
return
# if link has a hash, let's confirm it matches
if link.hash:
link_path_hash = _get_hash_from_file(link_path, link)
_check_hash(link_path_hash, link)
# If a download dir is specified, is the file already there and valid?
if download_dir:
download_path = os.path.join(download_dir, link.filename)
if os.path.exists(download_path):
content_type = mimetypes.guess_type(download_path)[0]
logger.notify('File was already downloaded %s' % download_path)
if link.hash:
download_hash = _get_hash_from_file(download_path, link)
try:
_check_hash(download_hash, link)
already_downloaded = True
except HashMismatch:
logger.warn(
'Previously-downloaded file %s has bad hash, '
're-downloading.' % link_path
)
os.unlink(download_path)
else:
already_downloaded = True
if already_downloaded:
from_path = download_path
else:
from_path = link_path
content_type = mimetypes.guess_type(from_path)[0]
# unpack the archive to the build dir location. even when only downloading
# archives, they have to be unpacked to parse dependencies
unpack_file(from_path, location, content_type, link)
# a download dir is specified and not already downloaded
if download_dir and not already_downloaded:
_copy_file(from_path, download_dir, content_type, link)
| gpl-3.0 |
fjbatresv/odoo | addons/account/wizard/account_fiscalyear_close.py | 222 | 15660 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_fiscalyear_close(osv.osv_memory):
"""
Closes Account Fiscalyear and Generate Opening entries for New Fiscalyear
"""
_name = "account.fiscalyear.close"
_description = "Fiscalyear Close"
_columns = {
'fy_id': fields.many2one('account.fiscalyear', \
'Fiscal Year to close', required=True, help="Select a Fiscal year to close"),
'fy2_id': fields.many2one('account.fiscalyear', \
'New Fiscal Year', required=True),
'journal_id': fields.many2one('account.journal', 'Opening Entries Journal', domain="[('type','=','situation')]", required=True, help='The best practice here is to use a journal dedicated to contain the opening entries of all fiscal years. Note that you should define it with default debit/credit accounts, of type \'situation\' and with a centralized counterpart.'),
'period_id': fields.many2one('account.period', 'Opening Entries Period', required=True),
'report_name': fields.char('Name of new entries', required=True, help="Give name of the new entries"),
}
_defaults = {
'report_name': lambda self, cr, uid, context: _('End of Fiscal Year Entry'),
}
def data_save(self, cr, uid, ids, context=None):
"""
This function close account fiscalyear and create entries in new fiscalyear
@param cr: the current row, from the database cursor,
@param uid: the current user’s ID for security checks,
@param ids: List of Account fiscalyear close state’s IDs
"""
def _reconcile_fy_closing(cr, uid, ids, context=None):
"""
            This private function manually does the reconciliation on the account_move_line given as `ids´, and directly
            through psql. It's necessary to do it this way because the usual `reconcile()´ function on account.move.line
            object is really resource greedy (not supposed to work on reconciliation between thousands of records) and
            it does a lot of different computations that are useless in this particular case.
"""
            #check that the reconciliation concerns journal entries from only one company
cr.execute('select distinct(company_id) from account_move_line where id in %s',(tuple(ids),))
if len(cr.fetchall()) > 1:
raise osv.except_osv(_('Warning!'), _('The entries to reconcile should belong to the same company.'))
r_id = self.pool.get('account.move.reconcile').create(cr, uid, {'type': 'auto', 'opening_reconciliation': True})
cr.execute('update account_move_line set reconcile_id = %s where id in %s',(r_id, tuple(ids),))
# reconcile_ref deptends from reconcile_id but was not recomputed
obj_acc_move_line._store_set_values(cr, uid, ids, ['reconcile_ref'], context=context)
obj_acc_move_line.invalidate_cache(cr, uid, ['reconcile_id'], ids, context=context)
return r_id
obj_acc_period = self.pool.get('account.period')
obj_acc_fiscalyear = self.pool.get('account.fiscalyear')
obj_acc_journal = self.pool.get('account.journal')
obj_acc_move = self.pool.get('account.move')
obj_acc_move_line = self.pool.get('account.move.line')
obj_acc_account = self.pool.get('account.account')
obj_acc_journal_period = self.pool.get('account.journal.period')
currency_obj = self.pool.get('res.currency')
data = self.browse(cr, uid, ids, context=context)
if context is None:
context = {}
fy_id = data[0].fy_id.id
cr.execute("SELECT id FROM account_period WHERE date_stop < (SELECT date_start FROM account_fiscalyear WHERE id = %s)", (str(data[0].fy2_id.id),))
fy_period_set = ','.join(map(lambda id: str(id[0]), cr.fetchall()))
cr.execute("SELECT id FROM account_period WHERE date_start > (SELECT date_stop FROM account_fiscalyear WHERE id = %s)", (str(fy_id),))
fy2_period_set = ','.join(map(lambda id: str(id[0]), cr.fetchall()))
if not fy_period_set or not fy2_period_set:
raise osv.except_osv(_('User Error!'), _('The periods to generate opening entries cannot be found.'))
period = obj_acc_period.browse(cr, uid, data[0].period_id.id, context=context)
new_fyear = obj_acc_fiscalyear.browse(cr, uid, data[0].fy2_id.id, context=context)
old_fyear = obj_acc_fiscalyear.browse(cr, uid, fy_id, context=context)
new_journal = data[0].journal_id.id
new_journal = obj_acc_journal.browse(cr, uid, new_journal, context=context)
company_id = new_journal.company_id.id
if not new_journal.default_credit_account_id or not new_journal.default_debit_account_id:
raise osv.except_osv(_('User Error!'),
_('The journal must have default credit and debit account.'))
if (not new_journal.centralisation) or new_journal.entry_posted:
raise osv.except_osv(_('User Error!'),
_('The journal must have centralized counterpart without the Skipping draft state option checked.'))
#delete existing move and move lines if any
move_ids = obj_acc_move.search(cr, uid, [
('journal_id', '=', new_journal.id), ('period_id', '=', period.id)])
if move_ids:
move_line_ids = obj_acc_move_line.search(cr, uid, [('move_id', 'in', move_ids)])
obj_acc_move_line._remove_move_reconcile(cr, uid, move_line_ids, opening_reconciliation=True, context=context)
obj_acc_move_line.unlink(cr, uid, move_line_ids, context=context)
obj_acc_move.unlink(cr, uid, move_ids, context=context)
cr.execute("SELECT id FROM account_fiscalyear WHERE date_stop < %s", (str(new_fyear.date_start),))
result = cr.dictfetchall()
fy_ids = [x['id'] for x in result]
query_line = obj_acc_move_line._query_get(cr, uid,
obj='account_move_line', context={'fiscalyear': fy_ids})
#create the opening move
vals = {
'name': '/',
'ref': '',
'period_id': period.id,
'date': period.date_start,
'journal_id': new_journal.id,
}
move_id = obj_acc_move.create(cr, uid, vals, context=context)
        #1. report of the accounts with deferral method == 'unreconciled'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'unreconciled', ))
account_ids = map(lambda x: x[0], cr.fetchall())
if account_ids:
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT name, create_uid, create_date, write_uid, write_date,
statement_id, %s,currency_id, date_maturity, partner_id,
blocked, credit, 'draft', debit, ref, account_id,
%s, (%s) AS date, %s, amount_currency, quantity, product_id, company_id
FROM account_move_line
WHERE account_id IN %s
AND ''' + query_line + '''
AND reconcile_id IS NULL)''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
#We have also to consider all move_lines that were reconciled
#on another fiscal year, and report them too
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT
b.name, b.create_uid, b.create_date, b.write_uid, b.write_date,
b.statement_id, %s, b.currency_id, b.date_maturity,
b.partner_id, b.blocked, b.credit, 'draft', b.debit,
b.ref, b.account_id, %s, (%s) AS date, %s, b.amount_currency,
b.quantity, b.product_id, b.company_id
FROM account_move_line b
WHERE b.account_id IN %s
AND b.reconcile_id IS NOT NULL
AND b.period_id IN ('''+fy_period_set+''')
AND b.reconcile_id IN (SELECT DISTINCT(reconcile_id)
FROM account_move_line a
WHERE a.period_id IN ('''+fy2_period_set+''')))''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
self.invalidate_cache(cr, uid, context=context)
        #2. report of the accounts with deferral method == 'detail'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'detail', ))
account_ids = map(lambda x: x[0], cr.fetchall())
if account_ids:
cr.execute('''
INSERT INTO account_move_line (
name, create_uid, create_date, write_uid, write_date,
statement_id, journal_id, currency_id, date_maturity,
partner_id, blocked, credit, state, debit,
ref, account_id, period_id, date, move_id, amount_currency,
quantity, product_id, company_id)
(SELECT name, create_uid, create_date, write_uid, write_date,
statement_id, %s,currency_id, date_maturity, partner_id,
blocked, credit, 'draft', debit, ref, account_id,
%s, (%s) AS date, %s, amount_currency, quantity, product_id, company_id
FROM account_move_line
WHERE account_id IN %s
AND ''' + query_line + ''')
''', (new_journal.id, period.id, period.date_start, move_id, tuple(account_ids),))
self.invalidate_cache(cr, uid, context=context)
        #3. report of the accounts with deferral method == 'balance'
cr.execute('''
SELECT a.id
FROM account_account a
LEFT JOIN account_account_type t ON (a.user_type = t.id)
WHERE a.active
AND a.type not in ('view', 'consolidation')
AND a.company_id = %s
AND t.close_method = %s''', (company_id, 'balance', ))
account_ids = map(lambda x: x[0], cr.fetchall())
query_1st_part = """
INSERT INTO account_move_line (
debit, credit, name, date, move_id, journal_id, period_id,
account_id, currency_id, amount_currency, company_id, state) VALUES
"""
query_2nd_part = ""
query_2nd_part_args = []
for account in obj_acc_account.browse(cr, uid, account_ids, context={'fiscalyear': fy_id}):
company_currency_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.currency_id
if not currency_obj.is_zero(cr, uid, company_currency_id, abs(account.balance)):
if query_2nd_part:
query_2nd_part += ','
query_2nd_part += "(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
query_2nd_part_args += (account.balance > 0 and account.balance or 0.0,
account.balance < 0 and -account.balance or 0.0,
data[0].report_name,
period.date_start,
move_id,
new_journal.id,
period.id,
account.id,
account.currency_id and account.currency_id.id or None,
account.foreign_balance if account.currency_id else 0.0,
account.company_id.id,
'draft')
if query_2nd_part:
cr.execute(query_1st_part + query_2nd_part, tuple(query_2nd_part_args))
self.invalidate_cache(cr, uid, context=context)
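            # Illustratively (the figures are hypothetical), two accounts with non-zero
            # balances yield a single statement of the form
            #   INSERT INTO account_move_line (debit, credit, ...) VALUES
            #   (%s, %s, ..., %s), (%s, %s, ..., %s)
            # executed once with twelve parameters per account, i.e. one round-trip
            # for all balance-carried accounts instead of one INSERT each.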
#validate and centralize the opening move
obj_acc_move.validate(cr, uid, [move_id], context=context)
#reconcile all the move.line of the opening move
ids = obj_acc_move_line.search(cr, uid, [('journal_id', '=', new_journal.id),
('period_id.fiscalyear_id','=',new_fyear.id)])
if ids:
reconcile_id = _reconcile_fy_closing(cr, uid, ids, context=context)
                #set the creation date of the reconciliation to the first day of the new fiscalyear, in order to have good figures in the aged trial balance
self.pool.get('account.move.reconcile').write(cr, uid, [reconcile_id], {'create_date': new_fyear.date_start}, context=context)
#create the journal.period object and link it to the old fiscalyear
new_period = data[0].period_id.id
ids = obj_acc_journal_period.search(cr, uid, [('journal_id', '=', new_journal.id), ('period_id', '=', new_period)])
if not ids:
ids = [obj_acc_journal_period.create(cr, uid, {
'name': (new_journal.name or '') + ':' + (period.code or ''),
'journal_id': new_journal.id,
'period_id': period.id
})]
cr.execute('UPDATE account_fiscalyear ' \
'SET end_journal_period_id = %s ' \
'WHERE id = %s', (ids[0], old_fyear.id))
obj_acc_fiscalyear.invalidate_cache(cr, uid, ['end_journal_period_id'], [old_fyear.id], context=context)
return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
AgrAlert/AgrAlert_Backend | lib/python2.7/site-packages/pip/_vendor/requests/models.py | 151 | 28156 | # -*- coding: utf-8 -*-
"""
requests.models
~~~~~~~~~~~~~~~
This module contains the primary objects that power Requests.
"""
import collections
import datetime
from io import BytesIO, UnsupportedOperation
from .hooks import default_hooks
from .structures import CaseInsensitiveDict
from .auth import HTTPBasicAuth
from .cookies import cookiejar_from_dict, get_cookie_header
from .packages.urllib3.fields import RequestField
from .packages.urllib3.filepost import encode_multipart_formdata
from .packages.urllib3.util import parse_url
from .packages.urllib3.exceptions import (
DecodeError, ReadTimeoutError, ProtocolError, LocationParseError)
from .exceptions import (
HTTPError, MissingSchema, InvalidURL, ChunkedEncodingError,
ContentDecodingError, ConnectionError, StreamConsumedError)
from .utils import (
guess_filename, get_auth_from_url, requote_uri,
stream_decode_response_unicode, to_key_val_list, parse_header_links,
iter_slices, guess_json_utf, super_len, to_native_string)
from .compat import (
cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO,
is_py2, chardet, json, builtin_str, basestring)
from .status_codes import codes
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
codes.moved, # 301
codes.found, # 302
codes.other, # 303
codes.temporary_redirect, # 307
codes.permanent_redirect, # 308
)
DEFAULT_REDIRECT_LIMIT = 30
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
json_dumps = json.dumps
class RequestEncodingMixin(object):
@property
def path_url(self):
"""Build the path URL to use."""
url = []
p = urlsplit(self.url)
path = p.path
if not path:
path = '/'
url.append(path)
query = p.query
if query:
url.append('?')
url.append(query)
return ''.join(url)
@staticmethod
def _encode_params(data):
"""Encode parameters in a piece of data.
Will successfully encode parameters when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if isinstance(data, (str, bytes)):
return data
elif hasattr(data, 'read'):
return data
elif hasattr(data, '__iter__'):
result = []
for k, vs in to_key_val_list(data):
if isinstance(vs, basestring) or not hasattr(vs, '__iter__'):
vs = [vs]
for v in vs:
if v is not None:
result.append(
(k.encode('utf-8') if isinstance(k, str) else k,
v.encode('utf-8') if isinstance(v, str) else v))
return urlencode(result, doseq=True)
else:
return data
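    # A minimal sketch of the behaviour described above; the sample values are
    # illustrative and not used anywhere in this module:
    #
    #   >>> RequestEncodingMixin._encode_params([('b', 2), ('a', [1, 3])])
    #   'b=2&a=1&a=3'
    #   >>> RequestEncodingMixin._encode_params({'a': 1})  # dict order is arbitrary
    #   'a=1'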
@staticmethod
def _encode_files(files, data):
"""Build the body for a multipart/form-data request.
Will successfully encode files when passed as a dict or a list of
2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
if parameters are supplied as a dict.
"""
if (not files):
raise ValueError("Files must be provided.")
elif isinstance(data, basestring):
raise ValueError("Data must not be a string.")
new_fields = []
fields = to_key_val_list(data or {})
files = to_key_val_list(files or {})
for field, val in fields:
if isinstance(val, basestring) or not hasattr(val, '__iter__'):
val = [val]
for v in val:
if v is not None:
# Don't call str() on bytestrings: in Py3 it all goes wrong.
if not isinstance(v, bytes):
v = str(v)
new_fields.append(
(field.decode('utf-8') if isinstance(field, bytes) else field,
v.encode('utf-8') if isinstance(v, str) else v))
for (k, v) in files:
# support for explicit filename
ft = None
fh = None
if isinstance(v, (tuple, list)):
if len(v) == 2:
fn, fp = v
elif len(v) == 3:
fn, fp, ft = v
else:
fn, fp, ft, fh = v
else:
fn = guess_filename(v) or k
fp = v
if isinstance(fp, (str, bytes, bytearray)):
fdata = fp
else:
fdata = fp.read()
rf = RequestField(name=k, data=fdata,
filename=fn, headers=fh)
rf.make_multipart(content_type=ft)
new_fields.append(rf)
body, content_type = encode_multipart_formdata(new_fields)
return body, content_type
class RequestHooksMixin(object):
def register_hook(self, event, hook):
"""Properly register a hook."""
if event not in self.hooks:
raise ValueError('Unsupported event specified, with event name "%s"' % (event))
if isinstance(hook, collections.Callable):
self.hooks[event].append(hook)
elif hasattr(hook, '__iter__'):
self.hooks[event].extend(h for h in hook if isinstance(h, collections.Callable))
def deregister_hook(self, event, hook):
"""Deregister a previously registered hook.
Returns True if the hook existed, False if not.
"""
try:
self.hooks[event].remove(hook)
return True
except ValueError:
return False
class Request(RequestHooksMixin):
"""A user-created :class:`Request <Request>` object.
Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.
:param method: HTTP method to use.
:param url: URL to send.
:param headers: dictionary of headers to send.
:param files: dictionary of {filename: fileobject} files to multipart upload.
:param data: the body to attach to the request. If a dictionary is provided, form-encoding will take place.
:param json: json for the body to attach to the request (if data is not specified).
:param params: dictionary of URL parameters to append to the URL.
:param auth: Auth handler or (user, pass) tuple.
:param cookies: dictionary or CookieJar of cookies to attach to this request.
:param hooks: dictionary of callback hooks, for internal usage.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> req.prepare()
<PreparedRequest [GET]>
"""
def __init__(self,
method=None,
url=None,
headers=None,
files=None,
data=None,
params=None,
auth=None,
cookies=None,
hooks=None,
json=None):
# Default empty dicts for dict params.
data = [] if data is None else data
files = [] if files is None else files
headers = {} if headers is None else headers
params = {} if params is None else params
hooks = {} if hooks is None else hooks
self.hooks = default_hooks()
for (k, v) in list(hooks.items()):
self.register_hook(event=k, hook=v)
self.method = method
self.url = url
self.headers = headers
self.files = files
self.data = data
self.json = json
self.params = params
self.auth = auth
self.cookies = cookies
def __repr__(self):
return '<Request [%s]>' % (self.method)
def prepare(self):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
p = PreparedRequest()
p.prepare(
method=self.method,
url=self.url,
headers=self.headers,
files=self.files,
data=self.data,
json=self.json,
params=self.params,
auth=self.auth,
cookies=self.cookies,
hooks=self.hooks,
)
return p
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
"""The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
containing the exact bytes that will be sent to the server.
Generated from either a :class:`Request <Request>` object or manually.
Usage::
>>> import requests
>>> req = requests.Request('GET', 'http://httpbin.org/get')
>>> r = req.prepare()
<PreparedRequest [GET]>
>>> s = requests.Session()
>>> s.send(r)
<Response [200]>
"""
def __init__(self):
#: HTTP verb to send to the server.
self.method = None
#: HTTP URL to send the request to.
self.url = None
#: dictionary of HTTP headers.
self.headers = None
# The `CookieJar` used to create the Cookie header will be stored here
# after prepare_cookies is called
self._cookies = None
#: request body to send to the server.
self.body = None
#: dictionary of callback hooks, for internal usage.
self.hooks = default_hooks()
def prepare(self, method=None, url=None, headers=None, files=None,
data=None, params=None, auth=None, cookies=None, hooks=None,
json=None):
"""Prepares the entire request with the given parameters."""
self.prepare_method(method)
self.prepare_url(url, params)
self.prepare_headers(headers)
self.prepare_cookies(cookies)
self.prepare_body(data, files, json)
self.prepare_auth(auth, url)
# Note that prepare_auth must be last to enable authentication schemes
# such as OAuth to work on a fully prepared request.
# This MUST go after prepare_auth. Authenticators could add a hook
self.prepare_hooks(hooks)
def __repr__(self):
return '<PreparedRequest [%s]>' % (self.method)
def copy(self):
p = PreparedRequest()
p.method = self.method
p.url = self.url
p.headers = self.headers.copy() if self.headers is not None else None
p._cookies = self._cookies.copy() if self._cookies is not None else None
p.body = self.body
p.hooks = self.hooks
return p
def prepare_method(self, method):
"""Prepares the given HTTP method."""
self.method = method
if self.method is not None:
self.method = self.method.upper()
def prepare_url(self, url, params):
"""Prepares the given HTTP URL."""
#: Accept objects that have string representations.
        #: We're unable to blindly call unicode/str functions
#: as this will include the bytestring indicator (b'')
#: on python 3.x.
#: https://github.com/kennethreitz/requests/pull/2238
if isinstance(url, bytes):
url = url.decode('utf8')
else:
url = unicode(url) if is_py2 else str(url)
# Don't do any URL preparation for non-HTTP schemes like `mailto`,
# `data` etc to work around exceptions from `url_parse`, which
# handles RFC 3986 only.
if ':' in url and not url.lower().startswith('http'):
self.url = url
return
# Support for unicode domain names and paths.
try:
scheme, auth, host, port, path, query, fragment = parse_url(url)
except LocationParseError as e:
raise InvalidURL(*e.args)
if not scheme:
raise MissingSchema("Invalid URL {0!r}: No schema supplied. "
"Perhaps you meant http://{0}?".format(url))
if not host:
raise InvalidURL("Invalid URL %r: No host supplied" % url)
# Only want to apply IDNA to the hostname
try:
host = host.encode('idna').decode('utf-8')
except UnicodeError:
raise InvalidURL('URL has an invalid label.')
# Carefully reconstruct the network location
netloc = auth or ''
if netloc:
netloc += '@'
netloc += host
if port:
netloc += ':' + str(port)
# Bare domains aren't valid URLs.
if not path:
path = '/'
if is_py2:
if isinstance(scheme, str):
scheme = scheme.encode('utf-8')
if isinstance(netloc, str):
netloc = netloc.encode('utf-8')
if isinstance(path, str):
path = path.encode('utf-8')
if isinstance(query, str):
query = query.encode('utf-8')
if isinstance(fragment, str):
fragment = fragment.encode('utf-8')
enc_params = self._encode_params(params)
if enc_params:
if query:
query = '%s&%s' % (query, enc_params)
else:
query = enc_params
url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
self.url = url
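    # A small usage sketch of the URL preparation above (httpbin.org is just an
    # example host, consistent with the docstrings in this module):
    #
    #   >>> p = PreparedRequest()
    #   >>> p.prepare_url('http://httpbin.org/get', {'key': 'value'})
    #   >>> p.url
    #   'http://httpbin.org/get?key=value'
    #
    # Non-HTTP schemes such as 'mailto:user@example.com' are deliberately passed
    # through untouched by the early return near the top of this method.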
def prepare_headers(self, headers):
"""Prepares the given HTTP headers."""
if headers:
self.headers = CaseInsensitiveDict((to_native_string(name), value) for name, value in headers.items())
else:
self.headers = CaseInsensitiveDict()
def prepare_body(self, data, files, json=None):
"""Prepares the given HTTP body data."""
# Check if file, fo, generator, iterator.
# If not, run through normal process.
# Nottin' on you.
body = None
content_type = None
length = None
if json is not None:
content_type = 'application/json'
body = json_dumps(json)
is_stream = all([
hasattr(data, '__iter__'),
not isinstance(data, (basestring, list, tuple, dict))
])
try:
length = super_len(data)
except (TypeError, AttributeError, UnsupportedOperation):
length = None
if is_stream:
body = data
if files:
raise NotImplementedError('Streamed bodies and files are mutually exclusive.')
if length is not None:
self.headers['Content-Length'] = builtin_str(length)
else:
self.headers['Transfer-Encoding'] = 'chunked'
else:
# Multi-part file uploads.
if files:
(body, content_type) = self._encode_files(files, data)
else:
if data and json is None:
body = self._encode_params(data)
if isinstance(data, basestring) or hasattr(data, 'read'):
content_type = None
else:
content_type = 'application/x-www-form-urlencoded'
self.prepare_content_length(body)
# Add content-type if it wasn't explicitly provided.
if content_type and ('content-type' not in self.headers):
self.headers['Content-Type'] = content_type
self.body = body
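    # A minimal sketch of the json branch above (the payload is illustrative):
    #
    #   >>> p = PreparedRequest()
    #   >>> p.headers = CaseInsensitiveDict()
    #   >>> p.prepare_body(data=None, files=None, json={'k': 'v'})
    #   >>> p.body, p.headers['Content-Type']
    #   ('{"k": "v"}', 'application/json')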
def prepare_content_length(self, body):
if hasattr(body, 'seek') and hasattr(body, 'tell'):
body.seek(0, 2)
self.headers['Content-Length'] = builtin_str(body.tell())
body.seek(0, 0)
elif body is not None:
l = super_len(body)
if l:
self.headers['Content-Length'] = builtin_str(l)
elif (self.method not in ('GET', 'HEAD')) and (self.headers.get('Content-Length') is None):
self.headers['Content-Length'] = '0'
def prepare_auth(self, auth, url=''):
"""Prepares the given HTTP auth data."""
# If no Auth is explicitly provided, extract it from the URL first.
if auth is None:
url_auth = get_auth_from_url(self.url)
auth = url_auth if any(url_auth) else None
if auth:
if isinstance(auth, tuple) and len(auth) == 2:
# special-case basic HTTP auth
auth = HTTPBasicAuth(*auth)
# Allow auth to make its changes.
r = auth(self)
# Update self to reflect the auth changes.
self.__dict__.update(r.__dict__)
# Recompute Content-Length
self.prepare_content_length(self.body)
def prepare_cookies(self, cookies):
"""Prepares the given HTTP cookie data."""
if isinstance(cookies, cookielib.CookieJar):
self._cookies = cookies
else:
self._cookies = cookiejar_from_dict(cookies)
cookie_header = get_cookie_header(self._cookies, self)
if cookie_header is not None:
self.headers['Cookie'] = cookie_header
def prepare_hooks(self, hooks):
"""Prepares the given hooks."""
for event in hooks:
self.register_hook(event, hooks[event])
class Response(object):
"""The :class:`Response <Response>` object, which contains a
server's response to an HTTP request.
"""
__attrs__ = [
'_content',
'status_code',
'headers',
'url',
'history',
'encoding',
'reason',
'cookies',
'elapsed',
'request',
]
def __init__(self):
super(Response, self).__init__()
self._content = False
self._content_consumed = False
#: Integer Code of responded HTTP Status, e.g. 404 or 200.
self.status_code = None
#: Case-insensitive Dictionary of Response Headers.
#: For example, ``headers['content-encoding']`` will return the
#: value of a ``'Content-Encoding'`` response header.
self.headers = CaseInsensitiveDict()
#: File-like object representation of response (for advanced usage).
#: Use of ``raw`` requires that ``stream=True`` be set on the request.
# This requirement does not apply for use internally to Requests.
self.raw = None
#: Final URL location of Response.
self.url = None
#: Encoding to decode with when accessing r.text.
self.encoding = None
#: A list of :class:`Response <Response>` objects from
#: the history of the Request. Any redirect responses will end
#: up here. The list is sorted from the oldest to the most recent request.
self.history = []
#: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
self.reason = None
#: A CookieJar of Cookies the server sent back.
self.cookies = cookiejar_from_dict({})
#: The amount of time elapsed between sending the request
#: and the arrival of the response (as a timedelta)
self.elapsed = datetime.timedelta(0)
#: The :class:`PreparedRequest <PreparedRequest>` object to which this
#: is a response.
self.request = None
def __getstate__(self):
# Consume everything; accessing the content attribute makes
# sure the content has been fully read.
if not self._content_consumed:
self.content
return dict(
(attr, getattr(self, attr, None))
for attr in self.__attrs__
)
def __setstate__(self, state):
for name, value in state.items():
setattr(self, name, value)
# pickled objects do not have .raw
setattr(self, '_content_consumed', True)
setattr(self, 'raw', None)
def __repr__(self):
return '<Response [%s]>' % (self.status_code)
def __bool__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __nonzero__(self):
"""Returns true if :attr:`status_code` is 'OK'."""
return self.ok
def __iter__(self):
"""Allows you to use a response as an iterator."""
return self.iter_content(128)
@property
def ok(self):
try:
self.raise_for_status()
except HTTPError:
return False
return True
@property
def is_redirect(self):
"""True if this Response is a well-formed HTTP redirect that could have
been processed automatically (by :meth:`Session.resolve_redirects`).
"""
return ('location' in self.headers and self.status_code in REDIRECT_STATI)
@property
def is_permanent_redirect(self):
"""True if this Response one of the permanant versions of redirect"""
return ('location' in self.headers and self.status_code in (codes.moved_permanently, codes.permanent_redirect))
@property
def apparent_encoding(self):
"""The apparent encoding, provided by the chardet library"""
return chardet.detect(self.content)['encoding']
def iter_content(self, chunk_size=1, decode_unicode=False):
"""Iterates over the response data. When stream=True is set on the
request, this avoids reading the content at once into memory for
large responses. The chunk size is the number of bytes it should
read into memory. This is not necessarily the length of each item
returned as decoding can take place.
If decode_unicode is True, content will be decoded using the best
available encoding based on the response.
"""
def generate():
try:
# Special case for urllib3.
try:
for chunk in self.raw.stream(chunk_size, decode_content=True):
yield chunk
except ProtocolError as e:
raise ChunkedEncodingError(e)
except DecodeError as e:
raise ContentDecodingError(e)
except ReadTimeoutError as e:
raise ConnectionError(e)
except AttributeError:
# Standard file-like object.
while True:
chunk = self.raw.read(chunk_size)
if not chunk:
break
yield chunk
self._content_consumed = True
if self._content_consumed and isinstance(self._content, bool):
raise StreamConsumedError()
# simulate reading small chunks of the content
reused_chunks = iter_slices(self._content, chunk_size)
stream_chunks = generate()
chunks = reused_chunks if self._content_consumed else stream_chunks
if decode_unicode:
chunks = stream_decode_response_unicode(chunks, self)
return chunks
def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None, delimiter=None):
"""Iterates over the response data, one line at a time. When
stream=True is set on the request, this avoids reading the
content at once into memory for large responses.
.. note:: This method is not reentrant safe.
"""
pending = None
for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode):
if pending is not None:
chunk = pending + chunk
if delimiter:
lines = chunk.split(delimiter)
else:
lines = chunk.splitlines()
if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
pending = lines.pop()
else:
pending = None
for line in lines:
yield line
if pending is not None:
yield pending
@property
def content(self):
"""Content of the response, in bytes."""
if self._content is False:
# Read the contents.
try:
if self._content_consumed:
raise RuntimeError(
'The content for this response was already consumed')
if self.status_code == 0:
self._content = None
else:
self._content = bytes().join(self.iter_content(CONTENT_CHUNK_SIZE)) or bytes()
except AttributeError:
self._content = None
self._content_consumed = True
# don't need to release the connection; that's been handled by urllib3
# since we exhausted the data.
return self._content
@property
def text(self):
"""Content of the response, in unicode.
If Response.encoding is None, encoding will be guessed using
``chardet``.
The encoding of the response content is determined based solely on HTTP
headers, following RFC 2616 to the letter. If you can take advantage of
non-HTTP knowledge to make a better guess at the encoding, you should
set ``r.encoding`` appropriately before accessing this property.
"""
# Try charset from content-type
content = None
encoding = self.encoding
if not self.content:
return str('')
# Fallback to auto-detected encoding.
if self.encoding is None:
encoding = self.apparent_encoding
# Decode unicode from given encoding.
try:
content = str(self.content, encoding, errors='replace')
except (LookupError, TypeError):
# A LookupError is raised if the encoding was not found which could
# indicate a misspelling or similar mistake.
#
# A TypeError can be raised if encoding is None
#
# So we try blindly encoding.
content = str(self.content, errors='replace')
return content
def json(self, **kwargs):
"""Returns the json-encoded content of a response, if any.
:param \*\*kwargs: Optional arguments that ``json.loads`` takes.
"""
if not self.encoding and len(self.content) > 3:
# No encoding set. JSON RFC 4627 section 3 states we should expect
# UTF-8, -16 or -32. Detect which one to use; If the detection or
# decoding fails, fall back to `self.text` (using chardet to make
# a best guess).
encoding = guess_json_utf(self.content)
if encoding is not None:
try:
return json.loads(self.content.decode(encoding), **kwargs)
except UnicodeDecodeError:
# Wrong UTF codec detected; usually because it's not UTF-8
# but some other 8-bit codec. This is an RFC violation,
# and the server didn't bother to tell us what codec *was*
# used.
pass
return json.loads(self.text, **kwargs)
@property
def links(self):
"""Returns the parsed header links of the response, if any."""
header = self.headers.get('link')
# l = MultiDict()
l = {}
if header:
links = parse_header_links(header)
for link in links:
key = link.get('rel') or link.get('url')
l[key] = link
return l
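    # For instance, a response carrying the header (an illustrative value, not
    # something this module ever requests)
    #   Link: <https://api.github.com/?page=2>; rel="next"
    # is exposed as
    #   r.links['next'] == {'url': 'https://api.github.com/?page=2', 'rel': 'next'}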
def raise_for_status(self):
"""Raises stored :class:`HTTPError`, if one occurred."""
http_error_msg = ''
if 400 <= self.status_code < 500:
http_error_msg = '%s Client Error: %s' % (self.status_code, self.reason)
elif 500 <= self.status_code < 600:
http_error_msg = '%s Server Error: %s' % (self.status_code, self.reason)
if http_error_msg:
raise HTTPError(http_error_msg, response=self)
def close(self):
"""Releases the connection back to the pool. Once this method has been
called the underlying ``raw`` object must not be accessed again.
*Note: Should not normally need to be called explicitly.*
"""
return self.raw.release_conn()
| mit |
tiagoantao/virtual-core | docker/file_router/copy/usr/bin/change_password.py | 1 | 1624 | #!/usr/bin/python2
import getpass
import sys
import pexpect
boot = False
if len(sys.argv) > 1 and sys.argv[1] == 'boot':
print "Boot mode"
boot = True
def get_passes():
old = getpass.getpass('Old password: ')
new1 = 'a'
new2 = 'b'
while new1 != new2:
new1 = getpass.getpass('New password: ')
new2 = getpass.getpass('New password (repeat): ')
return old, new1
def change_password(old=None, new=None):
if old is None:
old, new = get_passes()
p = pexpect.spawn('passwd')
p.expect('password')
p.sendline(old)
outcome = p.expect(['New', 'incorrect', 'error'])
p.sendline(new)
try:
outcome = p.expect('ew password:', timeout=1)
if p.match is None:
print p.buffer, 'new password'
else:
p.sendline(new)
outcome = p.expect(['success'] , timeout=1)
if p.match is not None:
return old, new
except:
print p.buffer, 'top level'
return False
def change_samba_password(old, new):
p = pexpect.spawn('smbpasswd')
p.expect('Old SMB password:')
p.sendline(old)
p.expect('New SMB password:')
p.sendline(new)
p.expect('Retype new SMB password:')
p.sendline(new)
p.expect('Password changed', timeout=2)
if p.match is None:
return False
else:
return True
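# The smbpasswd dialogue driven above looks roughly like this (illustrative
# transcript, not captured output):
#   Old SMB password:        <old>
#   New SMB password:        <new>
#   Retype new SMB password: <new>
#   Password changed ...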
pwds = change_password()
while not pwds:
pwds = change_password()
old, new = pwds
if not change_samba_password('boot' if boot else old, new):
print 'Samba password change failed, reverting ldap password'
change_password(new, old)
| agpl-3.0 |
scivey/mockthink | mockthink/test/functional/test_logic.py | 2 | 2727 | import rethinkdb as r
from mockthink.test.common import as_db_and_table, assertEqUnordered, assertEqual
from mockthink.test.functional.common import MockTest
from pprint import pprint
class TestLogic1(MockTest):
@staticmethod
def get_data():
data = [
{'id': 'joe', 'has_eyes': True, 'age': 22, 'hair_color': 'brown'},
{'id': 'sam', 'has_eyes': True, 'age': 17, 'hair_color': 'bald'},
{'id': 'angela', 'has_eyes': False, 'age': 26, 'hair_color': 'black'},
{'id': 'johnson', 'has_eyes': False, 'age': 16, 'hair_color': 'blonde'}
]
return as_db_and_table('pdb', 'p', data)
def test_not(self, conn):
expected = [
{'id': 'johnson'},
{'id': 'angela'}
]
result = r.db('pdb').table('p').filter(
lambda doc: ~doc['has_eyes']
).pluck('id').run(conn)
assertEqUnordered(expected, list(result))
def test_and(self, conn):
expected = [
{'id': 'sam'}
]
result = r.db('pdb').table('p').filter(
lambda doc: doc['has_eyes'].and_(doc['age'].lt(20))
).pluck('id').run(conn)
assertEqual(expected, list(result))
def test_or(self, conn):
expected = [
{'id': 'sam'},
{'id': 'angela'},
{'id': 'joe'}
]
result = r.db('pdb').table('p').filter(
lambda doc: doc['has_eyes'].or_(doc['age'].gt(20))
).pluck('id').run(conn)
assertEqUnordered(expected, list(result))
def test_gt(self, conn):
expected = [
{'id': 'joe'},
{'id': 'angela'}
]
result = r.db('pdb').table('p').filter(
lambda doc: doc['age'] > 20
).pluck('id').run(conn)
assertEqUnordered(expected, list(result))
def test_lt(self, conn):
expected = [
{'id': 'sam'},
{'id': 'johnson'}
]
result = r.db('pdb').table('p').filter(
lambda doc: doc['age'].lt(20)
).pluck('id').run(conn)
assertEqUnordered(expected, list(result))
def test_eq(self, conn):
expected = [
{'id': 'sam'}
]
result = r.db('pdb').table('p').filter(
lambda doc: doc['hair_color'] == 'bald'
).pluck('id').run(conn)
assertEqual(expected, list(result))
def test_neq(self, conn):
expected = [
{'id': 'sam'},
{'id': 'angela'},
{'id': 'joe'}
]
result = r.db('pdb').table('p').filter(
lambda doc: doc['hair_color'] != 'blonde'
).pluck('id').run(conn)
assertEqUnordered(expected, list(result))
| mit |
fenglu-g/incubator-airflow | airflow/contrib/sensors/weekday_sensor.py | 4 | 4291 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import six
from airflow.contrib.utils.weekday import WeekDay
from airflow.sensors.base_sensor_operator import BaseSensorOperator
from airflow.utils import timezone
from airflow.utils.decorators import apply_defaults
class DayOfWeekSensor(BaseSensorOperator):
"""
Waits until the first specified day of the week. For example, if the execution
day of the task is '2018-12-22' (Saturday) and you pass 'FRIDAY', the task will wait
until next Friday.
**Example** (with single day): ::
weekend_check = DayOfWeekSensor(
task_id='weekend_check',
week_day='Saturday',
use_task_execution_day=True,
dag=dag)
**Example** (with multiple day using set): ::
weekend_check = DayOfWeekSensor(
task_id='weekend_check',
week_day={'Saturday', 'Sunday'},
use_task_execution_day=True,
dag=dag)
**Example** (with :class:`~airflow.contrib.utils.weekday.WeekDay` enum): ::
# import WeekDay Enum
from airflow.contrib.utils.weekday import WeekDay
weekend_check = DayOfWeekSensor(
task_id='weekend_check',
week_day={WeekDay.SATURDAY, WeekDay.SUNDAY},
use_task_execution_day=True,
dag=dag)
:param week_day: Day of the week to check (full name). Optionally, a set
of days can also be provided using a set.
Example values:
* ``"MONDAY"``,
* ``{"Saturday", "Sunday"}``
* ``{WeekDay.TUESDAY}``
* ``{WeekDay.SATURDAY, WeekDay.SUNDAY}``
:type week_day: set or str or airflow.contrib.utils.weekday.WeekDay
:param use_task_execution_day: If ``True``, uses task's execution day to compare
        with week_day. Execution date is useful for backfilling.
If ``False``, uses system's day of the week. Useful when you
don't want to run anything on weekdays on the system.
:type use_task_execution_day: bool
"""
@apply_defaults
def __init__(self, week_day,
use_task_execution_day=False,
*args, **kwargs):
super(DayOfWeekSensor, self).__init__(*args, **kwargs)
self.week_day = week_day
self.use_task_execution_day = use_task_execution_day
if isinstance(self.week_day, six.string_types):
self._week_day_num = {WeekDay.get_weekday_number(week_day_str=self.week_day)}
elif isinstance(self.week_day, WeekDay):
self._week_day_num = {self.week_day}
elif isinstance(self.week_day, set):
if all(isinstance(day, six.string_types) for day in self.week_day):
self._week_day_num = {WeekDay.get_weekday_number(day) for day in week_day}
elif all(isinstance(day, WeekDay) for day in self.week_day):
self._week_day_num = self.week_day
else:
raise TypeError(
'Unsupported Type for week_day parameter: {}. It should be one of str'
', set or Weekday enum type'.format(type(week_day)))
def poke(self, context):
self.log.info('Poking until weekday is in %s, Today is %s',
self.week_day,
WeekDay(timezone.utcnow().isoweekday()).name)
if self.use_task_execution_day:
return context['execution_date'].isoweekday() in self._week_day_num
else:
return timezone.utcnow().isoweekday() in self._week_day_num
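    # As a concrete illustration (the values are only examples): a week_day of
    # {'Saturday', 'Sunday'} resolves self._week_day_num to {6, 7}, so poke()
    # returns True only when the relevant date's isoweekday() is 6 or 7.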
| apache-2.0 |
SRabbelier/Melange | thirdparty/google_appengine/lib/django_1_2/django/utils/encoding.py | 44 | 7087 | import types
import urllib
import locale
import datetime
import codecs
from decimal import Decimal
from django.utils.functional import Promise
class DjangoUnicodeDecodeError(UnicodeDecodeError):
def __init__(self, obj, *args):
self.obj = obj
UnicodeDecodeError.__init__(self, *args)
def __str__(self):
original = UnicodeDecodeError.__str__(self)
return '%s. You passed in %r (%s)' % (original, self.obj,
type(self.obj))
class StrAndUnicode(object):
"""
A class whose __str__ returns its __unicode__ as a UTF-8 bytestring.
Useful as a mix-in.
"""
def __str__(self):
return self.__unicode__().encode('utf-8')
def smart_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a unicode object representing 's'. Treats bytestrings using the
'encoding' codec.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if isinstance(s, Promise):
# The input is the result of a gettext_lazy() call.
return s
return force_unicode(s, encoding, strings_only, errors)
def is_protected_type(obj):
"""Determine if the object instance is of a protected type.
Objects of protected types are preserved as-is when passed to
force_unicode(strings_only=True).
"""
return isinstance(obj, (
types.NoneType,
int, long,
datetime.datetime, datetime.date, datetime.time,
float, Decimal)
)
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_unicode, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and is_protected_type(s):
return s
try:
if not isinstance(s, basestring,):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
try:
s = unicode(str(s), encoding, errors)
except UnicodeEncodeError:
if not isinstance(s, Exception):
raise
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII data without special
# handling to display as a string. We need to handle this
# without raising a further exception. We do an
# approximation to what the Exception's standard str()
# output should be.
s = ' '.join([force_unicode(arg, encoding, strings_only,
errors) for arg in s])
elif not isinstance(s, unicode):
# Note: We use .decode() here, instead of unicode(s, encoding,
# errors), so that if s is a SafeString, it ends up being a
# SafeUnicode at the end.
s = s.decode(encoding, errors)
except UnicodeDecodeError, e:
if not isinstance(s, Exception):
raise DjangoUnicodeDecodeError(s, *e.args)
else:
# If we get to here, the caller has passed in an Exception
# subclass populated with non-ASCII bytestring data without a
# working unicode method. Try to handle this without raising a
# further exception by individually forcing the exception args
# to unicode.
s = ' '.join([force_unicode(arg, encoding, strings_only,
errors) for arg in s])
return s
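# A couple of illustrative conversions (escape sequences keep this file ASCII-only):
#   >>> force_unicode('caf\xc3\xa9')            # UTF-8 bytestring in
#   u'caf\xe9'
#   >>> force_unicode(3.14, strings_only=True)  # protected type, returned as-is
#   3.14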
def smart_str(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Returns a bytestring version of 's', encoded as specified in 'encoding'.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and isinstance(s, (types.NoneType, int)):
return s
if isinstance(s, Promise):
return unicode(s).encode(encoding, errors)
elif not isinstance(s, basestring):
try:
return str(s)
except UnicodeEncodeError:
if isinstance(s, Exception):
# An Exception subclass containing non-ASCII data that doesn't
# know how to print itself properly. We shouldn't raise a
# further exception.
return ' '.join([smart_str(arg, encoding, strings_only,
errors) for arg in s])
return unicode(s).encode(encoding, errors)
elif isinstance(s, unicode):
return s.encode(encoding, errors)
elif s and encoding != 'utf-8':
return s.decode('utf-8', errors).encode(encoding, errors)
else:
return s
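# The inverse direction, illustratively:
#   >>> smart_str(u'caf\xe9')
#   'caf\xc3\xa9'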
def iri_to_uri(iri):
"""
Convert an Internationalized Resource Identifier (IRI) portion to a URI
portion that is suitable for inclusion in a URL.
This is the algorithm from section 3.1 of RFC 3987. However, since we are
assuming input is either UTF-8 or unicode already, we can simplify things a
little from the full method.
Returns an ASCII string containing the encoded result.
"""
# The list of safe characters here is constructed from the "reserved" and
# "unreserved" characters specified in sections 2.2 and 2.3 of RFC 3986:
# reserved = gen-delims / sub-delims
# gen-delims = ":" / "/" / "?" / "#" / "[" / "]" / "@"
# sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
# / "*" / "+" / "," / ";" / "="
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
# Of the unreserved characters, urllib.quote already considers all but
# the ~ safe.
# The % character is also added to the list of safe characters here, as the
# end of section 3.1 of RFC 3987 specifically mentions that % must not be
# converted.
if iri is None:
return iri
return urllib.quote(smart_str(iri), safe="/#%[]=:;$&()+,!?*@'~")
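# For example (an illustrative path; escapes keep this file ASCII-only):
#   >>> iri_to_uri(u'/blog/caf\xe9/?q=1')
#   '/blog/caf%C3%A9/?q=1'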
def filepath_to_uri(path):
"""Convert an file system path to a URI portion that is suitable for
inclusion in a URL.
We are assuming input is either UTF-8 or unicode already.
This method will encode certain chars that would normally be recognized as
special chars for URIs. Note that this method does not encode the '
character, as it is a valid character within URIs. See
encodeURIComponent() JavaScript function for more details.
Returns an ASCII string containing the encoded result.
"""
if path is None:
return path
# I know about `os.sep` and `os.altsep` but I want to leave
# some flexibility for hardcoding separators.
return urllib.quote(smart_str(path).replace("\\", "/"), safe="/~!*()'")
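# For example (an illustrative filename):
#   >>> filepath_to_uri(u'uploads\\summer photo.jpg')
#   'uploads/summer%20photo.jpg'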
# The encoding of the default system locale, falling back to 'ascii' if that
# encoding is unsupported by Python or could not be determined.
# See tickets #10335 and #5846
try:
DEFAULT_LOCALE_ENCODING = locale.getdefaultlocale()[1] or 'ascii'
codecs.lookup(DEFAULT_LOCALE_ENCODING)
except:
DEFAULT_LOCALE_ENCODING = 'ascii'
| apache-2.0 |
mgit-at/ansible | test/units/modules/network/onyx/test_onyx_config.py | 16 | 4592 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from ansible.modules.network.onyx import onyx_config
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
class TestOnyxConfigModule(TestOnyxModule):
module = onyx_config
def setUp(self):
super(TestOnyxConfigModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.onyx.onyx_config.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.onyx.onyx_config.load_config')
self.load_config = self.mock_load_config.start()
self.mock_run_commands = patch('ansible.modules.network.onyx.onyx_config.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestOnyxConfigModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None, transport='cli'):
config_file = 'onyx_config_config.cfg'
self.get_config.return_value = load_fixture(config_file)
self.load_config.return_value = None
def test_onyx_config_unchanged(self):
src = load_fixture('onyx_config_config.cfg')
set_module_args(dict(src=src))
self.execute_module()
def test_onyx_config_src(self):
src = load_fixture('onyx_config_src.cfg')
set_module_args(dict(src=src))
commands = [
'interface mlag-port-channel 2']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_backup(self):
set_module_args(dict(backup=True))
result = self.execute_module()
self.assertIn('__backup__', result)
def test_onyx_config_save(self):
set_module_args(dict(save='yes'))
self.execute_module(changed=True)
self.assertEqual(self.run_commands.call_count, 1)
self.assertEqual(self.get_config.call_count, 1)
self.assertEqual(self.load_config.call_count, 0)
args = self.run_commands.call_args[0][1]
self.assertIn('configuration write', args)
def test_onyx_config_lines_wo_parents(self):
set_module_args(dict(lines=['hostname foo']))
commands = ['hostname foo']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_before(self):
set_module_args(dict(lines=['hostname foo'], before=['test1', 'test2']))
commands = ['test1', 'test2', 'hostname foo']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_after(self):
set_module_args(dict(lines=['hostname foo'], after=['test1', 'test2']))
commands = ['hostname foo', 'test1', 'test2']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_before_after(self):
set_module_args(dict(lines=['hostname foo'],
before=['test1', 'test2'],
after=['test3', 'test4']))
commands = ['test1', 'test2', 'hostname foo', 'test3', 'test4']
self.execute_module(changed=True, commands=commands, sort=False, is_updates=True)
def test_onyx_config_config(self):
config = 'hostname localhost'
set_module_args(dict(lines=['hostname router'], config=config))
commands = ['hostname router']
self.execute_module(changed=True, commands=commands, is_updates=True)
def test_onyx_config_match_none(self):
lines = ['hostname router']
set_module_args(dict(lines=lines, match='none'))
self.execute_module(changed=True, commands=lines, is_updates=True)
| gpl-3.0 |
monopole/test-infra | gubernator/view_logs.py | 22 | 11302 | # Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import gcs_async
import log_parser
import kubelet_parser
import regex
import view_base
@view_base.memcache_memoize('log-file-junit://', expires=60*60*4)
def find_log_junit(build_dir, junit, log_file):
"""
Looks in build_dir for log_file in a folder that
also includes the junit file.
"""
tmps = [f.filename for f in view_base.gcs_ls('%s/artifacts' % build_dir)
if '/tmp-node' in f.filename]
for folder in tmps:
filenames = [f.filename for f in view_base.gcs_ls(folder)]
if folder + junit in filenames:
path = folder + log_file
if path in filenames:
return path
def find_log_files(all_logs, log_file):
"""
Returns list of files named log_file from values in all_logs
"""
log_files = []
for folder in all_logs.itervalues():
for log in folder:
if log_file in log:
log_files.append(log)
return log_files
@view_base.memcache_memoize('all-logs://', expires=60*60*4)
def get_all_logs(directory, artifacts):
"""
returns dictionary given the artifacts folder with the keys being the
folders, and the values being the log files within the corresponding folder
"""
log_files = {}
if artifacts:
dirs = [f.filename for f in view_base.gcs_ls('%s/artifacts' % directory)
if f.is_dir]
else:
dirs = [directory]
for d in dirs:
log_files[d] = []
for f in view_base.gcs_ls(d):
log_name = regex.log_re.search(f.filename)
if log_name:
log_files[d].append(f.filename)
return log_files
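# The returned mapping looks roughly like this (purely illustrative paths):
#   {'/bucket/logs/job/1/artifacts/tmp-node-1/': ['.../kubelet.log'],
#    '/bucket/logs/job/1/artifacts/tmp-node-2/': ['.../kube-apiserver.log']}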
def parse_log_file(log_filename, pod, filters=None, make_dict=False, objref_dict=None):
"""Based on make_dict, either returns the objref_dict or the parsed log file"""
log = gcs_async.read(log_filename).get_result()
if log is None:
        return ({}, False) if make_dict else None
if pod:
bold_re = regex.wordRE(pod)
else:
bold_re = regex.error_re
if objref_dict is None:
objref_dict = {}
if make_dict and pod:
return kubelet_parser.make_dict(log.decode('utf8', 'replace'), bold_re, objref_dict)
else:
return log_parser.digest(log.decode('utf8', 'replace'),
error_re=bold_re, filters=filters, objref_dict=objref_dict)
def get_logs_junit((log_files, pod_name, filters, objref_dict, apiserver_filename)):
# Get the logs in the case where the junit file with the failure is in a specific folder
all_logs = {}
results = {}
# default to filtering kube-apiserver log if user unchecks both checkboxes
if log_files == []:
log_files = [apiserver_filename]
artifact_filename = os.path.dirname(apiserver_filename)
all_logs = get_all_logs(artifact_filename, False)
parsed_dict, _ = parse_log_file(os.path.join(artifact_filename, "kubelet.log"),
pod_name, make_dict=True, objref_dict=objref_dict)
objref_dict.update(parsed_dict)
if log_files:
for log_file in log_files:
parsed_file = parse_log_file(log_file, pod_name, filters, objref_dict=objref_dict)
if parsed_file:
results[log_file] = parsed_file
return all_logs, results, objref_dict, log_files
def get_logs_no_pod(apiserver_filename, kubelet_filenames, filters, objref_dict, all_logs):
# Get results of parsing logs when no pod name is given
results = {}
if apiserver_filename:
for apiserver_log in apiserver_filename:
parsed_file = parse_log_file(apiserver_log, "", filters,
objref_dict=objref_dict)
if parsed_file:
results[apiserver_log] = parsed_file
return all_logs, results, objref_dict, apiserver_filename
else:
for kubelet_log in kubelet_filenames:
parsed_file = parse_log_file(kubelet_log, "", filters,
objref_dict=objref_dict)
if parsed_file:
results[kubelet_log] = parsed_file
return all_logs, results, objref_dict, kubelet_filenames
def get_logs(build_dir, log_files, pod_name, filters, objref_dict):
"""
Get the logs in the case where all logs in artifacts folder may be relevant
Returns:
all_logs: dictionary of all logs that can be filtered
results: dictionary of log file to the parsed text
obref_dict: dictionary of name of filter to the string to be filtered
log_files: list of files that are being displayed/filtered
"""
all_logs = {}
results = {}
old_dict_len = len(objref_dict)
all_logs = get_all_logs(build_dir, True)
apiserver_filename = find_log_files(all_logs, "kube-apiserver.log")
kubelet_filenames = find_log_files(all_logs, "kubelet.log")
if not pod_name and not objref_dict:
return get_logs_no_pod(apiserver_filename, kubelet_filenames, filters,
objref_dict, all_logs)
for kubelet_log in kubelet_filenames:
if pod_name:
parsed_dict, pod_in_file = parse_log_file(kubelet_log, pod_name, make_dict=True,
objref_dict=objref_dict)
objref_dict.update(parsed_dict)
if len(objref_dict) > old_dict_len or not pod_name or pod_in_file or not objref_dict:
if log_files == []:
log_files = [kubelet_log]
if apiserver_filename:
log_files.extend(apiserver_filename)
for log_file in log_files:
parsed_file = parse_log_file(log_file, pod_name, filters,
objref_dict=objref_dict)
if parsed_file:
results[log_file] = parsed_file
break
return all_logs, results, objref_dict, log_files
def get_woven_logs(log_files, pod, filters, objref_dict):
lines = []
combined_lines = []
first_combined = ""
pod_re = regex.wordRE(pod)
# Produce a list of lines of all the selected logs
for log_file in log_files:
log = gcs_async.read(log_file).get_result()
log = log.decode('utf8', 'replace')
lines.extend(log.split('\n'))
# Combine lines without timestamp into previous line, except if it comes at the
# beginning of the file, in which case add it to the line with the first timestamp
for line in lines:
timestamp_re = regex.timestamp(line)
if timestamp_re and timestamp_re.group(0):
if not combined_lines:
# add beginning of file to first timestamp line
line = first_combined + line
combined_lines.append(line)
else:
if not combined_lines:
first_combined = first_combined + line
else:
combined_lines[-1] = combined_lines[-1] + line
lines = sorted(combined_lines, key=regex.sub_timestamp)
data = '\n'.join(lines)
woven_logs = log_parser.digest(data, error_re=pod_re,
filters=filters, objref_dict=objref_dict)
return woven_logs
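# Net effect of the weaving above (illustrative): lines from e.g. kubelet.log and
# kube-apiserver.log are merged into one stream and re-ordered on their leading
# timestamps, so the filtered output reads chronologically across files.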
def parse_by_timestamp((build_dir, junit, log_files, pod, filters, objref_dict)):
"""
Returns:
woven_logs: HTML code of chosen logs woven together by timestamp
all_logs: Dictionary of logs relevant for filtering
"""
woven_logs = get_woven_logs(log_files, pod, filters, objref_dict)
apiserver_filename = find_log_junit(build_dir, junit, "kube-apiserver.log")
if apiserver_filename:
artifact_filename = re.sub("/kube-apiserver.log", "", apiserver_filename)
all_logs = get_all_logs(artifact_filename, False)
if not apiserver_filename:
all_logs = get_all_logs(build_dir, True)
return woven_logs, all_logs
class NodeLogHandler(view_base.BaseHandler):
def get(self, prefix, job, build):
"""
Examples of variables
log_files: ["kubelet.log", "kube-apiserver.log"]
pod_name: "pod-abcdef123"
junit: "junit_01.xml"
uid, namespace, wrap: "on"
cID, poduid, ns: strings entered into textboxes
results, logs: {"kubelet.log":"parsed kubelet log for html"}
all_logs: {"folder_name":["a.log", "b.log"]}
"""
# pylint: disable=too-many-locals
job_dir = '/%s/%s/' % (prefix, job)
build_dir = job_dir + build
log_files = self.request.get_all("logfiles")
others = self.request.get_all("others")
pod_name = self.request.get("pod")
junit = self.request.get("junit")
cID = self.request.get("cID")
poduid = self.request.get("poduid")
ns = self.request.get("ns")
uid = bool(self.request.get("UID"))
namespace = bool(self.request.get("Namespace"))
containerID = bool(self.request.get("ContainerID"))
wrap = bool(self.request.get("wrap"))
weave = bool(self.request.get("weave"))
filters = {"UID":uid, "pod":pod_name, "Namespace":namespace, "ContainerID":containerID}
objref_dict = {}
results = {}
woven_logs = ""
for idx, filter_term in enumerate(others):
filters["other%d" % idx] = filter_term
objref_dict["other%d" % idx] = filter_term
if cID:
objref_dict["ContainerID"] = cID
if poduid:
objref_dict["UID"] = poduid
if ns:
objref_dict["Namespace"] = ns
apiserver_filename = find_log_junit(build_dir, junit, "kube-apiserver.log")
if not weave or len(log_files) == 1:
weave = False
if apiserver_filename and pod_name:
all_logs, results, objref_dict, log_files = get_logs_junit((log_files,
pod_name, filters, objref_dict, apiserver_filename))
if not apiserver_filename:
all_logs, results, objref_dict, log_files = get_logs(build_dir, log_files,
pod_name, filters, objref_dict)
else:
woven_logs, all_logs = parse_by_timestamp((build_dir, junit, log_files, pod_name,
filters, objref_dict))
if (not weave and results == {}) or (weave and woven_logs == ""):
self.render('node_404.html', {"build_dir": build_dir, "log_files": log_files,
"pod_name":pod_name, "junit":junit})
self.response.set_status(404)
return
self.render('filtered_log.html', dict(
job_dir=job_dir, build_dir=build_dir, logs=results, job=job,
build=build, log_files=log_files, containerID=containerID, others=others,
pod=pod_name, junit=junit, uid=uid, namespace=namespace, weave=weave,
wrap=wrap, objref_dict=objref_dict, all_logs=all_logs, woven_logs=woven_logs))
| apache-2.0 |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/django-1.5/django/http/multipartparser.py | 82 | 22856 | """
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
from __future__ import unicode_literals
import base64
import cgi
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_text
from django.utils import six
from django.utils.text import unescape_entities
from django.core.files.uploadhandler import StopUpload, SkipFile, StopFutureHandlers
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
pass
class InputStreamExhausted(Exception):
"""
No more reads are allowed from this device.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
class MultiPartParser(object):
"""
    An RFC 2388 multipart/form-data parser.
    ``MultiPartParser.parse()`` reads the input stream in ``chunk_size`` chunks
    and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
:upload_handler:
An UploadHandler instance that performs operations on the uploaded
data.
:encoding:
The encoding with which to treat the incoming data.
"""
#
        # Content-Type should contain multipart and the boundary information.
#
content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type.encode('ascii'))
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
# Content-Length should contain the length of the body we are about
# to receive.
try:
content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH', 0)))
except (ValueError, TypeError):
content_length = 0
if content_length < 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
if isinstance(boundary, six.text_type):
boundary = boundary.encode('ascii')
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2**31-4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Returns a tuple containing the POST and FILES dictionary, respectively.
"""
# We have to import QueryDict down here to avoid a circular import.
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
# HTTP spec says that Content-Length >= 0 is valid
# handling content-length == 0 before continuing
if self._content_length == 0:
return QueryDict('', encoding=self._encoding), MultiValueDict()
# See if the handler will want to take care of the parsing.
# This allows overriding everything if somebody wants it.
for handler in handlers:
result = handler.handle_raw_input(self._input_data,
self._meta,
self._content_length,
self._boundary,
encoding)
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict('', mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(self._input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
old_field_name = None
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
if transfer_encoding is not None:
transfer_encoding = transfer_encoding[0].strip()
field_name = force_text(field_name, encoding, errors='replace')
if item_type == FIELD:
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read()
try:
data = str(raw_data).decode('base64')
except:
data = raw_data
else:
data = field_stream.read()
self._post.appendlist(field_name,
force_text(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get('filename')
if not file_name:
continue
file_name = force_text(file_name, encoding, errors='replace')
file_name = self.IE_sanitize(unescape_entities(file_name))
content_type = meta_data.get('content-type', ('',))[0].strip()
try:
charset = meta_data.get('content-type', (0, {}))[1].get('charset', None)
except:
charset = None
try:
content_length = int(meta_data.get('content-length')[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
try:
for handler in handlers:
try:
handler.new_file(field_name, file_name,
content_type, content_length,
charset)
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == 'base64':
# We only special-case base64 transfer encoding
# We should always read base64 streams by multiple of 4
over_bytes = len(chunk) % 4
if over_bytes:
over_chunk = field_stream.read(4 - over_bytes)
chunk += over_chunk
try:
chunk = base64.b64decode(chunk)
except Exception as e:
# Since this is only a chunk, any error is an unfixable error.
raise MultiPartParserError("Could not decode base64 data: %r" % e)
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk,
counters[i])
counters[i] += chunk_length
if chunk is None:
# If the chunk received by the handler is None, then don't continue.
break
except SkipFile:
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
# If this is neither a FIELD or a FILE, just exhaust the stream.
exhaust(stream)
except StopUpload as e:
if not e.connection_reset:
exhaust(self._input_data)
else:
# Make sure that the request data is all fed
exhaust(self._input_data)
# Signal that the upload has completed.
for handler in handlers:
retval = handler.upload_complete()
if retval:
break
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
"""
Handle all the signalling that takes place when a file is complete.
"""
for i, handler in enumerate(self._upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
# If it returns a file object, then set the files dict.
self._files.appendlist(force_text(old_field_name,
self._encoding,
errors='replace'),
file_obj)
break
def IE_sanitize(self, filename):
"""Cleanup filename from Internet Explorer full paths."""
return filename and filename[filename.rfind("\\")+1:].strip()
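# Illustrative note (not part of the original module): IE_sanitize() strips the
# directory portion that old Internet Explorer versions send along with the
# filename, e.g. 'C:\Users\me\photo.jpg' becomes 'photo.jpg', while a plain
# 'photo.jpg' is left untouched.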
class LazyStream(six.Iterator):
"""
The LazyStream wrapper allows one to get and "unget" bytes from a stream.
Given a producer object (an iterator that yields bytestrings), the
LazyStream object will support iteration, reading, and keeping a "look-back"
variable in case you need to "unget" some bytes.
"""
def __init__(self, producer, length=None):
"""
Every LazyStream must have a producer when instantiated.
A producer is an iterable that returns a string each time it
is called.
"""
self._producer = producer
self._empty = False
self._leftover = b''
self.length = length
self.position = 0
self._remaining = length
self._unget_history = []
def tell(self):
return self.position
def read(self, size=None):
def parts():
remaining = (size is not None and [size] or [self._remaining])[0]
# do the whole thing in one shot if no limit was provided.
if remaining is None:
yield b''.join(self)
return
# otherwise do some bookkeeping to return exactly enough
# of the stream and stashing any extra content we get from
# the producer
while remaining != 0:
assert remaining > 0, 'remaining bytes to read should never go negative'
chunk = next(self)
emitting = chunk[:remaining]
self.unget(chunk[remaining:])
remaining -= len(emitting)
yield emitting
out = b''.join(parts())
return out
def __next__(self):
"""
Used when the exact number of bytes to read is unimportant.
This procedure just returns whatever chunk is conveniently returned
from the iterator instead. Useful to avoid unnecessary bookkeeping if
performance is an issue.
"""
if self._leftover:
output = self._leftover
self._leftover = b''
else:
output = next(self._producer)
self._unget_history = []
self.position += len(output)
return output
def close(self):
"""
Used to invalidate/disable this lazy stream.
Replaces the producer with an empty list. Any leftover bytes that have
already been read will still be reported upon read() and/or next().
"""
self._producer = []
def __iter__(self):
return self
def unget(self, bytes):
"""
Places bytes back onto the front of the lazy stream.
Future calls to read() will return those bytes first. The
stream position and thus tell() will be rewound.
"""
if not bytes:
return
self._update_unget_history(len(bytes))
self.position -= len(bytes)
self._leftover = b''.join([bytes, self._leftover])
def _update_unget_history(self, num_bytes):
"""
Updates the unget history as a sanity check to see if we've pushed
back the same number of bytes in one chunk. If we keep ungetting the
same number of bytes many times (here, 50), we're most likely in an
infinite loop of some sort. This is usually caused by a
maliciously-malformed MIME request.
"""
self._unget_history = [num_bytes] + self._unget_history[:49]
number_equal = len([current_number for current_number in self._unget_history
if current_number == num_bytes])
if number_equal > 40:
raise SuspiciousOperation(
"The multipart parser got stuck, which shouldn't happen with"
" normal uploaded files. Check for malicious upload activity;"
" if there is none, report this to the Django developers."
)
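# --- Illustrative sketch (not part of the original module): a minimal demo of
# LazyStream's read()/unget()/tell() bookkeeping. The one-element producer below
# is arbitrary example data; the function exists only as documentation and is
# never called.
def _lazystream_demo():
    stream = LazyStream(iter([b'abcdef']))
    head = stream.read(2)        # b'ab'; the unread tail is ungot internally
    assert head == b'ab' and stream.tell() == 2
    stream.unget(head)           # push the bytes back; tell() rewinds
    assert stream.tell() == 0
    assert stream.read() == b'abcdef'   # no size given: drain whatever is left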
class ChunkIter(six.Iterator):
"""
An iterable that will yield chunks of data. Given a file-like object as the
constructor, this object will yield chunks of read operations from that
object.
"""
def __init__(self, flo, chunk_size=64 * 1024):
self.flo = flo
self.chunk_size = chunk_size
def __next__(self):
try:
data = self.flo.read(self.chunk_size)
except InputStreamExhausted:
raise StopIteration()
if data:
return data
else:
raise StopIteration()
def __iter__(self):
return self
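# --- Illustrative sketch (not part of the original module): ChunkIter simply
# re-exposes a file-like object's read() as an iterator of fixed-size chunks.
# io.BytesIO stands in for the request's input stream here. Defined but never
# called.
def _chunkiter_demo():
    from io import BytesIO
    assert list(ChunkIter(BytesIO(b'abcdefgh'), chunk_size=3)) == [b'abc', b'def', b'gh']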
class InterBoundaryIter(six.Iterator):
"""
A Producer that will iterate over boundaries.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
def __iter__(self):
return self
def __next__(self):
try:
return LazyStream(BoundaryIter(self._stream, self._boundary))
except InputStreamExhausted:
raise StopIteration()
class BoundaryIter(six.Iterator):
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
The future calls to next() after locating the boundary will raise a
StopIteration exception.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
# Try to use mx fast string search if available. Otherwise
# use Python find. Wrap the latter for consistency.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
try:
from mx.TextTools import FS
self._fs = FS(boundary).find
except ImportError:
self._fs = lambda data: data.find(boundary)
def __iter__(self):
return self
def __next__(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = b''.join(chunks)
boundary = self._find_boundary(chunk, len(chunk) < self._rollback)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
# make sure we don't treat a partial boundary (and
# its separators) as data
if not chunk[:-rollback]:  # and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
def _find_boundary(self, data, eof=False):
"""
Finds a multipart boundary in data.
If no boundary exists in the data, None is returned. Otherwise a tuple
containing the indices of the following is returned:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = self._fs(data)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
# backup over CRLF
last = max(0, end-1)
if data[last:last+1] == b'\n':
end -= 1
last = max(0, end-1)
if data[last:last+1] == b'\r':
end -= 1
return end, next
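# --- Illustrative sketch (not part of the original module): BoundaryIter yields
# the bytes that precede a boundary, swallows the boundary itself (and the CRLF
# just before it), and ungets whatever follows back onto the underlying stream.
# '--sep' is an arbitrary example separator. Defined but never called.
def _boundaryiter_demo():
    stream = LazyStream(iter([b'first part\r\n--sep\r\nsecond part']))
    body = b''.join(BoundaryIter(stream, b'--sep'))
    assert body == b'first part'                 # CRLF before the boundary is dropped
    assert stream.read() == b'\r\nsecond part'   # bytes after the boundary stay on the stream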
def exhaust(stream_or_iterable):
"""
Completely exhausts an iterator or stream.
Raise a MultiPartParserError if the argument is not a stream or an iterable.
"""
iterator = None
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
if iterator is None:
raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')
for __ in iterator:
pass
def parse_boundary_stream(stream, max_header_size):
"""
Parses one and exactly one stream that encapsulates a boundary.
"""
# Stream at beginning of header, look for end of header
# and parse it if found. The header must fit within one
# chunk.
chunk = stream.read(max_header_size)
# find() returns the index where these four CRLF bytes start, so we'll
# need to munch them later to prevent them from polluting
# the payload.
header_end = chunk.find(b'\r\n\r\n')
def _parse_header(line):
main_value_pair, params = parse_header(line)
try:
name, value = main_value_pair.split(':', 1)
except:
raise ValueError("Invalid header: %r" % line)
return name, (value, params)
if header_end == -1:
# we find no header, so we just mark this fact and pass on
# the stream verbatim
stream.unget(chunk)
return (RAW, {}, stream)
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4:])
TYPE = RAW
outdict = {}
# Eliminate blank lines
for line in header.split(b'\r\n'):
# This terminology ("main value" and "dictionary of
# parameters") is from the Python docs.
try:
name, (value, params) = _parse_header(line)
except:
continue
if name == 'content-disposition':
TYPE = FIELD
if params.get('filename'):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
class Parser(object):
def __init__(self, stream, boundary):
self._stream = stream
self._separator = b'--' + boundary
def __iter__(self):
boundarystream = InterBoundaryIter(self._stream, self._separator)
for sub_stream in boundarystream:
# Iterate over each part
yield parse_boundary_stream(sub_stream, 1024)
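# --- Illustrative sketch (not part of the original module): end-to-end use of
# Parser on a tiny hand-written multipart body. The 'BOUND' boundary and the
# 'greeting' field are made up for the example, and the sketch assumes the
# Python 2-era generator semantics this module targets (on modern Python the
# sized read() path would need an explicit StopIteration guard). Defined but
# never called.
def _parser_demo():
    payload = (b'--BOUND\r\n'
               b'Content-Disposition: form-data; name="greeting"\r\n'
               b'\r\n'
               b'hello\r\n'
               b'--BOUND--\r\n')
    fields = {}
    for item_type, meta_data, field_stream in Parser(LazyStream(iter([payload])), b'BOUND'):
        if item_type == FIELD:
            name = meta_data['content-disposition'][1]['name']
            fields[name] = field_stream.read()
    assert fields == {b'greeting': b'hello'}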
def parse_header(line):
""" Parse the header into a key-value.
Input (line): bytes, output: unicode for key/name, bytes for value which
will be decoded later
"""
plist = _parse_header_params(b';' + line)
key = plist.pop(0).lower().decode('ascii')
pdict = {}
for p in plist:
i = p.find(b'=')
if i >= 0:
name = p[:i].strip().lower().decode('ascii')
value = p[i+1:].strip()
if len(value) >= 2 and value[:1] == value[-1:] == b'"':
value = value[1:-1]
value = value.replace(b'\\\\', b'\\').replace(b'\\"', b'"')
pdict[name] = value
return key, pdict
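# --- Illustrative sketch (not part of the original module): parse_header()
# keeps the raw "Header: value" text as the (lowercased) key and returns the
# quoted parameters as a dict of bytes values. The header line below is example
# input. Defined but never called.
def _parse_header_demo():
    key, params = parse_header(b'Content-Disposition: form-data; name="upload"; filename="a.txt"')
    assert key == 'content-disposition: form-data'
    assert params == {'name': b'upload', 'filename': b'a.txt'}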
def _parse_header_params(s):
plist = []
while s[:1] == b';':
s = s[1:]
end = s.find(b';')
while end > 0 and s.count(b'"', 0, end) % 2:
end = s.find(b';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
| lgpl-3.0 |
KohlsTechnology/ansible | lib/ansible/modules/inventory/group_by.py | 67 | 1457 | # -*- mode: python -*-
#
# Copyright: Ansible Team
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: group_by
short_description: Create Ansible groups based on facts
description:
- Use facts to create ad-hoc groups that can be used later in a playbook.
- This module is also supported for Windows targets.
version_added: "0.9"
options:
key:
description:
- The variables whose values will be used as groups
required: true
parents:
description:
- The list of the parent groups
required: false
default: "all"
version_added: "2.4"
author: "Jeroen Hoekx (@jhoekx)"
notes:
- Spaces in group names are converted to dashes '-'.
- This module is also supported for Windows targets.
'''
EXAMPLES = '''
# Create groups based on the machine architecture
- group_by:
key: machine_{{ ansible_machine }}
# Create groups like 'kvm-host'
- group_by:
key: virt_{{ ansible_virtualization_type }}_{{ ansible_virtualization_role }}
# Create nested groups
- group_by:
key: el{{ ansible_distribution_major_version }}-{{ ansible_architecture }}
parents:
- el{{ ansible_distribution_major_version }}
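# Groups created this way can be targeted later in the same playbook, for
# example with 'hosts: machine_x86_64' in a follow-up play (illustrative name;
# the actual group depends on the gathered fact values)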
'''
| gpl-3.0 |
DavidHerzogTU-Berlin/cassandraToRun | test/system/test_thrift_server.py | 1 | 103505 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# to run a single test, run from trunk/:
# PYTHONPATH=test nosetests --tests=system.test_thrift_server:TestMutations.test_empty_range
import os, sys, time, struct, uuid, re
from . import root, ThriftTester
from . import thrift_client as client
from thrift.Thrift import TApplicationException
from ttypes import *
from constants import VERSION
def _i64(n):
return struct.pack('>q', n) # big endian = network order
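# Illustrative note (not part of the original tests): _i64 packs an int into the
# 8-byte big-endian form the Long comparator expects, e.g.
# _i64(5) == '\x00\x00\x00\x00\x00\x00\x00\x05'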
_SIMPLE_COLUMNS = [Column('c1', 'value1', 0),
Column('c2', 'value2', 0)]
_SUPER_COLUMNS = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 0),
Column(_i64(6), 'value6', 0)])]
def _assert_column(column_family, key, column, value, ts = 0):
try:
assert client.get(key, ColumnPath(column_family, column=column), ConsistencyLevel.ONE).column == Column(column, value, ts)
except NotFoundException:
raise Exception('expected %s:%s:%s:%s, but was not present' % (column_family, key, column, value) )
def _assert_columnpath_exists(key, column_path):
try:
assert client.get(key, column_path, ConsistencyLevel.ONE)
except NotFoundException:
raise Exception('expected %s with %s but was not present.' % (key, column_path) )
def _assert_no_columnpath(key, column_path):
try:
client.get(key, column_path, ConsistencyLevel.ONE)
assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
except NotFoundException:
assert True, 'column did not exist'
def _insert_simple(block=True):
return _insert_multi(['key1'])
def _insert_batch(block):
return _insert_multi_batch(['key1'], block)
def _insert_multi(keys):
CL = ConsistencyLevel.ONE
for key in keys:
client.insert(key, ColumnParent('Standard1'), Column('c1', 'value1', 0), CL)
client.insert(key, ColumnParent('Standard1'), Column('c2', 'value2', 0), CL)
def _insert_multi_batch(keys, block):
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS],
'Standard2': [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]}
for key in keys:
client.batch_mutate({key: cfmap}, ConsistencyLevel.ONE)
def _big_slice(key, column_parent):
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
return client.get_slice(key, column_parent, p, ConsistencyLevel.ONE)
def _big_multislice(keys, column_parent):
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
return client.multiget_slice(keys, column_parent, p, ConsistencyLevel.ONE)
def _verify_batch():
_verify_simple()
L = [result.column
for result in _big_slice('key1', ColumnParent('Standard2'))]
assert L == _SIMPLE_COLUMNS, L
def _verify_simple():
assert client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE).column == Column('c1', 'value1', 0)
L = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert L == _SIMPLE_COLUMNS, L
def _insert_super(key='key1'):
client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', 'sc2'), Column(_i64(6), 'value6', 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _insert_range():
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c2', 'value2', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _insert_counter_range():
client.add('key1', ColumnParent('Counter1'), CounterColumn('c1', 1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('Counter1'), CounterColumn('c2', 2), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('Counter1'), CounterColumn('c3', 3), ConsistencyLevel.ONE)
time.sleep(0.1)
def _verify_range():
p = SlicePredicate(slice_range=SliceRange('c1', 'c2', False, 1000))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == 'c1'
assert result[1].column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('c3', 'c2', True, 1000))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == 'c3'
assert result[1].column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 1000))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 3, result
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 2))
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2, result
def _verify_counter_range():
p = SlicePredicate(slice_range=SliceRange('c1', 'c2', False, 1000))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_column.name == 'c1'
assert result[1].counter_column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('c3', 'c2', True, 1000))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_column.name == 'c3'
assert result[1].counter_column.name == 'c2'
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 1000))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 3, result
p = SlicePredicate(slice_range=SliceRange('a', 'z', False, 2))
result = client.get_slice('key1', ColumnParent('Counter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2, result
def _set_keyspace(keyspace):
client.set_keyspace(keyspace)
def _insert_super_range():
client.insert('key1', ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(6), 'value6', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Super1', 'sc3'), Column(_i64(7), 'value7', 0), ConsistencyLevel.ONE)
time.sleep(0.1)
def _insert_counter_super_range():
client.add('key1', ColumnParent('SuperCounter1', 'sc1'), CounterColumn(_i64(4), 4), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('SuperCounter1', 'sc2'), CounterColumn(_i64(5), 5), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('SuperCounter1', 'sc2'), CounterColumn(_i64(6), 6), ConsistencyLevel.ONE)
client.add('key1', ColumnParent('SuperCounter1', 'sc3'), CounterColumn(_i64(7), 7), ConsistencyLevel.ONE)
time.sleep(0.1)
def _verify_super_range():
p = SlicePredicate(slice_range=SliceRange('sc2', 'sc3', False, 2))
result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].super_column.name == 'sc2'
assert result[1].super_column.name == 'sc3'
p = SlicePredicate(slice_range=SliceRange('sc3', 'sc2', True, 2))
result = client.get_slice('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].super_column.name == 'sc3'
assert result[1].super_column.name == 'sc2'
def _verify_counter_super_range():
p = SlicePredicate(slice_range=SliceRange('sc2', 'sc3', False, 2))
result = client.get_slice('key1', ColumnParent('SuperCounter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_super_column.name == 'sc2'
assert result[1].counter_super_column.name == 'sc3'
p = SlicePredicate(slice_range=SliceRange('sc3', 'sc2', True, 2))
result = client.get_slice('key1', ColumnParent('SuperCounter1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].counter_super_column.name == 'sc3'
assert result[1].counter_super_column.name == 'sc2'
def _verify_super(supercf='Super1', key='key1'):
assert client.get(key, ColumnPath(supercf, 'sc1', _i64(4)), ConsistencyLevel.ONE).column == Column(_i64(4), 'value4', 0)
slice = [result.super_column
for result in _big_slice(key, ColumnParent('Super1'))]
assert slice == _SUPER_COLUMNS, slice
def _expect_exception(fn, type_):
try:
r = fn()
except type_, t:
return t
else:
raise Exception('expected %s; got %s' % (type_.__name__, r))
def _expect_missing(fn):
_expect_exception(fn, NotFoundException)
def get_range_slice(client, parent, predicate, start, end, count, cl, row_filter=None):
kr = KeyRange(start, end, count=count, row_filter=row_filter)
return client.get_range_slices(parent, predicate, kr, cl)
class TestMutations(ThriftTester):
def test_insert(self):
_set_keyspace('Keyspace1')
_insert_simple(False)
time.sleep(0.1)
_verify_simple()
def test_empty_slice(self):
_set_keyspace('Keyspace1')
assert _big_slice('key1', ColumnParent('Standard2')) == []
assert _big_slice('key1', ColumnParent('Super1')) == []
def test_cas(self):
_set_keyspace('Keyspace1')
def cas(expected, updates):
return client.cas('key1', 'Standard1', expected, updates, ConsistencyLevel.ONE)
cas_result = cas(_SIMPLE_COLUMNS, _SIMPLE_COLUMNS)
assert not cas_result.success
assert len(cas_result.current_values) == 0, cas_result
assert cas([], _SIMPLE_COLUMNS).success
result = [cosc.column for cosc in _big_slice('key1', ColumnParent('Standard1'))]
# CAS will use its own timestamp, so we can't just compare result == _SIMPLE_COLUMNS
cas_result = cas([], _SIMPLE_COLUMNS)
assert not cas_result.success
# When we CAS for non-existence, current_values is the first live column of the row
assert dict((c.name, c.value) for c in cas_result.current_values) == { _SIMPLE_COLUMNS[0].name : _SIMPLE_COLUMNS[0].value }, cas_result
# CL.SERIAL for reads
assert client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.SERIAL).column.value == 'value1'
def test_missing_super(self):
_set_keyspace('Keyspace1')
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1', _i64(1)), ConsistencyLevel.ONE))
_insert_super()
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1', _i64(1)), ConsistencyLevel.ONE))
def test_count(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
assert client.get_count('key1', ColumnParent('Standard2'), p, ConsistencyLevel.ONE) == 0
assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 2
assert client.get_count('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE) == 2
assert client.get_count('key1', ColumnParent('Super1'), p, ConsistencyLevel.ONE) == 2
# Let's make that a little more interesting
client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c4', 'value4', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('c5', 'value5', 0), ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange('c2', 'c4', False, 1000))
assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 3
def test_count_paging(self):
_set_keyspace('Keyspace1')
_insert_simple()
# Exercise paging
column_parent = ColumnParent('Standard1')
super_column_parent = ColumnParent('Super1', 'sc3')
# Paging for small columns starts at 1024 columns
columns_to_insert = [Column('c%d' % (i,), 'value%d' % (i,), 0) for i in xrange(3, 1026)]
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(c)) for c in columns_to_insert]}
client.batch_mutate({'key1' : cfmap }, ConsistencyLevel.ONE)
p = SlicePredicate(slice_range=SliceRange('', '', False, 2000))
assert client.get_count('key1', column_parent, p, ConsistencyLevel.ONE) == 1025
# Ensure that the count limit isn't clobbered
p = SlicePredicate(slice_range=SliceRange('', '', False, 10))
assert client.get_count('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE) == 10
# test get_count() to work correctly with 'count' settings around page size (CASSANDRA-4833)
def test_count_around_page_size(self):
def slice_predicate(count):
return SlicePredicate(slice_range=SliceRange('', '', False, count))
_set_keyspace('Keyspace1')
key = 'key1'
parent = ColumnParent('Standard1')
cl = ConsistencyLevel.ONE
for i in xrange(0, 3050):
client.insert(key, parent, Column(str(i), '', 0), cl)
# same as page size
assert client.get_count(key, parent, slice_predicate(1024), cl) == 1024
# 1 above page size
assert client.get_count(key, parent, slice_predicate(1025), cl) == 1025
# above the number of columns
assert client.get_count(key, parent, slice_predicate(4000), cl) == 3050
# same as number of columns
assert client.get_count(key, parent, slice_predicate(3050), cl) == 3050
# 1 above number of columns
assert client.get_count(key, parent, slice_predicate(3051), cl) == 3050
def test_insert_blocking(self):
_set_keyspace('Keyspace1')
_insert_simple()
_verify_simple()
def test_super_insert(self):
_set_keyspace('Keyspace1')
_insert_super()
_verify_super()
def test_super_get(self):
_set_keyspace('Keyspace1')
_insert_super()
result = client.get('key1', ColumnPath('Super1', 'sc2'), ConsistencyLevel.ONE).super_column
assert result == _SUPER_COLUMNS[1], result
def test_super_subcolumn_limit(self):
_set_keyspace('Keyspace1')
_insert_super()
p = SlicePredicate(slice_range=SliceRange('', '', False, 1))
column_parent = ColumnParent('Super1', 'sc2')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(5), 'value5', 0)], slice
p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(6), 'value6', 0)], slice
def test_long_order(self):
_set_keyspace('Keyspace1')
def long_xrange(start, stop, step):
i = start
while i < stop:
yield i
i += step
L = []
for i in long_xrange(0, 104294967296, 429496729):
name = _i64(i)
client.insert('key1', ColumnParent('StandardLong1'), Column(name, 'v', 0), ConsistencyLevel.ONE)
L.append(name)
slice = [result.column.name for result in _big_slice('key1', ColumnParent('StandardLong1'))]
assert slice == L, slice
def test_integer_order(self):
_set_keyspace('Keyspace1')
def long_xrange(start, stop, step):
i = start
while i >= stop:
yield i
i -= step
L = []
for i in long_xrange(104294967296, 0, 429496729):
name = _i64(i)
client.insert('key1', ColumnParent('StandardInteger1'), Column(name, 'v', 0), ConsistencyLevel.ONE)
L.append(name)
slice = [result.column.name for result in _big_slice('key1', ColumnParent('StandardInteger1'))]
L.sort()
assert slice == L, slice
def test_time_uuid(self):
import uuid
L = []
_set_keyspace('Keyspace2')
# 100 isn't enough to fail reliably if the comparator is borked
for i in xrange(500):
L.append(uuid.uuid1())
client.insert('key1', ColumnParent('Super4', 'sc1'), Column(L[-1].bytes, 'value%s' % i, i), ConsistencyLevel.ONE)
slice = _big_slice('key1', ColumnParent('Super4', 'sc1'))
assert len(slice) == 500, len(slice)
for i in xrange(500):
u = slice[i].column
assert u.value == 'value%s' % i
assert u.name == L[i].bytes
p = SlicePredicate(slice_range=SliceRange('', '', True, 1))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[-1].bytes, 'value499', 499)], slice
p = SlicePredicate(slice_range=SliceRange('', L[2].bytes, False, 1000))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[0].bytes, 'value0', 0),
Column(L[1].bytes, 'value1', 1),
Column(L[2].bytes, 'value2', 2)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', True, 1000))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[2].bytes, 'value2', 2),
Column(L[1].bytes, 'value1', 1),
Column(L[0].bytes, 'value0', 0)], slice
p = SlicePredicate(slice_range=SliceRange(L[2].bytes, '', False, 1))
column_parent = ColumnParent('Super4', 'sc1')
slice = [result.column
for result in client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE)]
assert slice == [Column(L[2].bytes, 'value2', 2)], slice
def test_long_remove(self):
column_parent = ColumnParent('StandardLong1')
sp = SlicePredicate(slice_range=SliceRange('', '', False, 1))
_set_keyspace('Keyspace1')
for i in xrange(10):
parent = ColumnParent('StandardLong1')
client.insert('key1', parent, Column(_i64(i), 'value1', 10 * i), ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('StandardLong1'), 10 * i + 1, ConsistencyLevel.ONE)
slice = client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)
assert slice == [], slice
# resurrect
client.insert('key1', parent, Column(_i64(i), 'value2', 10 * i + 2), ConsistencyLevel.ONE)
slice = [result.column
for result in client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(i), 'value2', 10 * i + 2)], (slice, i)
def test_integer_remove(self):
column_parent = ColumnParent('StandardInteger1')
sp = SlicePredicate(slice_range=SliceRange('', '', False, 1))
_set_keyspace('Keyspace1')
for i in xrange(10):
parent = ColumnParent('StandardInteger1')
client.insert('key1', parent, Column(_i64(i), 'value1', 10 * i), ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('StandardInteger1'), 10 * i + 1, ConsistencyLevel.ONE)
slice = client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)
assert slice == [], slice
# resurrect
client.insert('key1', parent, Column(_i64(i), 'value2', 10 * i + 2), ConsistencyLevel.ONE)
slice = [result.column
for result in client.get_slice('key1', column_parent, sp, ConsistencyLevel.ONE)]
assert slice == [Column(_i64(i), 'value2', 10 * i + 2)], (slice, i)
def test_batch_insert(self):
_set_keyspace('Keyspace1')
_insert_batch(False)
time.sleep(0.1)
_verify_batch()
def test_batch_insert_blocking(self):
_set_keyspace('Keyspace1')
_insert_batch(True)
_verify_batch()
def test_batch_mutate_standard_columns(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(27,32)]
mutations = [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for key in keys:
_assert_column(column_family, key, 'c1', 'value1')
def test_batch_mutate_standard_columns_blocking(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(38,46)]
mutations = [Mutation(ColumnOrSuperColumn(c)) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for key in keys:
_assert_column(column_family, key, 'c1', 'value1')
def test_batch_mutate_remove_standard_columns(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(11,21)]
_insert_multi(keys)
mutations = [Mutation(deletion=Deletion(20, predicate=SlicePredicate(column_names=[c.name]))) for c in _SIMPLE_COLUMNS]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for c in _SIMPLE_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
def test_batch_mutate_remove_standard_row(self):
_set_keyspace('Keyspace1')
column_families = ['Standard1', 'Standard2']
keys = ['key_%d' % i for i in range(11,21)]
_insert_multi(keys)
mutations = [Mutation(deletion=Deletion(20))]
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for c in _SIMPLE_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, column=c.name))
def test_batch_mutate_remove_super_columns_with_standard_under(self):
_set_keyspace('Keyspace1')
column_families = ['Super1', 'Super2']
keys = ['key_%d' % i for i in range(11,21)]
_insert_super()
mutations = []
for sc in _SUPER_COLUMNS:
names = []
for c in sc.columns:
names.append(c.name)
mutations.append(Mutation(deletion=Deletion(20, super_column=c.name, predicate=SlicePredicate(column_names=names))))
mutation_map = dict((column_family, mutations) for column_family in column_families)
keyed_mutations = dict((key, mutation_map) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for column_family in column_families:
for sc in _SUPER_COLUMNS:
for c in sc.columns:
for key in keys:
_assert_no_columnpath(key, ColumnPath(column_family, super_column=sc.name, column=c.name))
def test_batch_mutate_remove_super_columns_with_none_given_underneath(self):
_set_keyspace('Keyspace1')
keys = ['key_%d' % i for i in range(17,21)]
for key in keys:
_insert_super(key)
mutations = []
for sc in _SUPER_COLUMNS:
mutations.append(Mutation(deletion=Deletion(20,
super_column=sc.name)))
mutation_map = {'Super1': mutations}
keyed_mutations = dict((key, mutation_map) for key in keys)
# Sanity check
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for sc in _SUPER_COLUMNS:
for c in sc.columns:
for key in keys:
_assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
def test_batch_mutate_remove_super_columns_entire_row(self):
_set_keyspace('Keyspace1')
keys = ['key_%d' % i for i in range(17,21)]
for key in keys:
_insert_super(key)
mutations = []
mutations.append(Mutation(deletion=Deletion(20)))
mutation_map = {'Super1': mutations}
keyed_mutations = dict((key, mutation_map) for key in keys)
# Sanity check
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column=sc.name))
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for sc in _SUPER_COLUMNS:
for key in keys:
_assert_no_columnpath(key, ColumnPath('Super1', super_column=sc.name))
def test_batch_mutate_insertions_and_deletions(self):
_set_keyspace('Keyspace1')
first_insert = SuperColumn("sc1",
columns=[Column(_i64(20), 'value20', 3),
Column(_i64(21), 'value21', 3)])
second_insert = SuperColumn("sc1",
columns=[Column(_i64(20), 'value20', 3),
Column(_i64(21), 'value21', 3)])
first_deletion = {'super_column': "sc1",
'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
second_deletion = {'super_column': "sc2",
'predicate': SlicePredicate(column_names=[_i64(22), _i64(23)])}
keys = ['key_30', 'key_31']
for key in keys:
sc = SuperColumn('sc1',[Column(_i64(22), 'value22', 0),
Column(_i64(23), 'value23', 0)])
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=sc))]}
client.batch_mutate({key: cfmap}, ConsistencyLevel.ONE)
sc2 = SuperColumn('sc2', [Column(_i64(22), 'value22', 0),
Column(_i64(23), 'value23', 0)])
cfmap2 = {'Super2': [Mutation(ColumnOrSuperColumn(super_column=sc2))]}
client.batch_mutate({key: cfmap2}, ConsistencyLevel.ONE)
cfmap3 = {
'Super1' : [Mutation(ColumnOrSuperColumn(super_column=first_insert)),
Mutation(deletion=Deletion(3, **first_deletion))],
'Super2' : [Mutation(deletion=Deletion(2, **second_deletion)),
Mutation(ColumnOrSuperColumn(super_column=second_insert))]
}
keyed_mutations = dict((key, cfmap3) for key in keys)
client.batch_mutate(keyed_mutations, ConsistencyLevel.ONE)
for key in keys:
for c in [_i64(22), _i64(23)]:
_assert_no_columnpath(key, ColumnPath('Super1', super_column='sc1', column=c))
_assert_no_columnpath(key, ColumnPath('Super2', super_column='sc2', column=c))
for c in [_i64(20), _i64(21)]:
_assert_columnpath_exists(key, ColumnPath('Super1', super_column='sc1', column=c))
_assert_columnpath_exists(key, ColumnPath('Super2', super_column='sc1', column=c))
def test_bad_system_calls(self):
def duplicate_index_names():
_set_keyspace('Keyspace1')
cd1 = ColumnDef('foo', 'BytesType', IndexType.KEYS, 'i')
cd2 = ColumnDef('bar', 'BytesType', IndexType.KEYS, 'i')
cf = CfDef('Keyspace1', 'BadCF', column_metadata=[cd1, cd2])
client.system_add_column_family(cf)
_expect_exception(duplicate_index_names, InvalidRequestException)
def test_bad_batch_calls(self):
# mutate_does_not_accept_cosc_and_deletion_in_same_mutation
def too_full():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column("foo", 'bar', 0))
dele = Deletion(2, predicate=SlicePredicate(column_names=['baz']))
client.batch_mutate({'key_34': {'Standard1': [Mutation(col, dele)]}},
ConsistencyLevel.ONE)
_expect_exception(too_full, InvalidRequestException)
# test_batch_mutate_does_not_yet_accept_slice_ranges
def send_range():
_set_keyspace('Keyspace1')
sp = SlicePredicate(slice_range=SliceRange(start='0', finish="", count=10))
d = Deletion(2, predicate=sp)
client.batch_mutate({'key_35': {'Standard1':[Mutation(deletion=d)]}},
ConsistencyLevel.ONE)
_expect_exception(send_range, InvalidRequestException)
# test_batch_mutate_does_not_accept_cosc_on_undefined_cf:
def bad_cf():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column("foo", 'bar', 0))
client.batch_mutate({'key_36': {'Undefined': [Mutation(col)]}},
ConsistencyLevel.ONE)
_expect_exception(bad_cf, InvalidRequestException)
# test_batch_mutate_does_not_accept_deletion_on_undefined_cf
def bad_cf():
_set_keyspace('Keyspace1')
d = Deletion(2, predicate=SlicePredicate(column_names=['baz']))
client.batch_mutate({'key_37': {'Undefined':[Mutation(deletion=d)]}},
ConsistencyLevel.ONE)
_expect_exception(bad_cf, InvalidRequestException)
# a column value that does not match the declared validator
def send_string_instead_of_long():
_set_keyspace('Keyspace1')
col = ColumnOrSuperColumn(column=Column('birthdate', 'bar', 0))
client.batch_mutate({'key_38': {'Indexed1': [Mutation(col)]}},
ConsistencyLevel.ONE)
_expect_exception(send_string_instead_of_long, InvalidRequestException)
def test_column_name_lengths(self):
_set_keyspace('Keyspace1')
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), Column('', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
client.insert('key1', ColumnParent('Standard1'), Column('x'*1, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*127, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*128, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*129, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*255, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*256, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*257, 'value', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Standard1'), Column('x'*(2**16 - 1), 'value', 0), ConsistencyLevel.ONE)
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), Column('x'*(2**16), 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
def test_bad_calls(self):
_set_keyspace('Keyspace1')
# missing arguments
_expect_exception(lambda: client.insert(None, None, None, None), TApplicationException)
# supercolumn in a non-super CF
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1', 'x'), Column('y', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
# no supercolumn in a super CF
_expect_exception(lambda: client.insert('key1', ColumnParent('Super1'), Column('y', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
# column but no supercolumn in remove
_expect_exception(lambda: client.remove('key1', ColumnPath('Super1', column='x'), 0, ConsistencyLevel.ONE), InvalidRequestException)
# super column in non-super CF
_expect_exception(lambda: client.remove('key1', ColumnPath('Standard1', 'y', 'x'), 0, ConsistencyLevel.ONE), InvalidRequestException)
# key too long
_expect_exception(lambda: client.get('x' * 2**16, ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE), InvalidRequestException)
# empty key
_expect_exception(lambda: client.get('', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE), InvalidRequestException)
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c)) for c in _SUPER_COLUMNS]}
_expect_exception(lambda: client.batch_mutate({'': cfmap}, ConsistencyLevel.ONE), InvalidRequestException)
# empty column name
_expect_exception(lambda: client.get('key1', ColumnPath('Standard1', column=''), ConsistencyLevel.ONE), InvalidRequestException)
# get doesn't specify column name
_expect_exception(lambda: client.get('key1', ColumnPath('Standard1'), ConsistencyLevel.ONE), InvalidRequestException)
# supercolumn in a non-super CF
_expect_exception(lambda: client.get('key1', ColumnPath('Standard1', 'x', 'y'), ConsistencyLevel.ONE), InvalidRequestException)
# get doesn't specify supercolumn name
_expect_exception(lambda: client.get('key1', ColumnPath('Super1'), ConsistencyLevel.ONE), InvalidRequestException)
# invalid CF
_expect_exception(lambda: get_range_slice(client, ColumnParent('S'), SlicePredicate(column_names=['', '']), '', '', 5, ConsistencyLevel.ONE), InvalidRequestException)
# 'x' is not a valid Long
_expect_exception(lambda: client.insert('key1', ColumnParent('Super1', 'sc1'), Column('x', 'value', 0), ConsistencyLevel.ONE), InvalidRequestException)
# start is not a valid Long
p = SlicePredicate(slice_range=SliceRange('x', '', False, 1))
column_parent = ColumnParent('StandardLong1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish
p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
column_parent = ColumnParent('StandardLong1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start is not a valid Long, supercolumn version
p = SlicePredicate(slice_range=SliceRange('x', '', False, 1))
column_parent = ColumnParent('Super1', 'sc1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish, supercolumn version
p = SlicePredicate(slice_range=SliceRange(_i64(10), _i64(0), False, 1))
column_parent = ColumnParent('Super1', 'sc1')
_expect_exception(lambda: client.get_slice('key1', column_parent, p, ConsistencyLevel.ONE),
InvalidRequestException)
# start > finish, key version
_expect_exception(lambda: get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['']), 'z', 'a', 1, ConsistencyLevel.ONE), InvalidRequestException)
# ttl must be positive
column = Column('cttl1', 'value1', 0, 0)
_expect_exception(lambda: client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE),
InvalidRequestException)
# don't allow super_column in Deletion for standard ColumnFamily
deletion = Deletion(1, 'supercolumn', None)
mutation = Mutation(deletion=deletion)
mutations = {'key' : {'Standard1' : [mutation]}}
_expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM),
InvalidRequestException)
# 'x' is not a valid long
deletion = Deletion(1, 'x', None)
mutation = Mutation(deletion=deletion)
mutations = {'key' : {'Super5' : [mutation]}}
_expect_exception(lambda: client.batch_mutate(mutations, ConsistencyLevel.QUORUM), InvalidRequestException)
# counters don't support ANY
_expect_exception(lambda: client.add('key1', ColumnParent('Counter1', 'x'), CounterColumn('y', 1), ConsistencyLevel.ANY), InvalidRequestException)
def test_batch_insert_super(self):
_set_keyspace('Keyspace1')
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS]}
client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
_verify_super('Super1')
_verify_super('Super2')
def test_batch_insert_super_blocking(self):
_set_keyspace('Keyspace1')
cfmap = {'Super1': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS],
'Super2': [Mutation(ColumnOrSuperColumn(super_column=c))
for c in _SUPER_COLUMNS]}
client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
_verify_super('Super1')
_verify_super('Super2')
def test_cf_remove_column(self):
_set_keyspace('Keyspace1')
_insert_simple()
client.remove('key1', ColumnPath('Standard1', column='c1'), 1, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='c1'), ConsistencyLevel.ONE))
assert client.get('key1', ColumnPath('Standard1', column='c2'), ConsistencyLevel.ONE).column \
== Column('c2', 'value2', 0)
assert _big_slice('key1', ColumnParent('Standard1')) \
== [ColumnOrSuperColumn(column=Column('c2', 'value2', 0))]
# New insert, make sure it shows up post-remove:
client.insert('key1', ColumnParent('Standard1'), Column('c3', 'value3', 0), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert columns == [Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert columns == [Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
# Next, w/ a newer timestamp; it should come back:
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 2), ConsistencyLevel.ONE)
columns = [result.column
for result in _big_slice('key1', ColumnParent('Standard1'))]
assert columns == [Column('c1', 'value1', 2), Column('c2', 'value2', 0), Column('c3', 'value3', 0)], columns
def test_cf_remove(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# Remove the key1:Standard1 cf; verify super is unaffected
client.remove('key1', ColumnPath('Standard1'), 3, ConsistencyLevel.ONE)
assert _big_slice('key1', ColumnParent('Standard1')) == []
_verify_super()
# Test resurrection. First, re-insert a value w/ older timestamp,
# and make sure it stays removed:
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 0), ConsistencyLevel.ONE)
assert _big_slice('key1', ColumnParent('Standard1')) == []
# Next, w/ a newer timestamp; it should come back:
client.insert('key1', ColumnParent('Standard1'), Column('c1', 'value1', 4), ConsistencyLevel.ONE)
result = _big_slice('key1', ColumnParent('Standard1'))
assert result == [ColumnOrSuperColumn(column=Column('c1', 'value1', 4))], result
# check removing the entire super cf, too.
client.remove('key1', ColumnPath('Super1'), 3, ConsistencyLevel.ONE)
assert _big_slice('key1', ColumnParent('Super1')) == []
assert _big_slice('key1', ColumnParent('Super1', 'sc1')) == []
def test_super_cf_remove_and_range_slice(self):
_set_keyspace('Keyspace1')
client.insert('key3', ColumnParent('Super1', 'sc1'), Column(_i64(1), 'v1', 0), ConsistencyLevel.ONE)
client.remove('key3', ColumnPath('Super1', 'sc1'), 5, ConsistencyLevel.ONE)
rows = {}
for row in get_range_slice(client, ColumnParent('Super1'), SlicePredicate(slice_range=SliceRange('', '', False, 1000)), '', '', 1000, ConsistencyLevel.ONE):
scs = [cosc.super_column for cosc in row.columns]
rows[row.key] = scs
assert rows == {'key3': []}, rows
def test_super_cf_remove_column(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# Make sure remove clears out what it's supposed to, and _only_ that:
client.remove('key1', ColumnPath('Super1', 'sc2', _i64(5)), 5, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc2', _i64(5)), ConsistencyLevel.ONE))
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(6), 'value6', 0)])]
_verify_simple()
# New insert, make sure it shows up post-remove:
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(7), 'value7', 0), ConsistencyLevel.ONE)
super_columns_expected = [SuperColumn(name='sc1',
columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2',
columns=[Column(_i64(6), 'value6', 0), Column(_i64(7), 'value7', 0)])]
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed:
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 0), ConsistencyLevel.ONE)
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Next, w/ a newer timestamp; it should come back
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 6), ConsistencyLevel.ONE)
super_columns = [result.super_column for result in _big_slice('key1', ColumnParent('Super1'))]
super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 6),
Column(_i64(6), 'value6', 0),
Column(_i64(7), 'value7', 0)])]
assert super_columns == super_columns_expected, super_columns
# shouldn't be able to specify a column w/o a super column for remove
cp = ColumnPath(column_family='Super1', column='sc2')
e = _expect_exception(lambda: client.remove('key1', cp, 5, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("column cannot be specified without") >= 0
def test_super_cf_remove_supercolumn(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# Make sure remove clears out what it's supposed to, and _only_ that:
client.remove('key1', ColumnPath('Super1', 'sc2'), 5, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc2', _i64(5)), ConsistencyLevel.ONE))
super_columns = _big_slice('key1', ColumnParent('Super1', 'sc2'))
assert super_columns == [], super_columns
super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)])]
super_columns = [result.super_column
for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
_verify_simple()
# Test resurrection. First, re-insert the value w/ older timestamp,
# and make sure it stays removed:
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 1), ConsistencyLevel.ONE)
super_columns = [result.super_column
for result in _big_slice('key1', ColumnParent('Super1'))]
assert super_columns == super_columns_expected, super_columns
# Next, w/ a newer timestamp; it should come back
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(5), 'value5', 6), ConsistencyLevel.ONE)
super_columns = [result.super_column
for result in _big_slice('key1', ColumnParent('Super1'))]
super_columns_expected = [SuperColumn(name='sc1', columns=[Column(_i64(4), 'value4', 0)]),
SuperColumn(name='sc2', columns=[Column(_i64(5), 'value5', 6)])]
assert super_columns == super_columns_expected, super_columns
# check slicing at the subcolumn level too
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
columns = [result.column
for result in client.get_slice('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE)]
assert columns == [Column(_i64(5), 'value5', 6)], columns
def test_super_cf_resurrect_subcolumn(self):
_set_keyspace('Keyspace1')
key = 'vijay'
client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 0), ConsistencyLevel.ONE)
client.remove(key, ColumnPath('Super1', 'sc1'), 1, ConsistencyLevel.ONE)
client.insert(key, ColumnParent('Super1', 'sc1'), Column(_i64(4), 'value4', 2), ConsistencyLevel.ONE)
result = client.get(key, ColumnPath('Super1', 'sc1'), ConsistencyLevel.ONE)
assert result.super_column.columns is not None, result.super_column
def test_empty_range(self):
_set_keyspace('Keyspace1')
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE) == []
_insert_simple()
assert get_range_slice(client, ColumnParent('Super1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE) == []
def test_range_with_remove(self):
_set_keyspace('Keyspace1')
_insert_simple()
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), 'key1', '', 1000, ConsistencyLevel.ONE)[0].key == 'key1'
client.remove('key1', ColumnPath('Standard1', column='c1'), 1, ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('Standard1', column='c2'), 1, ConsistencyLevel.ONE)
actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c2']), '', '', 1000, ConsistencyLevel.ONE)
assert actual == [KeySlice(columns=[], key='key1')], actual
def test_range_with_remove_cf(self):
_set_keyspace('Keyspace1')
_insert_simple()
assert get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), 'key1', '', 1000, ConsistencyLevel.ONE)[0].key == 'key1'
client.remove('key1', ColumnPath('Standard1'), 1, ConsistencyLevel.ONE)
actual = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['c1', 'c1']), '', '', 1000, ConsistencyLevel.ONE)
assert actual == [KeySlice(columns=[], key='key1')], actual
def test_range_collation(self):
_set_keyspace('Keyspace1')
for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in xrange(100)]:
client.insert(key, ColumnParent('Standard1'), Column(key, 'v', 0), ConsistencyLevel.ONE)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '', '', 1000, ConsistencyLevel.ONE)
L = ['-a', '-b', '0', '1', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '2', '20', '21', '22', '23', '24', '25', '26', '27','28', '29', '3', '30', '31', '32', '33', '34', '35', '36', '37', '38', '39', '4', '40', '41', '42', '43', '44', '45', '46', '47', '48', '49', '5', '50', '51', '52', '53', '54', '55', '56', '57', '58', '59', '6', '60', '61', '62', '63', '64', '65', '66', '67', '68', '69', '7', '70', '71', '72', '73', '74', '75', '76', '77', '78', '79', '8', '80', '81', '82', '83', '84', '85', '86', '87', '88', '89', '9', '90', '91', '92', '93', '94', '95', '96', '97', '98', '99', 'a', 'b']
assert len(slices) == len(L)
for key, ks in zip(L, slices):
assert key == ks.key
def test_range_partial(self):
_set_keyspace('Keyspace1')
for key in ['-a', '-b', 'a', 'b'] + [str(i) for i in xrange(100)]:
client.insert(key, ColumnParent('Standard1'), Column(key, 'v', 0), ConsistencyLevel.ONE)
def check_slices_against_keys(keyList, sliceList):
assert len(keyList) == len(sliceList), "%d vs %d" % (len(keyList), len(sliceList))
for key, ks in zip(keyList, sliceList):
assert key == ks.key
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), 'a', '', 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['a', 'b'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '', '15', 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['-a', '-b', '0', '1', '10', '11', '12', '13', '14', '15'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '50', '51', 1000, ConsistencyLevel.ONE)
check_slices_against_keys(['50', '51'], slices)
slices = get_range_slice(client, ColumnParent('Standard1'), SlicePredicate(column_names=['-a', '-a']), '1', '', 10, ConsistencyLevel.ONE)
check_slices_against_keys(['1', '10', '11', '12', '13', '14', '15', '16', '17', '18'], slices)
def test_get_slice_range(self):
_set_keyspace('Keyspace1')
_insert_range()
_verify_range()
def test_get_slice_super_range(self):
_set_keyspace('Keyspace1')
_insert_super_range()
_verify_super_range()
def test_get_range_slices_tokens(self):
_set_keyspace('Keyspace2')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Super3', 'sc1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3', 'sc1')
predicate = SlicePredicate(column_names=['col1', 'col3'])
range = KeyRange(start_token='55', end_token='55', count=100)
result = client.get_range_slices(cp, predicate, range, ConsistencyLevel.ONE)
assert len(result) == 5
assert result[0].columns[0].column.name == 'col1'
assert result[0].columns[1].column.name == 'col3'
def test_get_range_slice_super(self):
_set_keyspace('Keyspace2')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Super3', 'sc1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3', 'sc1')
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
assert len(result) == 3
assert result[0].columns[0].column.name == 'col1'
assert result[0].columns[1].column.name == 'col3'
cp = ColumnParent('Super3')
result = get_range_slice(client, cp, SlicePredicate(column_names=['sc1']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
assert len(result) == 3
assert list(set(row.columns[0].super_column.name for row in result))[0] == 'sc1'
def test_get_range_slice(self):
_set_keyspace('Keyspace1')
for key in ['key1', 'key2', 'key3', 'key4', 'key5']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Standard1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Standard1')
# test empty slice
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key6', '', 1, ConsistencyLevel.ONE)
assert len(result) == 0
# test empty columns
result = get_range_slice(client, cp, SlicePredicate(column_names=['a']), 'key2', '', 1, ConsistencyLevel.ONE)
assert len(result) == 1
assert len(result[0].columns) == 0
# test column_names predicate
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 5, ConsistencyLevel.ONE)
assert len(result) == 3, result
assert result[0].columns[0].column.name == 'col1'
assert result[0].columns[1].column.name == 'col3'
# row limiting via count.
result = get_range_slice(client, cp, SlicePredicate(column_names=['col1', 'col3']), 'key2', 'key4', 1, ConsistencyLevel.ONE)
assert len(result) == 1
# test column slice predicate
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=5)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].key == 'key1'
assert result[1].key == 'key2'
assert len(result[0].columns) == 3
assert result[0].columns[0].column.name == 'col2'
assert result[0].columns[2].column.name == 'col4'
# col limiting via count
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=2)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert len(result[0].columns) == 2
# and reversed
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col4', finish='col2', reversed=True, count=5)), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert result[0].columns[0].column.name == 'col4'
assert result[0].columns[2].column.name == 'col2'
# row limiting via count
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange(start='col2', finish='col4', reversed=False, count=5)), 'key1', 'key2', 1, ConsistencyLevel.ONE)
assert len(result) == 1
# removed data
client.remove('key1', ColumnPath('Standard1', column='col1'), 1, ConsistencyLevel.ONE)
result = get_range_slice(client, cp, SlicePredicate(slice_range=SliceRange('', '')), 'key1', 'key2', 5, ConsistencyLevel.ONE)
assert len(result) == 2, result
assert result[0].columns[0].column.name == 'col2', result[0].columns[0].column.name
assert result[1].columns[0].column.name == 'col1'
def test_wrapped_range_slices(self):
_set_keyspace('Keyspace1')
def copp_token(key):
# I cheated and generated this from Java
return {'a': '00530000000100000001',
'b': '00540000000100000001',
'c': '00550000000100000001',
'd': '00560000000100000001',
'e': '00580000000100000001'}[key]
for key in ['a', 'b', 'c', 'd', 'e']:
for cname in ['col1', 'col2', 'col3', 'col4', 'col5']:
client.insert(key, ColumnParent('Standard1'), Column(cname, 'v-' + cname, 0), ConsistencyLevel.ONE)
cp = ColumnParent('Standard1')
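        # Using the same start_token and end_token describes a wrapped range
        # covering the whole ring, so every inserted key is expected back
        # regardless of which key's token is supplied below.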
result = client.get_range_slices(cp, SlicePredicate(column_names=['col1', 'col3']), KeyRange(start_token=copp_token('e'), end_token=copp_token('e')), ConsistencyLevel.ONE)
assert [row.key for row in result] == ['a', 'b', 'c', 'd', 'e',], [row.key for row in result]
result = client.get_range_slices(cp, SlicePredicate(column_names=['col1', 'col3']), KeyRange(start_token=copp_token('c'), end_token=copp_token('c')), ConsistencyLevel.ONE)
assert [row.key for row in result] == ['a', 'b', 'c', 'd', 'e',], [row.key for row in result]
def test_get_slice_by_names(self):
_set_keyspace('Keyspace1')
_insert_range()
p = SlicePredicate(column_names=['c1', 'c2'])
result = client.get_slice('key1', ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
assert len(result) == 2
assert result[0].column.name == 'c1'
assert result[1].column.name == 'c2'
_insert_super()
p = SlicePredicate(column_names=[_i64(4)])
result = client.get_slice('key1', ColumnParent('Super1', 'sc1'), p, ConsistencyLevel.ONE)
assert len(result) == 1
assert result[0].column.name == _i64(4)
def test_multiget_slice(self):
"""Insert multiple keys and retrieve them using the multiget_slice interface"""
_set_keyspace('Keyspace1')
# Generate a list of 10 keys and insert them
num_keys = 10
keys = ['key'+str(i) for i in range(1, num_keys+1)]
_insert_multi(keys)
# Retrieve all 10 key slices
rows = _big_multislice(keys, ColumnParent('Standard1'))
        keys1 = sorted(rows.keys())
        keys2 = sorted(keys)
columns = [ColumnOrSuperColumn(c) for c in _SIMPLE_COLUMNS]
# Validate if the returned rows have the keys requested and if the ColumnOrSuperColumn is what was inserted
for key in keys:
assert rows.has_key(key) == True
assert columns == rows[key]
def test_multi_count(self):
"""Insert multiple keys and count them using the multiget interface"""
_set_keyspace('Keyspace1')
        # Generate a list of 10 keys containing 1 to 10 columns and insert them
num_keys = 10
for i in range(1, num_keys+1):
key = 'key'+str(i)
for j in range(1, i+1):
client.insert(key, ColumnParent('Standard1'), Column('c'+str(j), 'value'+str(j), 0), ConsistencyLevel.ONE)
# Count columns in all 10 keys
keys = ['key'+str(i) for i in range(1, num_keys+1)]
p = SlicePredicate(slice_range=SliceRange('', '', False, 1000))
counts = client.multiget_count(keys, ColumnParent('Standard1'), p, ConsistencyLevel.ONE)
# Check the returned counts
for i in range(1, num_keys+1):
key = 'key'+str(i)
assert counts[key] == i
def test_batch_mutate_super_deletion(self):
_set_keyspace('Keyspace1')
_insert_super('test')
d = Deletion(1, predicate=SlicePredicate(column_names=['sc1']))
cfmap = {'Super1': [Mutation(deletion=d)]}
client.batch_mutate({'test': cfmap}, ConsistencyLevel.ONE)
_expect_missing(lambda: client.get('key1', ColumnPath('Super1', 'sc1'), ConsistencyLevel.ONE))
def test_super_reinsert(self):
_set_keyspace('Keyspace1')
for x in xrange(3):
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(x), 'value', 1), ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('Super1'), 2, ConsistencyLevel.ONE)
for x in xrange(3):
client.insert('key1', ColumnParent('Super1', 'sc2'), Column(_i64(x + 3), 'value', 3), ConsistencyLevel.ONE)
for n in xrange(1, 4):
p = SlicePredicate(slice_range=SliceRange('', '', False, n))
slice = client.get_slice('key1', ColumnParent('Super1', 'sc2'), p, ConsistencyLevel.ONE)
assert len(slice) == n, "expected %s results; found %s" % (n, slice)
def test_describe_keyspace(self):
kspaces = client.describe_keyspaces()
assert len(kspaces) == 5, kspaces # ['Keyspace2', 'Keyspace1', 'system', 'system_traces', 'system_auth']
sysks = client.describe_keyspace("system")
assert sysks in kspaces
ks1 = client.describe_keyspace("Keyspace1")
assert ks1.strategy_options['replication_factor'] == '1', ks1.strategy_options
for cf in ks1.cf_defs:
if cf.name == "Standard1":
cf0 = cf
                break
assert cf0.comparator_type == "org.apache.cassandra.db.marshal.BytesType"
def test_describe(self):
server_version = client.describe_version()
assert server_version == VERSION, (server_version, VERSION)
assert client.describe_cluster_name() == 'Test Cluster'
def test_describe_ring(self):
assert list(client.describe_ring('Keyspace1'))[0].endpoints == ['127.0.0.1']
def test_describe_token_map(self):
# test/conf/cassandra.yaml specifies org.apache.cassandra.dht.ByteOrderedPartitioner
# which uses BytesToken, so this just tests that the string representation of the token
# matches a regex pattern for BytesToken.toString().
ring = client.describe_token_map().items()
assert len(ring) == 1
token, node = ring[0]
assert re.match("[0-9A-Fa-f]{32}", token)
assert node == '127.0.0.1'
def test_describe_partitioner(self):
# Make sure this just reads back the values from the config.
assert client.describe_partitioner() == "org.apache.cassandra.dht.ByteOrderedPartitioner"
def test_describe_snitch(self):
assert client.describe_snitch() == "org.apache.cassandra.locator.SimpleSnitch"
def test_invalid_ks_names(self):
def invalid_keyspace():
client.system_add_keyspace(KsDef('in-valid', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor':'1'}, cf_defs=[]))
_expect_exception(invalid_keyspace, InvalidRequestException)
def test_invalid_strategy_class(self):
def add_invalid_keyspace():
client.system_add_keyspace(KsDef('ValidKs', 'InvalidStrategyClass', {}, cf_defs=[]))
exc = _expect_exception(add_invalid_keyspace, InvalidRequestException)
s = str(exc)
assert s.find("InvalidStrategyClass") > -1, s
assert s.find("Unable to find replication strategy") > -1, s
def update_invalid_keyspace():
client.system_add_keyspace(KsDef('ValidKsForUpdate', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor':'1'}, cf_defs=[]))
client.system_update_keyspace(KsDef('ValidKsForUpdate', 'InvalidStrategyClass', {}, cf_defs=[]))
exc = _expect_exception(update_invalid_keyspace, InvalidRequestException)
s = str(exc)
assert s.find("InvalidStrategyClass") > -1, s
assert s.find("Unable to find replication strategy") > -1, s
def test_invalid_cf_names(self):
def invalid_cf():
_set_keyspace('Keyspace1')
newcf = CfDef('Keyspace1', 'in-valid')
client.system_add_column_family(newcf)
_expect_exception(invalid_cf, InvalidRequestException)
def invalid_cf_inside_new_ks():
cf = CfDef('ValidKsName_invalid_cf', 'in-valid')
_set_keyspace('system')
client.system_add_keyspace(KsDef('ValidKsName_invalid_cf', 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor': '1'}, cf_defs=[cf]))
_expect_exception(invalid_cf_inside_new_ks, InvalidRequestException)
def test_system_cf_recreate(self):
"ensures that keyspaces and column familes can be dropped and recreated in short order"
for x in range(2):
keyspace = 'test_cf_recreate'
cf_name = 'recreate_cf'
# create
newcf = CfDef(keyspace, cf_name)
newks = KsDef(keyspace, 'org.apache.cassandra.locator.SimpleStrategy', {'replication_factor':'1'}, cf_defs=[newcf])
client.system_add_keyspace(newks)
_set_keyspace(keyspace)
# insert
client.insert('key0', ColumnParent(cf_name), Column('colA', 'colA-value', 0), ConsistencyLevel.ONE)
col1 = client.get_slice('key0', ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange('', '', False, 100)), ConsistencyLevel.ONE)[0].column
assert col1.name == 'colA' and col1.value == 'colA-value'
# drop
client.system_drop_column_family(cf_name)
# recreate
client.system_add_column_family(newcf)
# query
cosc_list = client.get_slice('key0', ColumnParent(cf_name), SlicePredicate(slice_range=SliceRange('', '', False, 100)), ConsistencyLevel.ONE)
# this was failing prior to CASSANDRA-1477.
        assert len(cosc_list) == 0, 'cosc length test failed'
client.system_drop_keyspace(keyspace)
def test_system_keyspace_operations(self):
# create. note large RF, this is OK
keyspace = KsDef('CreateKeyspace',
'org.apache.cassandra.locator.SimpleStrategy',
{'replication_factor': '10'},
cf_defs=[CfDef('CreateKeyspace', 'CreateKsCf')])
client.system_add_keyspace(keyspace)
newks = client.describe_keyspace('CreateKeyspace')
assert 'CreateKsCf' in [x.name for x in newks.cf_defs]
_set_keyspace('CreateKeyspace')
# modify valid
modified_keyspace = KsDef('CreateKeyspace',
'org.apache.cassandra.locator.OldNetworkTopologyStrategy',
{'replication_factor': '1'},
cf_defs=[])
client.system_update_keyspace(modified_keyspace)
modks = client.describe_keyspace('CreateKeyspace')
assert modks.strategy_class == modified_keyspace.strategy_class
assert modks.strategy_options == modified_keyspace.strategy_options
# drop
client.system_drop_keyspace('CreateKeyspace')
def get_second_ks():
client.describe_keyspace('CreateKeyspace')
_expect_exception(get_second_ks, NotFoundException)
def test_create_then_drop_ks(self):
keyspace = KsDef('AddThenDrop',
strategy_class='org.apache.cassandra.locator.SimpleStrategy',
strategy_options={'replication_factor':'1'},
cf_defs=[])
def test_existence():
client.describe_keyspace(keyspace.name)
_expect_exception(test_existence, NotFoundException)
client.set_keyspace('system')
client.system_add_keyspace(keyspace)
test_existence()
client.system_drop_keyspace(keyspace.name)
def test_column_validators(self):
# columndef validation for regular CF
ks = 'Keyspace1'
_set_keyspace(ks)
cd = ColumnDef('col', 'LongType', None, None)
cf = CfDef('Keyspace1', 'ValidatorColumnFamily', column_metadata=[cd])
client.system_add_column_family(cf)
ks_def = client.describe_keyspace(ks)
assert 'ValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
cp = ColumnParent('ValidatorColumnFamily')
col0 = Column('col', _i64(42), 0)
col1 = Column('col', "ceci n'est pas 64bit", 0)
client.insert('key0', cp, col0, ConsistencyLevel.ONE)
e = _expect_exception(lambda: client.insert('key1', cp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# columndef validation for super CF
scf = CfDef('Keyspace1', 'ValidatorSuperColumnFamily', column_type='Super', column_metadata=[cd])
client.system_add_column_family(scf)
ks_def = client.describe_keyspace(ks)
assert 'ValidatorSuperColumnFamily' in [x.name for x in ks_def.cf_defs]
scp = ColumnParent('ValidatorSuperColumnFamily','sc1')
client.insert('key0', scp, col0, ConsistencyLevel.ONE)
e = _expect_exception(lambda: client.insert('key1', scp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# columndef and cfdef default validation
cf = CfDef('Keyspace1', 'DefaultValidatorColumnFamily', column_metadata=[cd], default_validation_class='UTF8Type')
client.system_add_column_family(cf)
ks_def = client.describe_keyspace(ks)
assert 'DefaultValidatorColumnFamily' in [x.name for x in ks_def.cf_defs]
dcp = ColumnParent('DefaultValidatorColumnFamily')
# inserting a longtype into column 'col' is valid at the columndef level
client.insert('key0', dcp, col0, ConsistencyLevel.ONE)
# inserting a UTF8type into column 'col' fails at the columndef level
e = _expect_exception(lambda: client.insert('key1', dcp, col1, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# insert a longtype into column 'fcol' should fail at the cfdef level
col2 = Column('fcol', _i64(4224), 0)
e = _expect_exception(lambda: client.insert('key1', dcp, col2, ConsistencyLevel.ONE), InvalidRequestException)
assert e.why.find("failed validation") >= 0
# insert a UTF8type into column 'fcol' is valid at the cfdef level
col3 = Column('fcol', "Stringin' it up in the Stringtel Stringifornia", 0)
client.insert('key0', dcp, col3, ConsistencyLevel.ONE)
def test_system_column_family_operations(self):
_set_keyspace('Keyspace1')
# create
cd = ColumnDef('ValidationColumn', 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'NewColumnFamily', column_metadata=[cd])
client.system_add_column_family(newcf)
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewColumnFamily' in [x.name for x in ks1.cf_defs]
cfid = [x.id for x in ks1.cf_defs if x.name=='NewColumnFamily'][0]
# modify invalid
modified_cf = CfDef('Keyspace1', 'NewColumnFamily', column_metadata=[cd])
modified_cf.id = cfid
def fail_invalid_field():
modified_cf.comparator_type = 'LongType'
client.system_update_column_family(modified_cf)
_expect_exception(fail_invalid_field, InvalidRequestException)
# modify valid
modified_cf.comparator_type = 'BytesType' # revert back to old value.
modified_cf.gc_grace_seconds = 1
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name=='NewColumnFamily'][0]
assert server_cf
assert server_cf.gc_grace_seconds == 1
# drop
client.system_drop_column_family('NewColumnFamily')
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewColumnFamily' not in [x.name for x in ks1.cf_defs]
assert 'Standard1' in [x.name for x in ks1.cf_defs]
# Make a LongType CF and add a validator
newcf = CfDef('Keyspace1', 'NewLongColumnFamily', comparator_type='LongType')
client.system_add_column_family(newcf)
three = _i64(3)
cd = ColumnDef(three, 'LongType', None, None)
ks1 = client.describe_keyspace('Keyspace1')
modified_cf = [x for x in ks1.cf_defs if x.name=='NewLongColumnFamily'][0]
modified_cf.column_metadata = [cd]
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name=='NewLongColumnFamily'][0]
assert server_cf.column_metadata[0].name == _i64(3), server_cf.column_metadata
def test_dynamic_indexes_creation_deletion(self):
_set_keyspace('Keyspace1')
cfdef = CfDef('Keyspace1', 'BlankCF')
client.system_add_column_family(cfdef)
ks1 = client.describe_keyspace('Keyspace1')
cfid = [x.id for x in ks1.cf_defs if x.name=='BlankCF'][0]
modified_cd = ColumnDef('birthdate', 'BytesType', IndexType.KEYS, None)
modified_cf = CfDef('Keyspace1', 'BlankCF', column_metadata=[modified_cd])
modified_cf.id = cfid
client.system_update_column_family(modified_cf)
# Add a second indexed CF ...
birthdate_coldef = ColumnDef('birthdate', 'BytesType', IndexType.KEYS, None)
age_coldef = ColumnDef('age', 'BytesType', IndexType.KEYS, 'age_index')
cfdef = CfDef('Keyspace1', 'BlankCF2', column_metadata=[birthdate_coldef, age_coldef])
client.system_add_column_family(cfdef)
# ... and update it to have a third index
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name=='BlankCF2'][0]
name_coldef = ColumnDef('name', 'BytesType', IndexType.KEYS, 'name_index')
cfdef.column_metadata.append(name_coldef)
client.system_update_column_family(cfdef)
# Now drop the indexes
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name=='BlankCF2'][0]
birthdate_coldef = ColumnDef('birthdate', 'BytesType', None, None)
age_coldef = ColumnDef('age', 'BytesType', None, None)
name_coldef = ColumnDef('name', 'BytesType', None, None)
cfdef.column_metadata = [birthdate_coldef, age_coldef, name_coldef]
client.system_update_column_family(cfdef)
ks1 = client.describe_keyspace('Keyspace1')
cfdef = [x for x in ks1.cf_defs if x.name=='BlankCF'][0]
birthdate_coldef = ColumnDef('birthdate', 'BytesType', None, None)
cfdef.column_metadata = [birthdate_coldef]
client.system_update_column_family(cfdef)
client.system_drop_column_family('BlankCF')
client.system_drop_column_family('BlankCF2')
def test_dynamic_indexes_with_system_update_cf(self):
_set_keyspace('Keyspace1')
cd = ColumnDef('birthdate', 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'ToBeIndexed', default_validation_class='LongType', column_metadata=[cd])
client.system_add_column_family(newcf)
client.insert('key1', ColumnParent('ToBeIndexed'), Column('birthdate', _i64(1), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('ToBeIndexed'), Column('birthdate', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('ToBeIndexed'), Column('b', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('ToBeIndexed'), Column('birthdate', _i64(3), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('ToBeIndexed'), Column('b', _i64(3), 0), ConsistencyLevel.ONE)
# First without index
cp = ColumnParent('ToBeIndexed')
sp = SlicePredicate(slice_range=SliceRange('', ''))
key_range = KeyRange('', '', None, None, [IndexExpression('birthdate', IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key1'
assert len(result[0].columns) == 1, result[0].columns
# add an index on 'birthdate'
ks1 = client.describe_keyspace('Keyspace1')
cfid = [x.id for x in ks1.cf_defs if x.name=='ToBeIndexed'][0]
modified_cd = ColumnDef('birthdate', 'BytesType', IndexType.KEYS, 'bd_index')
modified_cf = CfDef('Keyspace1', 'ToBeIndexed', column_metadata=[modified_cd])
modified_cf.id = cfid
client.system_update_column_family(modified_cf)
ks1 = client.describe_keyspace('Keyspace1')
server_cf = [x for x in ks1.cf_defs if x.name=='ToBeIndexed'][0]
assert server_cf
assert server_cf.column_metadata[0].index_type == modified_cd.index_type
assert server_cf.column_metadata[0].index_name == modified_cd.index_name
# sleep a bit to give time for the index to build.
time.sleep(0.5)
# repeat query on one index expression
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key1'
assert len(result[0].columns) == 1, result[0].columns
def test_system_super_column_family_operations(self):
_set_keyspace('Keyspace1')
# create
cd = ColumnDef('ValidationColumn', 'BytesType', None, None)
newcf = CfDef('Keyspace1', 'NewSuperColumnFamily', 'Super', column_metadata=[cd])
client.system_add_column_family(newcf)
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewSuperColumnFamily' in [x.name for x in ks1.cf_defs]
# drop
client.system_drop_column_family('NewSuperColumnFamily')
ks1 = client.describe_keyspace('Keyspace1')
assert 'NewSuperColumnFamily' not in [x.name for x in ks1.cf_defs]
assert 'Standard1' in [x.name for x in ks1.cf_defs]
def test_insert_ttl(self):
""" Test simple insertion of a column with ttl """
_set_keyspace('Keyspace1')
column = Column('cttl1', 'value1', 0, 5)
client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
assert client.get('key1', ColumnPath('Standard1', column='cttl1'), ConsistencyLevel.ONE).column == column
def test_simple_expiration(self):
""" Test that column ttled do expires """
_set_keyspace('Keyspace1')
column = Column('cttl3', 'value1', 0, 2)
client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
time.sleep(1)
c = client.get('key1', ColumnPath('Standard1', column='cttl3'), ConsistencyLevel.ONE).column
assert c == column
assert client.get('key1', ColumnPath('Standard1', column='cttl3'), ConsistencyLevel.ONE).column == column
time.sleep(2)
_expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='cttl3'), ConsistencyLevel.ONE))
def test_simple_expiration_batch_mutate(self):
""" Test that column ttled do expires using batch_mutate """
_set_keyspace('Keyspace1')
column = Column('cttl4', 'value1', 0, 2)
cfmap = {'Standard1': [Mutation(ColumnOrSuperColumn(column))]}
client.batch_mutate({'key1': cfmap}, ConsistencyLevel.ONE)
time.sleep(1)
c = client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE).column
assert c == column
assert client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE).column == column
time.sleep(2)
        _expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE))
def test_update_expiring(self):
""" Test that updating a column with ttl override the ttl """
_set_keyspace('Keyspace1')
column1 = Column('cttl4', 'value1', 0, 1)
client.insert('key1', ColumnParent('Standard1'), column1, ConsistencyLevel.ONE)
column2 = Column('cttl4', 'value1', 1)
client.insert('key1', ColumnParent('Standard1'), column2, ConsistencyLevel.ONE)
time.sleep(1.5)
assert client.get('key1', ColumnPath('Standard1', column='cttl4'), ConsistencyLevel.ONE).column == column2
def test_remove_expiring(self):
""" Test removing a column with ttl """
_set_keyspace('Keyspace1')
column = Column('cttl5', 'value1', 0, 10)
client.insert('key1', ColumnParent('Standard1'), column, ConsistencyLevel.ONE)
client.remove('key1', ColumnPath('Standard1', column='cttl5'), 1, ConsistencyLevel.ONE)
        _expect_missing(lambda: client.get('key1', ColumnPath('Standard1', column='cttl5'), ConsistencyLevel.ONE))
def test_describe_ring_on_invalid_keyspace(self):
def req():
client.describe_ring('system')
_expect_exception(req, InvalidRequestException)
def test_incr_decr_standard_add(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
d3 = 35
# insert positive and negative values and check the counts
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == (d1+d2)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d3), ConsistencyLevel.ONE)
time.sleep(0.1)
rv3 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv3.counter_column.value == (d1+d2+d3)
def test_incr_decr_super_add(self):
_set_keyspace('Keyspace1')
d1 = -234
d2 = 52345
d3 = 3123
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c2', d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1'), ConsistencyLevel.ONE)
assert rv1.counter_super_column.columns[0].value == d1
assert rv1.counter_super_column.columns[1].value == d2
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d2), ConsistencyLevel.ONE)
time.sleep(0.1)
rv2 = client.get('key1', ColumnPath('SuperCounter1', 'sc1', 'c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == (d1+d2)
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d3), ConsistencyLevel.ONE)
time.sleep(0.1)
rv3 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv3.counter_column.value == (d1+d2+d3)
def test_incr_standard_remove(self):
_set_keyspace('Keyspace1')
d1 = 124
# insert value and check it exists
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='Counter1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
def test_incr_super_remove(self):
_set_keyspace('Keyspace1')
d1 = 52345
# insert value and check it exists
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
def test_incr_decr_standard_remove(self):
_set_keyspace('Keyspace1')
d1 = 124
# insert value and check it exists
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='Counter1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
def test_incr_decr_super_remove(self):
_set_keyspace('Keyspace1')
d1 = 52345
# insert value and check it exists
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1
# remove the previous column and check that it is gone
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
client.add('key1', ColumnParent(column_family='SuperCounter1', super_column='sc1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1
client.remove_counter('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1'), ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='SuperCounter1', super_column='sc1', column='c1'))
def test_incr_decr_standard_batch_add(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
update_map = {'key1': {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d2))),
]}}
# insert positive and negative values and check the counts
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(0.1)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1+d2
def test_incr_decr_standard_batch_remove(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
# insert positive and negative values and check the counts
update_map = {'key1': {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d2))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
rv1 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv1.counter_column.value == d1+d2
# remove the previous column and check that it is gone
update_map = {'key1': {'Counter1': [
Mutation(deletion=Deletion(predicate=SlicePredicate(column_names=['c1']))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
# insert again and this time delete the whole row, check that it is gone
update_map = {'key1': {'Counter1': [
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d1))),
Mutation(column_or_supercolumn=ColumnOrSuperColumn(counter_column=CounterColumn('c1', d2))),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
rv2 = client.get('key1', ColumnPath(column_family='Counter1', column='c1'), ConsistencyLevel.ONE)
assert rv2.counter_column.value == d1+d2
update_map = {'key1': {'Counter1': [
Mutation(deletion=Deletion()),
]}}
client.batch_mutate(update_map, ConsistencyLevel.ONE)
time.sleep(5)
_assert_no_columnpath('key1', ColumnPath(column_family='Counter1', column='c1'))
def test_incr_decr_standard_slice(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c1', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c2', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d2), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c4', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c5', d1), ConsistencyLevel.ONE)
time.sleep(0.1)
# insert positive and negative values and check the counts
counters = client.get_slice('key1', ColumnParent('Counter1'), SlicePredicate(['c3', 'c4']), ConsistencyLevel.ONE)
assert counters[0].counter_column.value == d1+d2
assert counters[1].counter_column.value == d1
    def test_incr_decr_standard_multiget_slice(self):
_set_keyspace('Keyspace1')
d1 = 12
d2 = -21
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c2', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c3', d2), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c4', d1), ConsistencyLevel.ONE)
client.add('key1', ColumnParent(column_family='Counter1'), CounterColumn('c5', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c2', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c3', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c3', d2), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c4', d1), ConsistencyLevel.ONE)
client.add('key2', ColumnParent(column_family='Counter1'), CounterColumn('c5', d1), ConsistencyLevel.ONE)
time.sleep(0.1)
# insert positive and negative values and check the counts
counters = client.multiget_slice(['key1', 'key2'], ColumnParent('Counter1'), SlicePredicate(['c3', 'c4']), ConsistencyLevel.ONE)
assert counters['key1'][0].counter_column.value == d1+d2
assert counters['key1'][1].counter_column.value == d1
assert counters['key2'][0].counter_column.value == d1+d2
assert counters['key2'][1].counter_column.value == d1
def test_counter_get_slice_range(self):
_set_keyspace('Keyspace1')
_insert_counter_range()
_verify_counter_range()
def test_counter_get_slice_super_range(self):
_set_keyspace('Keyspace1')
_insert_counter_super_range()
_verify_counter_super_range()
def test_index_scan(self):
_set_keyspace('Keyspace1')
client.insert('key1', ColumnParent('Indexed1'), Column('birthdate', _i64(1), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('Indexed1'), Column('birthdate', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key2', ColumnParent('Indexed1'), Column('b', _i64(2), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('Indexed1'), Column('birthdate', _i64(3), 0), ConsistencyLevel.ONE)
client.insert('key3', ColumnParent('Indexed1'), Column('b', _i64(3), 0), ConsistencyLevel.ONE)
# simple query on one index expression
cp = ColumnParent('Indexed1')
sp = SlicePredicate(slice_range=SliceRange('', ''))
key_range = KeyRange('', '', None, None, [IndexExpression('birthdate', IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key1'
assert len(result[0].columns) == 1, result[0].columns
# without index
key_range = KeyRange('', '', None, None, [IndexExpression('b', IndexOperator.EQ, _i64(1))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 0, result
# but unindexed expression added to indexed one is ok
key_range = KeyRange('', '', None, None, [IndexExpression('b', IndexOperator.EQ, _i64(3)), IndexExpression('birthdate', IndexOperator.EQ, _i64(3))], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
assert result[0].key == 'key3'
assert len(result[0].columns) == 2, result[0].columns
def test_index_scan_uuid_names(self):
_set_keyspace('Keyspace1')
sp = SlicePredicate(slice_range=SliceRange('', ''))
cp = ColumnParent('Indexed3') # timeuuid name, utf8 values
u = uuid.UUID('00000000-0000-1000-0000-000000000000').bytes
u2 = uuid.UUID('00000000-0000-1000-0000-000000000001').bytes
client.insert('key1', ColumnParent('Indexed3'), Column(u, 'a', 0), ConsistencyLevel.ONE)
client.insert('key1', ColumnParent('Indexed3'), Column(u2, 'b', 0), ConsistencyLevel.ONE)
# name comparator + data validator of incompatible types -- see CASSANDRA-2347
key_range = KeyRange('', '', None, None, [IndexExpression(u, IndexOperator.EQ, 'a'), IndexExpression(u2, IndexOperator.EQ, 'b')], 100)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
cp = ColumnParent('Indexed2') # timeuuid name, long values
# name must be valid (TimeUUID)
key_range = KeyRange('', '', None, None, [IndexExpression('foo', IndexOperator.EQ, uuid.UUID('00000000-0000-1000-0000-000000000000').bytes)], 100)
_expect_exception(lambda: client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE), InvalidRequestException)
# value must be valid (TimeUUID)
key_range = KeyRange('', '', None, None, [IndexExpression(uuid.UUID('00000000-0000-1000-0000-000000000000').bytes, IndexOperator.EQ, "foo")], 100)
_expect_exception(lambda: client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE), InvalidRequestException)
def test_index_scan_expiring(self):
""" Test that column ttled expires from KEYS index"""
_set_keyspace('Keyspace1')
client.insert('key1', ColumnParent('Indexed1'), Column('birthdate', _i64(1), 0, 1), ConsistencyLevel.ONE)
cp = ColumnParent('Indexed1')
sp = SlicePredicate(slice_range=SliceRange('', ''))
key_range = KeyRange('', '', None, None, [IndexExpression('birthdate', IndexOperator.EQ, _i64(1))], 100)
# query before expiration
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 1, result
# wait for expiration and requery
time.sleep(2)
result = client.get_range_slices(cp, sp, key_range, ConsistencyLevel.ONE)
assert len(result) == 0, result
def test_column_not_found_quorum(self):
_set_keyspace('Keyspace1')
key = 'doesntexist'
column_path = ColumnPath(column_family="Standard1", column="idontexist")
try:
client.get(key, column_path, ConsistencyLevel.QUORUM)
assert False, ('columnpath %s existed in %s when it should not' % (column_path, key))
except NotFoundException:
assert True, 'column did not exist'
def test_get_range_slice_after_deletion(self):
_set_keyspace('Keyspace2')
key = 'key1'
        # three supercolumns, each with a "col1" subcolumn
for i in range(1,4):
client.insert(key, ColumnParent('Super3', 'sc%d' % i), Column('col1', 'val1', 0), ConsistencyLevel.ONE)
cp = ColumnParent('Super3')
predicate = SlicePredicate(slice_range=SliceRange('sc1', 'sc3', False, count=1))
k_range = KeyRange(start_key=key, end_key=key, count=1)
# validate count=1 restricts to 1 supercolumn
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1
# remove sc1; add back subcolumn to override tombstone
client.remove(key, ColumnPath('Super3', 'sc1'), 1, ConsistencyLevel.ONE)
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1
client.insert(key, ColumnParent('Super3', 'sc1'), Column('col1', 'val1', 2), ConsistencyLevel.ONE)
result = client.get_range_slices(cp, predicate, k_range, ConsistencyLevel.ONE)
assert len(result[0].columns) == 1, result[0].columns
assert result[0].columns[0].super_column.name == 'sc1'
class TestTruncate(ThriftTester):
def test_truncate(self):
_set_keyspace('Keyspace1')
_insert_simple()
_insert_super()
# truncate Standard1
client.truncate('Standard1')
assert _big_slice('key1', ColumnParent('Standard1')) == []
# truncate Super1
client.truncate('Super1')
assert _big_slice('key1', ColumnParent('Super1')) == []
assert _big_slice('key1', ColumnParent('Super1', 'sc1')) == []
| apache-2.0 |
EliAndrewC/ensconce | ensconce/util/cpjsonrpc.py | 1 | 11059 | # coding: utf-8
"""
From http://code.google.com/p/cherrypy-jsonrpc (LGPL)
Some modifications:
- Content-Type (application/json)
"""
import sys
import httplib
import cherrypy
import traceback
try:
import jsonlib2 as json
_ParseError = json.ReadError
except ImportError:
import json
_ParseError = ValueError
def _raw_body_reader():
"""
    Reads the raw request body before CherryPy would parse it incorrectly.
"""
if cherrypy.request.method in cherrypy.request.methods_with_bodies:
cherrypy.request.raw_body = cherrypy.request.rfile.read()
cherrypy.tools.raw_body_reader = cherrypy.Tool("before_request_body", _raw_body_reader)
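# The assignment above registers _raw_body_reader as a CherryPy tool hooked at
# "before_request_body", so handlers can read the untouched JSON payload from
# cherrypy.request.raw_body (enabled per class via "tools.raw_body_reader.on").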
def set_content_type_json():
"""
    Sets the Content-Type of the response to "application/json".
"""
cherrypy.response.headers["Content-Type"] = "application/json"
class SuccessfulResponse(object):
"""
Represents a successful response.
"""
def __init__(self, jsonrpc = None, id = None, result = None):
"""
:param jsonrpc: JSON-RPC version string
:param id: JSON-RPC transaction id
:param result: Result data
"""
self.jsonrpc = jsonrpc
self.id = id
self.result = result
def to_dict(self):
"""
        Returns the response object as a dictionary.
"""
retdict = {}
if self.jsonrpc:
retdict["jsonrpc"] = self.jsonrpc
if not self.id is None:
retdict["id"] = self.id
if not self.result is None:
retdict["result"] = self.result
return retdict
class ErrorResponse(object):
"""
Represents an error response object
"""
code = None
message = None
def __init__(self, jsonrpc = None, id = None, data = None):
"""
:param jsonrpc: JSON-RPC version string
:param id: JSON-RPC transaction id
        :param data: Additional error information. Can be any data structure
            that can be translated to JSON.
"""
self.jsonrpc = jsonrpc
self.id = id
self.data = data
def to_dict(self):
"""
        Returns the response object as a dictionary.
"""
retdict = {"error": {}}
if self.jsonrpc:
retdict["jsonrpc"] = self.jsonrpc
retdict["id"] = self.id
retdict["error"]["code"] = self.code
retdict["error"]["message"] = self.message
if self.data:
retdict["error"]["data"] = self.data
if isinstance(self.data, basestring):
if self.message:
retdict["error"]["message"] = \
self.message + u" " + self.data.capitalize()
else:
retdict["error"]["message"] = self.data.capitalize()
return retdict
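# The subclasses below carry the reserved error codes defined by the JSON-RPC
# 2.0 specification: -32700 (parse error), -32600 (invalid request), -32601
# (method not found), -32602 (invalid params) and -32603 (internal error).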
class ParseErrorResponse(ErrorResponse):
code = -32700
message = u"Invalid JSON was received by the server."
class InvalidRequestResponse(ErrorResponse):
code = -32600
message = u"The JSON sent is not a valid Request object."
class MethodNotFoundResponse(ErrorResponse):
code = -32601
message = u"The method does not exist / is not available."
class InvalidParamsResponse(ErrorResponse):
code = -32602
message = u"Invalid method parameter(s)."
class InternalErrorResponse(ErrorResponse):
code = -32603
message = u"Internal JSON-RPC error."
class JsonRpcMethods(object):
"""
    Subclassing this class automatically turns the instance methods marked
    with *exposed* into JSON-RPC methods.
"""
_cp_config = {
"tools.encode.on": True,
"tools.encode.encoding": "utf-8",
"tools.decode.on": True,
"tools.raw_body_reader.on": True,
}
def __init__(self, debug = False):
self.debug = debug
        # All attributes/methods of this class marked *exposed* (except the
        # *default* method) are registered as JSON-RPC methods.
        # Their *exposed* flag is removed afterwards.
rpc_methods = {}
for attribute_name in dir(self):
if (
not attribute_name.startswith("_") and
attribute_name != "default"
):
item = getattr(self, attribute_name)
if hasattr(item, "exposed") and item.exposed:
                    # This is a function that was marked as exposed
rpc_methods[attribute_name] = item
del item.__dict__["exposed"]
self.rpc_methods = rpc_methods
def default(self, *args, **kwargs):
"""
        Receives the JSON-RPC request and dispatches it to the matching
        JSON-RPC method.
"""
responses = []
# Response content type -> JSON
set_content_type_json()
# Get data
if cherrypy.request.method == "GET":
data = kwargs
if "params" in data:
if self.debug:
cherrypy.log("")
cherrypy.log(u"params (raw): " + repr(data["params"]))
cherrypy.log("")
try:
data["params"] = json.loads(data["params"])
except _ParseError, err:
traceback_info = "".join(traceback.format_exception(*sys.exc_info()))
cherrypy.log(traceback_info)
return json.dumps(
ParseErrorResponse(
data = unicode(err)
).to_dict()
)
requests = [data]
elif cherrypy.request.method == "POST":
if self.debug:
cherrypy.log("")
cherrypy.log(u"cherrypy.request.raw_body:")
cherrypy.log(repr(cherrypy.request.raw_body))
cherrypy.log("")
try:
data = json.loads(cherrypy.request.raw_body)
except _ParseError, err:
traceback_info = "".join(traceback.format_exception(*sys.exc_info()))
cherrypy.log(traceback_info)
return json.dumps(
ParseErrorResponse(
data = unicode(err)
).to_dict()
)
if isinstance(data, list):
requests = data
else:
requests = [data]
else:
raise cherrypy.HTTPError(
status = httplib.BAD_REQUEST,
message = "Only GET or POST allowed"
)
# Every JSON-RPC request in a batch of requests
for request in requests:
# jsonrpc
jsonrpc = request.get("jsonrpc")
# method
method = str(request.get("method", ""))
# id
id = request.get("id")
# split positional and named params
positional_params = []
named_params = {}
params = request.get("params", [])
if isinstance(params, list):
positional_params = params
elif isinstance(params, dict):
positional_params = params.get("__args", [])
if positional_params:
del params["__args"]
named_params = params
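            # Illustration of the convention implemented above (example values
            # are made up): "params": {"__args": [1, 2], "verbose": true} is
            # dispatched as method(1, 2, verbose=True), while a plain list or
            # dict yields purely positional or purely named arguments.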
# Debug
if self.debug:
cherrypy.log("")
cherrypy.log(u"jsonrpc: " + repr(jsonrpc))
cherrypy.log(u"request: " + repr(request))
cherrypy.log(u"positional_params: " + repr(positional_params))
cherrypy.log(u"named_params: " + repr(named_params))
cherrypy.log(u"method: " + repr(method))
cherrypy.log(u"id: " + repr(id))
cherrypy.log("")
# Do we know the method name?
if not method in self.rpc_methods:
traceback_info = "".join(traceback.format_exception(*sys.exc_info()))
cherrypy.log("JSON-RPC method '%s' not found" % method)
responses.append(
MethodNotFoundResponse(jsonrpc = jsonrpc, id = id).to_dict()
)
continue
# Call the method with parameters
try:
rpc_function = self.rpc_methods[method]
result = rpc_function(*positional_params, **named_params)
                # No return value is OK if we don't have an ID (= notification)
if result is None:
if id:
cherrypy.log("No result from JSON-RPC method '%s'" % method)
responses.append(
InternalErrorResponse(
jsonrpc = jsonrpc,
id = id,
data = u"No result from JSON-RPC method."
).to_dict()
)
else:
# Successful response
responses.append(
SuccessfulResponse(
jsonrpc = jsonrpc, id = id, result = result
).to_dict()
)
except TypeError, err:
traceback_info = "".join(traceback.format_exception(*sys.exc_info()))
cherrypy.log(traceback_info)
if "takes exactly" in unicode(err) and "arguments" in unicode(err):
responses.append(
InvalidParamsResponse(jsonrpc = jsonrpc, id = id).to_dict()
)
else:
responses.append(
InternalErrorResponse(
jsonrpc = jsonrpc,
id = id,
data = unicode(err)
).to_dict()
)
except BaseException, err:
traceback_info = "".join(traceback.format_exception(*sys.exc_info()))
cherrypy.log(traceback_info)
if hasattr(err, "data"):
error_data = err.data
else:
error_data = None
responses.append(
InternalErrorResponse(
jsonrpc = jsonrpc,
id = id,
data = error_data or unicode(err)
).to_dict()
)
# Return as JSON-String (batch or normal)
if len(requests) == 1:
return json.dumps(responses[0])
elif len(requests) > 1:
return json.dumps(responses)
else:
return None
default.exposed = True
| bsd-3-clause |
guewen/odoo | addons/portal_project_issue/tests/__init__.py | 167 | 1124 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2013-TODAY OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import test_access_rights
checks = [
test_access_rights,
]
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
github-account-because-they-want-it/django | tests/db_functions/models.py | 245 | 1332 | """
Tests for built-in Function expressions.
"""
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=50)
alias = models.CharField(max_length=50, null=True, blank=True)
goes_by = models.CharField(max_length=50, null=True, blank=True)
age = models.PositiveSmallIntegerField(default=30)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
authors = models.ManyToManyField(Author, related_name='articles')
title = models.CharField(max_length=50)
summary = models.CharField(max_length=200, null=True, blank=True)
text = models.TextField()
written = models.DateTimeField()
published = models.DateTimeField(null=True, blank=True)
updated = models.DateTimeField(null=True, blank=True)
views = models.PositiveIntegerField(default=0)
def __str__(self):
return self.title
@python_2_unicode_compatible
class Fan(models.Model):
name = models.CharField(max_length=50)
age = models.PositiveSmallIntegerField(default=30)
author = models.ForeignKey(Author, models.CASCADE, related_name='fans')
def __str__(self):
return self.name
| bsd-3-clause |
suyashdb/hcp2bids | setup.py | 1 | 2182 | from setuptools import setup
import os, glob, shutil
import re, json, numpy
import nibabel as ni
here = os.path.abspath(os.path.dirname(__file__))
setup(
name="hcp2bids",
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='0.1.0',
description="Command line tool to convert HCP dataset to a Brain Imaging Data Structure "
"compatible dataset.",
long_description="Command line tool to convert HCP dataset to a Brain Imaging Data Structure "
"compatible dataset.",
# The project URL.
url='https://github.com/suyashdb/hcp2bids',
# Choose your license
license='BSD',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='BIDS HCP NIH',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages.
packages=["hcp2bids"],
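    # For a multi-package project one could use setuptools.find_packages()
    # instead, e.g. packages=find_packages(exclude=["tests"]); shown only as
    # an illustration -- this project lists its single package explicitly.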
# List run-time dependencies here. These will be installed by pip when your
# project is installed.
install_requires = ["numpy",
"pandas",
'nibabel'],
include_package_data=True,
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'hcp2bids=hcp2bids.main:main',
],
},
)
| mit |
Endika/mitmproxy | libmproxy/contrib/jsbeautifier/__init__.py | 50 | 41216 | import sys
import getopt
import re
import string
#
# Originally written by Einar Lielmanis et al.,
# Conversion to python by Einar Lielmanis, [email protected],
# MIT licence, enjoy.
#
# Python is not my native language, feel free to push things around.
#
# Use either from command line (script displays its usage when run
# without any parameters),
#
#
# or, alternatively, use it as a module:
#
# import jsbeautifier
# res = jsbeautifier.beautify('your javascript string')
# res = jsbeautifier.beautify_file('some_file.js')
#
# you may specify some options:
#
# opts = jsbeautifier.default_options()
# opts.indent_size = 2
# res = jsbeautifier.beautify('some javascript', opts)
#
#
# Here are the available options: (read source)
class BeautifierOptions:
def __init__(self):
self.indent_size = 4
self.indent_char = ' '
self.indent_with_tabs = False
self.preserve_newlines = True
self.max_preserve_newlines = 10.
self.jslint_happy = False
self.brace_style = 'collapse'
self.keep_array_indentation = False
self.keep_function_indentation = False
self.eval_code = False
def __repr__(self):
return \
"""indent_size = %d
indent_char = [%s]
preserve_newlines = %s
max_preserve_newlines = %d
jslint_happy = %s
indent_with_tabs = %s
brace_style = %s
keep_array_indentation = %s
eval_code = %s
""" % ( self.indent_size,
self.indent_char,
self.preserve_newlines,
self.max_preserve_newlines,
self.jslint_happy,
self.indent_with_tabs,
self.brace_style,
self.keep_array_indentation,
self.eval_code,
)
class BeautifierFlags:
def __init__(self, mode):
self.previous_mode = 'BLOCK'
self.mode = mode
self.var_line = False
self.var_line_tainted = False
self.var_line_reindented = False
self.in_html_comment = False
self.if_line = False
self.in_case = False
self.eat_next_space = False
self.indentation_baseline = -1
self.indentation_level = 0
self.ternary_depth = 0
def default_options():
return BeautifierOptions()
def beautify(string, opts = default_options() ):
b = Beautifier()
return b.beautify(string, opts)
def beautify_file(file_name, opts = default_options() ):
if file_name == '-': # stdin
f = sys.stdin
else:
try:
f = open(file_name)
except Exception as ex:
return 'The file could not be opened'
b = Beautifier()
return b.beautify(''.join(f.readlines()), opts)
def usage():
print("""Javascript beautifier (http://jsbeautifier.org/)
Usage: jsbeautifier.py [options] <infile>
<infile> can be "-", which means stdin.
<outfile> defaults to stdout
Input options:
-i, --stdin read input from stdin
Output options:
-s, --indent-size=NUMBER indentation size. (default 4).
-c, --indent-char=CHAR character to indent with. (default space).
-t, --indent-with-tabs Indent with tabs, overrides -s and -c
-d, --disable-preserve-newlines do not preserve existing line breaks.
-j, --jslint-happy more jslint-compatible output
-b, --brace-style=collapse brace style (collapse, expand, end-expand)
-k, --keep-array-indentation keep array indentation.
-o, --outfile=FILE specify a file to output to (default stdout)
-f, --keep-function-indentation Do not re-indent function bodies defined in var lines.
Rarely needed options:
--eval-code evaluate code if a JS interpreter is
installed. May be useful with some obfuscated
script but poses a potential security issue.
-l, --indent-level=NUMBER initial indentation level. (default 0).
-h, --help, --usage prints this help statement.
""")
class Beautifier:
def __init__(self, opts = default_options() ):
self.opts = opts
self.blank_state()
def blank_state(self):
# internal flags
self.flags = BeautifierFlags('BLOCK')
self.flag_store = []
self.wanted_newline = False
self.just_added_newline = False
self.do_block_just_closed = False
if self.opts.indent_with_tabs:
self.indent_string = "\t"
else:
self.indent_string = self.opts.indent_char * self.opts.indent_size
self.preindent_string = ''
self.last_word = '' # last TK_WORD seen
self.last_type = 'TK_START_EXPR' # last token type
self.last_text = '' # last token text
self.last_last_text = '' # pre-last token text
self.input = None
self.output = [] # formatted javascript gets built here
self.whitespace = ["\n", "\r", "\t", " "]
self.wordchar = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$'
self.digits = '0123456789'
self.punct = '+ - * / % & ++ -- = += -= *= /= %= == === != !== > < >= <= >> << >>> >>>= >>= <<= && &= | || ! !! , : ? ^ ^= |= ::'
self.punct += ' <?= <? ?> <%= <% %>'
self.punct = self.punct.split(' ')
# Words which always should start on a new line
self.line_starters = 'continue,try,throw,return,var,if,switch,case,default,for,while,break,function'.split(',')
self.set_mode('BLOCK')
global parser_pos
parser_pos = 0
def beautify(self, s, opts = None ):
if opts != None:
self.opts = opts
if self.opts.brace_style not in ['expand', 'collapse', 'end-expand']:
raise(Exception('opts.brace_style must be "expand", "collapse" or "end-expand".'))
self.blank_state()
while s and s[0] in [' ', '\t']:
self.preindent_string += s[0]
s = s[1:]
#self.input = self.unpack(s, opts.eval_code)
# CORTESI
self.input = s
parser_pos = 0
while True:
token_text, token_type = self.get_next_token()
#print (token_text, token_type, self.flags.mode)
if token_type == 'TK_EOF':
break
handlers = {
'TK_START_EXPR': self.handle_start_expr,
'TK_END_EXPR': self.handle_end_expr,
'TK_START_BLOCK': self.handle_start_block,
'TK_END_BLOCK': self.handle_end_block,
'TK_WORD': self.handle_word,
'TK_SEMICOLON': self.handle_semicolon,
'TK_STRING': self.handle_string,
'TK_EQUALS': self.handle_equals,
'TK_OPERATOR': self.handle_operator,
'TK_BLOCK_COMMENT': self.handle_block_comment,
'TK_INLINE_COMMENT': self.handle_inline_comment,
'TK_COMMENT': self.handle_comment,
'TK_UNKNOWN': self.handle_unknown,
}
handlers[token_type](token_text)
self.last_last_text = self.last_text
self.last_type = token_type
self.last_text = token_text
sweet_code = self.preindent_string + re.sub('[\n ]+$', '', ''.join(self.output))
return sweet_code
def unpack(self, source, evalcode=False):
import jsbeautifier.unpackers as unpackers
try:
return unpackers.run(source, evalcode)
except unpackers.UnpackingError as error:
print('error:', error)
return ''
def trim_output(self, eat_newlines = False):
while len(self.output) \
and (
self.output[-1] == ' '\
or self.output[-1] == self.indent_string \
or self.output[-1] == self.preindent_string \
or (eat_newlines and self.output[-1] in ['\n', '\r'])):
self.output.pop()
def is_special_word(self, s):
        return s in ['case', 'return', 'do', 'if', 'throw', 'else']
    def is_array(self, mode):
        return mode in ['[EXPRESSION]', '[INDENTED-EXPRESSION]']
    def is_expression(self, mode):
        return mode in ['[EXPRESSION]', '[INDENTED-EXPRESSION]', '(EXPRESSION)', '(FOR-EXPRESSION)', '(COND-EXPRESSION)']
def append_newline_forced(self):
old_array_indentation = self.opts.keep_array_indentation
self.opts.keep_array_indentation = False
self.append_newline()
self.opts.keep_array_indentation = old_array_indentation
def append_newline(self, ignore_repeated = True):
self.flags.eat_next_space = False
if self.opts.keep_array_indentation and self.is_array(self.flags.mode):
return
self.flags.if_line = False
self.trim_output()
if len(self.output) == 0:
# no newline on start of file
return
if self.output[-1] != '\n' or not ignore_repeated:
self.just_added_newline = True
self.output.append('\n')
if self.preindent_string:
self.output.append(self.preindent_string)
for i in range(self.flags.indentation_level):
self.output.append(self.indent_string)
if self.flags.var_line and self.flags.var_line_reindented:
self.output.append(self.indent_string)
def append(self, s):
if s == ' ':
# do not add just a single space after the // comment, ever
if self.last_type == 'TK_COMMENT':
return self.append_newline()
# make sure only single space gets drawn
if self.flags.eat_next_space:
self.flags.eat_next_space = False
elif len(self.output) and self.output[-1] not in [' ', '\n', self.indent_string]:
self.output.append(' ')
else:
self.just_added_newline = False
self.flags.eat_next_space = False
self.output.append(s)
def indent(self):
self.flags.indentation_level = self.flags.indentation_level + 1
def remove_indent(self):
if len(self.output) and self.output[-1] in [self.indent_string, self.preindent_string]:
self.output.pop()
def set_mode(self, mode):
prev = BeautifierFlags('BLOCK')
if self.flags:
self.flag_store.append(self.flags)
prev = self.flags
self.flags = BeautifierFlags(mode)
if len(self.flag_store) == 1:
self.flags.indentation_level = 0
else:
self.flags.indentation_level = prev.indentation_level
if prev.var_line and prev.var_line_reindented:
self.flags.indentation_level = self.flags.indentation_level + 1
self.flags.previous_mode = prev.mode
def restore_mode(self):
self.do_block_just_closed = self.flags.mode == 'DO_BLOCK'
if len(self.flag_store) > 0:
mode = self.flags.mode
self.flags = self.flag_store.pop()
self.flags.previous_mode = mode
def get_next_token(self):
global parser_pos
self.n_newlines = 0
if parser_pos >= len(self.input):
return '', 'TK_EOF'
self.wanted_newline = False
c = self.input[parser_pos]
parser_pos += 1
keep_whitespace = self.opts.keep_array_indentation and self.is_array(self.flags.mode)
if keep_whitespace:
# slight mess to allow nice preservation of array indentation and reindent that correctly
# first time when we get to the arrays:
# var a = [
# ....'something'
# we make note of whitespace_count = 4 into flags.indentation_baseline
# so we know that 4 whitespaces in original source match indent_level of reindented source
#
# and afterwards, when we get to
# 'something,
# .......'something else'
# we know that this should be indented to indent_level + (7 - indentation_baseline) spaces
whitespace_count = 0
while c in self.whitespace:
if c == '\n':
self.trim_output()
self.output.append('\n')
self.just_added_newline = True
whitespace_count = 0
elif c == '\t':
whitespace_count += 4
elif c == '\r':
pass
else:
whitespace_count += 1
if parser_pos >= len(self.input):
return '', 'TK_EOF'
c = self.input[parser_pos]
parser_pos += 1
if self.flags.indentation_baseline == -1:
self.flags.indentation_baseline = whitespace_count
if self.just_added_newline:
for i in range(self.flags.indentation_level + 1):
self.output.append(self.indent_string)
if self.flags.indentation_baseline != -1:
for i in range(whitespace_count - self.flags.indentation_baseline):
self.output.append(' ')
else: # not keep_whitespace
while c in self.whitespace:
if c == '\n':
if self.opts.max_preserve_newlines == 0 or self.opts.max_preserve_newlines > self.n_newlines:
self.n_newlines += 1
if parser_pos >= len(self.input):
return '', 'TK_EOF'
c = self.input[parser_pos]
parser_pos += 1
if self.opts.preserve_newlines and self.n_newlines > 1:
for i in range(self.n_newlines):
self.append_newline(i == 0)
self.just_added_newline = True
self.wanted_newline = self.n_newlines > 0
if c in self.wordchar:
if parser_pos < len(self.input):
while self.input[parser_pos] in self.wordchar:
c = c + self.input[parser_pos]
parser_pos += 1
if parser_pos == len(self.input):
break
# small and surprisingly unugly hack for 1E-10 representation
if parser_pos != len(self.input) and self.input[parser_pos] in '+-' \
and re.match('^[0-9]+[Ee]$', c):
sign = self.input[parser_pos]
parser_pos += 1
t = self.get_next_token()
c += sign + t[0]
return c, 'TK_WORD'
if c == 'in': # in is an operator, need to hack
return c, 'TK_OPERATOR'
if self.wanted_newline and \
self.last_type != 'TK_OPERATOR' and\
self.last_type != 'TK_EQUALS' and\
not self.flags.if_line and \
(self.opts.preserve_newlines or self.last_text != 'var'):
self.append_newline()
return c, 'TK_WORD'
if c in '([':
return c, 'TK_START_EXPR'
if c in ')]':
return c, 'TK_END_EXPR'
if c == '{':
return c, 'TK_START_BLOCK'
if c == '}':
return c, 'TK_END_BLOCK'
if c == ';':
return c, 'TK_SEMICOLON'
if c == '/':
comment = ''
inline_comment = True
comment_mode = 'TK_INLINE_COMMENT'
if self.input[parser_pos] == '*': # peek /* .. */ comment
parser_pos += 1
if parser_pos < len(self.input):
while not (self.input[parser_pos] == '*' and \
parser_pos + 1 < len(self.input) and \
self.input[parser_pos + 1] == '/')\
and parser_pos < len(self.input):
c = self.input[parser_pos]
comment += c
if c in '\r\n':
comment_mode = 'TK_BLOCK_COMMENT'
parser_pos += 1
if parser_pos >= len(self.input):
break
parser_pos += 2
return '/*' + comment + '*/', comment_mode
if self.input[parser_pos] == '/': # peek // comment
comment = c
while self.input[parser_pos] not in '\r\n':
comment += self.input[parser_pos]
parser_pos += 1
if parser_pos >= len(self.input):
break
parser_pos += 1
if self.wanted_newline:
self.append_newline()
return comment, 'TK_COMMENT'
if c == "'" or c == '"' or \
(c == '/' and ((self.last_type == 'TK_WORD' and self.is_special_word(self.last_text)) or \
(self.last_type == 'TK_END_EXPR' and self.flags.previous_mode in ['(FOR-EXPRESSION)', '(COND-EXPRESSION)']) or \
(self.last_type in ['TK_COMMENT', 'TK_START_EXPR', 'TK_START_BLOCK', 'TK_END_BLOCK', 'TK_OPERATOR',
'TK_EQUALS', 'TK_EOF', 'TK_SEMICOLON']))):
sep = c
esc = False
resulting_string = c
in_char_class = False
if parser_pos < len(self.input):
if sep == '/':
# handle regexp
in_char_class = False
while esc or in_char_class or self.input[parser_pos] != sep:
resulting_string += self.input[parser_pos]
if not esc:
esc = self.input[parser_pos] == '\\'
if self.input[parser_pos] == '[':
in_char_class = True
elif self.input[parser_pos] == ']':
in_char_class = False
else:
esc = False
parser_pos += 1
if parser_pos >= len(self.input):
# incomplete regex when end-of-file reached
                            # bail out with what has been received so far
return resulting_string, 'TK_STRING'
else:
# handle string
while esc or self.input[parser_pos] != sep:
resulting_string += self.input[parser_pos]
if not esc:
esc = self.input[parser_pos] == '\\'
else:
esc = False
parser_pos += 1
if parser_pos >= len(self.input):
# incomplete string when end-of-file reached
                            # bail out with what has been received so far
return resulting_string, 'TK_STRING'
parser_pos += 1
resulting_string += sep
if sep == '/':
# regexps may have modifiers /regexp/MOD, so fetch those too
while parser_pos < len(self.input) and self.input[parser_pos] in self.wordchar:
resulting_string += self.input[parser_pos]
parser_pos += 1
return resulting_string, 'TK_STRING'
if c == '#':
# she-bang
if len(self.output) == 0 and len(self.input) > 1 and self.input[parser_pos] == '!':
resulting_string = c
while parser_pos < len(self.input) and c != '\n':
c = self.input[parser_pos]
resulting_string += c
parser_pos += 1
self.output.append(resulting_string.strip() + "\n")
self.append_newline()
return self.get_next_token()
# Spidermonkey-specific sharp variables for circular references
# https://developer.mozilla.org/En/Sharp_variables_in_JavaScript
# http://mxr.mozilla.org/mozilla-central/source/js/src/jsscan.cpp around line 1935
sharp = '#'
if parser_pos < len(self.input) and self.input[parser_pos] in self.digits:
while True:
c = self.input[parser_pos]
sharp += c
parser_pos += 1
if parser_pos >= len(self.input) or c == '#' or c == '=':
break
if c == '#' or parser_pos >= len(self.input):
pass
elif self.input[parser_pos] == '[' and self.input[parser_pos + 1] == ']':
sharp += '[]'
parser_pos += 2
elif self.input[parser_pos] == '{' and self.input[parser_pos + 1] == '}':
sharp += '{}'
parser_pos += 2
return sharp, 'TK_WORD'
if c == '<' and self.input[parser_pos - 1 : parser_pos + 3] == '<!--':
parser_pos += 3
c = '<!--'
while parser_pos < len(self.input) and self.input[parser_pos] != '\n':
c += self.input[parser_pos]
parser_pos += 1
self.flags.in_html_comment = True
return c, 'TK_COMMENT'
if c == '-' and self.flags.in_html_comment and self.input[parser_pos - 1 : parser_pos + 2] == '-->':
self.flags.in_html_comment = False
parser_pos += 2
if self.wanted_newline:
self.append_newline()
return '-->', 'TK_COMMENT'
if c in self.punct:
while parser_pos < len(self.input) and c + self.input[parser_pos] in self.punct:
c += self.input[parser_pos]
parser_pos += 1
if parser_pos >= len(self.input):
break
if c == '=':
return c, 'TK_EQUALS'
else:
return c, 'TK_OPERATOR'
return c, 'TK_UNKNOWN'
def handle_start_expr(self, token_text):
if token_text == '[':
if self.last_type == 'TK_WORD' or self.last_text == ')':
if self.last_text in self.line_starters:
self.append(' ')
self.set_mode('(EXPRESSION)')
self.append(token_text)
return
if self.flags.mode in ['[EXPRESSION]', '[INDENTED-EXPRESSION]']:
if self.last_last_text == ']' and self.last_text == ',':
# ], [ goes to a new line
if self.flags.mode == '[EXPRESSION]':
self.flags.mode = '[INDENTED-EXPRESSION]'
if not self.opts.keep_array_indentation:
self.indent()
self.set_mode('[EXPRESSION]')
if not self.opts.keep_array_indentation:
self.append_newline()
elif self.last_text == '[':
if self.flags.mode == '[EXPRESSION]':
self.flags.mode = '[INDENTED-EXPRESSION]'
if not self.opts.keep_array_indentation:
self.indent()
self.set_mode('[EXPRESSION]')
if not self.opts.keep_array_indentation:
self.append_newline()
else:
self.set_mode('[EXPRESSION]')
else:
self.set_mode('[EXPRESSION]')
else:
if self.last_text == 'for':
self.set_mode('(FOR-EXPRESSION)')
elif self.last_text in ['if', 'while']:
self.set_mode('(COND-EXPRESSION)')
else:
self.set_mode('(EXPRESSION)')
if self.last_text == ';' or self.last_type == 'TK_START_BLOCK':
self.append_newline()
elif self.last_type in ['TK_END_EXPR', 'TK_START_EXPR', 'TK_END_BLOCK'] or self.last_text == '.':
# do nothing on (( and )( and ][ and ]( and .(
if self.wanted_newline:
                self.append_newline()
elif self.last_type not in ['TK_WORD', 'TK_OPERATOR']:
self.append(' ')
elif self.last_word == 'function' or self.last_word == 'typeof':
# function() vs function (), typeof() vs typeof ()
if self.opts.jslint_happy:
self.append(' ')
elif self.last_text in self.line_starters or self.last_text == 'catch':
self.append(' ')
self.append(token_text)
def handle_end_expr(self, token_text):
if token_text == ']':
if self.opts.keep_array_indentation:
if self.last_text == '}':
self.remove_indent()
self.append(token_text)
self.restore_mode()
return
else:
if self.flags.mode == '[INDENTED-EXPRESSION]':
if self.last_text == ']':
self.restore_mode()
self.append_newline()
self.append(token_text)
return
self.restore_mode()
self.append(token_text)
def handle_start_block(self, token_text):
if self.last_word == 'do':
self.set_mode('DO_BLOCK')
else:
self.set_mode('BLOCK')
if self.opts.brace_style == 'expand':
if self.last_type != 'TK_OPERATOR':
if self.last_text == '=' or (self.is_special_word(self.last_text) and self.last_text != 'else'):
self.append(' ')
else:
self.append_newline(True)
self.append(token_text)
self.indent()
else:
if self.last_type not in ['TK_OPERATOR', 'TK_START_EXPR']:
if self.last_type == 'TK_START_BLOCK':
self.append_newline()
else:
self.append(' ')
else:
# if TK_OPERATOR or TK_START_EXPR
if self.is_array(self.flags.previous_mode) and self.last_text == ',':
if self.last_last_text == '}':
self.append(' ')
else:
self.append_newline()
self.indent()
self.append(token_text)
def handle_end_block(self, token_text):
self.restore_mode()
if self.opts.brace_style == 'expand':
if self.last_text != '{':
self.append_newline()
else:
if self.last_type == 'TK_START_BLOCK':
if self.just_added_newline:
self.remove_indent()
else:
# {}
self.trim_output()
else:
if self.is_array(self.flags.mode) and self.opts.keep_array_indentation:
self.opts.keep_array_indentation = False
self.append_newline()
self.opts.keep_array_indentation = True
else:
self.append_newline()
self.append(token_text)
def handle_word(self, token_text):
if self.do_block_just_closed:
self.append(' ')
self.append(token_text)
self.append(' ')
self.do_block_just_closed = False
return
if token_text == 'function':
if self.flags.var_line:
self.flags.var_line_reindented = not self.opts.keep_function_indentation
if (self.just_added_newline or self.last_text == ';') and self.last_text != '{':
# make sure there is a nice clean space of at least one blank line
# before a new function definition
have_newlines = self.n_newlines
if not self.just_added_newline:
have_newlines = 0
if not self.opts.preserve_newlines:
have_newlines = 1
for i in range(2 - have_newlines):
self.append_newline(False)
if token_text in ['case', 'default']:
if self.last_text == ':':
self.remove_indent()
else:
self.flags.indentation_level -= 1
self.append_newline()
self.flags.indentation_level += 1
self.append(token_text)
self.flags.in_case = True
return
prefix = 'NONE'
if self.last_type == 'TK_END_BLOCK':
if token_text not in ['else', 'catch', 'finally']:
prefix = 'NEWLINE'
else:
if self.opts.brace_style in ['expand', 'end-expand']:
prefix = 'NEWLINE'
else:
prefix = 'SPACE'
self.append(' ')
elif self.last_type == 'TK_SEMICOLON' and self.flags.mode in ['BLOCK', 'DO_BLOCK']:
prefix = 'NEWLINE'
elif self.last_type == 'TK_SEMICOLON' and self.is_expression(self.flags.mode):
prefix = 'SPACE'
elif self.last_type == 'TK_STRING':
prefix = 'NEWLINE'
elif self.last_type == 'TK_WORD':
if self.last_text == 'else':
# eat newlines between ...else *** some_op...
# won't preserve extra newlines in this place (if any), but don't care that much
self.trim_output(True)
prefix = 'SPACE'
elif self.last_type == 'TK_START_BLOCK':
prefix = 'NEWLINE'
elif self.last_type == 'TK_END_EXPR':
self.append(' ')
prefix = 'NEWLINE'
if self.flags.if_line and self.last_type == 'TK_END_EXPR':
self.flags.if_line = False
if token_text in self.line_starters:
if self.last_text == 'else':
prefix = 'SPACE'
else:
prefix = 'NEWLINE'
if token_text == 'function' and self.last_text in ['get', 'set']:
prefix = 'SPACE'
if token_text in ['else', 'catch', 'finally']:
if self.last_type != 'TK_END_BLOCK' \
or self.opts.brace_style == 'expand' \
or self.opts.brace_style == 'end-expand':
self.append_newline()
else:
self.trim_output(True)
self.append(' ')
elif prefix == 'NEWLINE':
if token_text == 'function' and (self.last_type == 'TK_START_EXPR' or self.last_text in '=,'):
# no need to force newline on "function" -
# (function...
pass
elif token_text == 'function' and self.last_text == 'new':
self.append(' ')
elif self.is_special_word(self.last_text):
# no newline between return nnn
self.append(' ')
elif self.last_type != 'TK_END_EXPR':
if (self.last_type != 'TK_START_EXPR' or token_text != 'var') and self.last_text != ':':
# no need to force newline on VAR -
# for (var x = 0...
if token_text == 'if' and self.last_word == 'else' and self.last_text != '{':
self.append(' ')
else:
self.flags.var_line = False
self.flags.var_line_reindented = False
self.append_newline()
elif token_text in self.line_starters and self.last_text != ')':
self.flags.var_line = False
self.flags.var_line_reindented = False
self.append_newline()
elif self.is_array(self.flags.mode) and self.last_text == ',' and self.last_last_text == '}':
self.append_newline() # }, in lists get a newline
elif prefix == 'SPACE':
self.append(' ')
self.append(token_text)
self.last_word = token_text
if token_text == 'var':
self.flags.var_line = True
self.flags.var_line_reindented = False
self.flags.var_line_tainted = False
if token_text == 'if':
self.flags.if_line = True
if token_text == 'else':
self.flags.if_line = False
def handle_semicolon(self, token_text):
self.append(token_text)
self.flags.var_line = False
self.flags.var_line_reindented = False
if self.flags.mode == 'OBJECT':
# OBJECT mode is weird and doesn't get reset too well.
self.flags.mode = 'BLOCK'
def handle_string(self, token_text):
if self.last_type == 'TK_END_EXPR' and self.flags.previous_mode in ['(COND-EXPRESSION)', '(FOR-EXPRESSION)']:
self.append(' ')
if self.last_type in ['TK_STRING', 'TK_START_BLOCK', 'TK_END_BLOCK', 'TK_SEMICOLON']:
self.append_newline()
elif self.last_type == 'TK_WORD':
self.append(' ')
# Try to replace readable \x-encoded characters with their equivalent,
# if it is possible (e.g. '\x41\x42\x43\x01' becomes 'ABC\x01').
def unescape(match):
block, code = match.group(0, 1)
char = chr(int(code, 16))
if block.count('\\') == 1 and char in string.printable:
return char
return block
token_text = re.sub(r'\\{1,2}x([a-fA-F0-9]{2})', unescape, token_text)
self.append(token_text)
def handle_equals(self, token_text):
if self.flags.var_line:
# just got an '=' in a var-line, different line breaking rules will apply
self.flags.var_line_tainted = True
self.append(' ')
self.append(token_text)
self.append(' ')
def handle_operator(self, token_text):
space_before = True
space_after = True
if self.flags.var_line and token_text == ',' and self.is_expression(self.flags.mode):
# do not break on comma, for ( var a = 1, b = 2
self.flags.var_line_tainted = False
if self.flags.var_line and token_text == ',':
if self.flags.var_line_tainted:
self.append(token_text)
self.flags.var_line_reindented = True
self.flags.var_line_tainted = False
self.append_newline()
return
else:
self.flags.var_line_tainted = False
if self.is_special_word(self.last_text):
# return had a special handling in TK_WORD
self.append(' ')
self.append(token_text)
return
if token_text == ':' and self.flags.in_case:
self.append(token_text)
self.append_newline()
self.flags.in_case = False
return
if token_text == '::':
# no spaces around the exotic namespacing syntax operator
self.append(token_text)
return
if token_text == ',':
if self.flags.var_line:
if self.flags.var_line_tainted:
# This never happens, as it's handled previously, right?
self.append(token_text)
self.append_newline()
self.flags.var_line_tainted = False
else:
self.append(token_text)
self.append(' ')
elif self.last_type == 'TK_END_BLOCK' and self.flags.mode != '(EXPRESSION)':
self.append(token_text)
if self.flags.mode == 'OBJECT' and self.last_text == '}':
self.append_newline()
else:
self.append(' ')
else:
if self.flags.mode == 'OBJECT':
self.append(token_text)
self.append_newline()
else:
# EXPR or DO_BLOCK
self.append(token_text)
self.append(' ')
# comma handled
return
elif token_text in ['--', '++', '!'] \
or (token_text in ['+', '-'] \
and self.last_type in ['TK_START_BLOCK', 'TK_START_EXPR', 'TK_EQUALS', 'TK_OPERATOR']) \
or self.last_text in self.line_starters:
space_before = False
space_after = False
if self.last_text == ';' and self.is_expression(self.flags.mode):
# for (;; ++i)
# ^^
space_before = True
if self.last_type == 'TK_WORD' and self.last_text in self.line_starters:
space_before = True
if self.flags.mode == 'BLOCK' and self.last_text in ['{', ';']:
# { foo: --i }
# foo(): --bar
self.append_newline()
elif token_text == '.':
# decimal digits or object.property
space_before = False
elif token_text == ':':
if self.flags.ternary_depth == 0:
self.flags.mode = 'OBJECT'
space_before = False
else:
self.flags.ternary_depth -= 1
elif token_text == '?':
self.flags.ternary_depth += 1
if space_before:
self.append(' ')
self.append(token_text)
if space_after:
self.append(' ')
def handle_block_comment(self, token_text):
lines = token_text.replace('\x0d', '').split('\x0a')
# all lines start with an asterisk? that's a proper box comment
if not any(l for l in lines[1:] if ( l.strip() == '' or (l.lstrip())[0] != '*')):
self.append_newline()
self.append(lines[0])
for line in lines[1:]:
self.append_newline()
self.append(' ' + line.strip())
else:
# simple block comment: leave intact
if len(lines) > 1:
# multiline comment starts on a new line
self.append_newline()
else:
# single line /* ... */ comment stays on the same line
self.append(' ')
for line in lines:
self.append(line)
self.append('\n')
self.append_newline()
def handle_inline_comment(self, token_text):
self.append(' ')
self.append(token_text)
if self.is_expression(self.flags.mode):
self.append(' ')
else:
self.append_newline_forced()
def handle_comment(self, token_text):
if self.wanted_newline:
self.append_newline()
else:
self.append(' ')
self.append(token_text)
self.append_newline_forced()
def handle_unknown(self, token_text):
if self.last_text in ['return', 'throw']:
self.append(' ')
self.append(token_text)
def main():
argv = sys.argv[1:]
try:
        opts, args = getopt.getopt(argv, "s:c:o:djb:kil:htf", ['indent-size=','indent-char=','outfile=', 'disable-preserve-newlines',
'jslint-happy', 'brace-style=',
'keep-array-indentation', 'indent-level=', 'help',
'usage', 'stdin', 'eval-code', 'indent-with-tabs', 'keep-function-indentation'])
except getopt.GetoptError:
return usage()
js_options = default_options()
file = None
outfile = 'stdout'
if len(args) == 1:
file = args[0]
for opt, arg in opts:
if opt in ('--keep-array-indentation', '-k'):
js_options.keep_array_indentation = True
if opt in ('--keep-function-indentation','-f'):
js_options.keep_function_indentation = True
elif opt in ('--outfile', '-o'):
outfile = arg
elif opt in ('--indent-size', '-s'):
js_options.indent_size = int(arg)
elif opt in ('--indent-char', '-c'):
js_options.indent_char = arg
elif opt in ('--indent-with-tabs', '-t'):
js_options.indent_with_tabs = True
        elif opt in ('--disable-preserve-newlines', '-d'):
js_options.preserve_newlines = False
elif opt in ('--jslint-happy', '-j'):
js_options.jslint_happy = True
        elif opt in ('--eval-code',):
js_options.eval_code = True
elif opt in ('--brace-style', '-b'):
js_options.brace_style = arg
elif opt in ('--stdin', '-i'):
file = '-'
elif opt in ('--help', '--usage', '-h'):
return usage()
if not file:
return usage()
else:
if outfile == 'stdout':
print(beautify_file(file, js_options))
else:
with open(outfile, 'w') as f:
f.write(beautify_file(file, js_options) + '\n')
| mit |
detiber/ansible | test/sanity/validate-modules/utils.py | 33 | 3443 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2015 Matt Martz <[email protected]>
# Copyright (C) 2015 Rackspace US, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import ast
import sys
# We only use StringIO, since we cannot setattr on cStringIO
from StringIO import StringIO
import yaml
import yaml.reader
def find_globals(g, tree):
"""Uses AST to find globals in an ast tree"""
for child in tree:
if hasattr(child, 'body') and isinstance(child.body, list):
find_globals(g, child.body)
elif isinstance(child, (ast.FunctionDef, ast.ClassDef)):
g.add(child.name)
continue
elif isinstance(child, ast.Assign):
try:
g.add(child.targets[0].id)
except (IndexError, AttributeError):
pass
elif isinstance(child, ast.Import):
g.add(child.names[0].name)
elif isinstance(child, ast.ImportFrom):
for name in child.names:
g_name = name.asname or name.name
if g_name == '*':
continue
g.add(g_name)
class CaptureStd():
"""Context manager to handle capturing stderr and stdout"""
def __enter__(self):
self.sys_stdout = sys.stdout
self.sys_stderr = sys.stderr
sys.stdout = self.stdout = StringIO()
sys.stderr = self.stderr = StringIO()
setattr(sys.stdout, 'encoding', self.sys_stdout.encoding)
setattr(sys.stderr, 'encoding', self.sys_stderr.encoding)
return self
def __exit__(self, exc_type, exc_value, traceback):
sys.stdout = self.sys_stdout
sys.stderr = self.sys_stderr
def get(self):
"""Return ``(stdout, stderr)``"""
return self.stdout.getvalue(), self.stderr.getvalue()
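# Illustrative usage of CaptureStd -- a sketch added for clarity, not part of the
# upstream module. The context manager swaps sys.stdout/sys.stderr for StringIO
# buffers inside the block, and both streams can be read back via get():
#
#   with CaptureStd() as capture:
#       print('noisy import side effect')
#   captured_out, captured_err = capture.get()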
def parse_yaml(value, lineno, module, name, load_all=False):
traces = []
errors = []
data = None
if load_all:
loader = yaml.safe_load_all
else:
loader = yaml.safe_load
try:
data = loader(value)
if load_all:
data = list(data)
except yaml.MarkedYAMLError as e:
e.problem_mark.line += lineno - 1
e.problem_mark.name = '%s.%s' % (module, name)
errors.append('%s is not valid YAML. Line %d column %d' %
(name, e.problem_mark.line + 1,
e.problem_mark.column + 1))
traces.append(e)
except yaml.reader.ReaderError as e:
traces.append(e)
errors.append('%s is not valid YAML. Character '
'0x%x at position %d.' %
(name, e.character, e.position))
except yaml.YAMLError as e:
traces.append(e)
errors.append('%s is not valid YAML: %s: %s' % (name, type(e), e))
return data, errors, traces
| gpl-3.0 |
captiosus/treadmill | treadmill/infra/setup/ldap.py | 2 | 1481 | from treadmill.infra.setup import base_provision
from treadmill.infra import configuration, connection, constants, instances
from treadmill.api import ipa
import time
class LDAP(base_provision.BaseProvision):
def setup(
self,
image,
count,
key,
cidr_block,
tm_release,
instance_type,
app_root,
cell_subnet_id,
ipa_admin_password,
subnet_id=None
):
# TODO: remove count as parameter
count = 1
self.name = self.name + '-' + str(time.time())
hostname = self.name + '.' + connection.Connection.context.domain
otp = ipa.API().add_host(hostname=hostname)
ipa_server_hostname = instances.Instances.get_hostnames_by_roles(
vpc_id=self.vpc.id,
roles=[
constants.ROLES['IPA']
]
)[constants.ROLES['IPA']]
self.configuration = configuration.LDAP(
cell_subnet_id=cell_subnet_id,
tm_release=tm_release,
app_root=app_root,
hostname=hostname,
ipa_admin_password=ipa_admin_password,
ipa_server_hostname=ipa_server_hostname,
otp=otp
)
super().setup(
image=image,
count=count,
cidr_block=cidr_block,
subnet_id=subnet_id,
key=key,
instance_type=instance_type
)
| apache-2.0 |
dethos/cloudroutes-service | src/actions/actions/saltstack-generic/__init__.py | 6 | 1476 | #!/usr/bin/python
######################################################################
# Cloud Routes Bridge
# -------------------------------------------------------------------
# Actions Module
######################################################################
import requests
import time
def action(**kwargs):
''' This method is called to action a reaction '''
redata = kwargs['redata']
jdata = kwargs['jdata']
logger = kwargs['logger']
run = True
# Check for Trigger
if redata['trigger'] > jdata['failcount']:
run = False
# Check for lastrun
checktime = time.time() - float(redata['lastrun'])
if checktime < redata['frequency']:
run = False
if redata['data']['call_on'] not in jdata['check']['status']:
run = False
if run:
return callSalt(redata, jdata, logger)
else:
return None
def callSalt(redata, jdata, logger):
''' Perform actual call '''
url = redata['data']['url']
payload = redata['data']
try:
req = requests.post(url, data=payload, timeout=3.00, verify=False)
except:
return False
if req.status_code == 200:
line = "saltstack-generic: Reqeust to %s sent for monitor %s - Successful" % (url, jdata['cid'])
logger.info(line)
return True
else:
line = "saltstack-generic: Request to %s sent for monitor %s - False" % (url, jdata['cid'])
logger.info(line)
return False
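# Illustrative call shape for action() -- a sketch only; the redata/jdata schema
# below is an assumption inferred from the checks above, not taken from upstream
# documentation, and `some_logger` stands in for any logging.Logger-like object.
#
#   action(redata={'trigger': 1, 'lastrun': '0', 'frequency': 60,
#                  'data': {'call_on': 'false', 'url': 'https://example.invalid/salt-hook'}},
#          jdata={'failcount': 2, 'check': {'status': 'false'}, 'cid': 'monitor-1'},
#          logger=some_logger)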
| agpl-3.0 |
nextgis/NextGIS_QGIS_open | python/plugins/processing/script/WrongScriptException.py | 46 | 1234 | # -*- coding: utf-8 -*-
"""
***************************************************************************
WrongScriptException.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
class WrongScriptException(Exception):
def __init__(self, msg):
Exception.__init__(self)
self.msg = msg
| gpl-2.0 |
raiabril/arduino_logger | py2app-0.9-py2.7.egg/py2app/script_py2applet.py | 9 | 5469 | """
Create an applet from a Python script.
You can drag in packages, Info.plist files, icons, etc.
It's expected that only one Python script is dragged in.
"""
from __future__ import print_function
import os, sys
from distutils.core import setup
from plistlib import Plist
import py2app
import tempfile
import shutil
import imp
import pprint
from py2app.util import copy_tree
from py2app import build_app
try:
set
except NameError:
from sets import Set as set
if sys.version_info[0] == 3:
raw_input = input
HELP_TEXT = """
usage: py2applet --make-setup [options...] script.py [data files...]
or: py2applet [options...] script.py [data files...]
or: py2applet --help
"""
SETUP_TEMPLATE = '''"""
This is a setup.py script generated by py2applet
Usage:
python setup.py py2app
"""
from setuptools import setup
APP = %s
DATA_FILES = %s
OPTIONS = %s
setup(
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
'''
def get_option_map():
optmap = {}
for option in build_app.py2app.user_options:
opt_long, opt_short = option[:2]
if opt_short:
optmap['-' + opt_short] = opt_long.rstrip('=')
return optmap
def get_cmd_options():
options = set()
for option in build_app.py2app.user_options:
opt_long, opt_short = option[:2]
if opt_long.endswith('=') and opt_short:
options.add('-' + opt_short)
return options
def main():
if not sys.argv[1:]:
print(HELP_TEXT)
return
scripts = []
data_files = []
packages = []
args = []
plist = {}
iconfile = None
parsing_options = True
next_is_option = False
cmd_options = get_cmd_options()
is_make_setup = False
for fn in sys.argv[1:]:
if parsing_options:
if next_is_option:
args.append(fn)
next_is_option = False
continue
elif fn == '--make-setup':
is_make_setup = True
continue
elif fn.startswith('-'):
args.append(fn)
if fn in cmd_options:
next_is_option = True
continue
parsing_options = False
if not is_make_setup:
fn = os.path.abspath(fn)
if fn.endswith('.py'):
if scripts:
data_files.append(fn)
else:
scripts.append(fn)
elif os.path.basename(fn) == 'Info.plist':
plist = Plist.fromFile(fn)
elif fn.endswith('.icns') and not iconfile:
iconfile = os.path.abspath(fn)
elif os.path.isdir(fn):
            sys.path.insert(0, os.path.dirname(fn))
try:
path = imp.find_module(os.path.basename(fn))[0]
except ImportError:
path = ''
del sys.path[0]
if os.path.realpath(path) == os.path.realpath(fn):
packages.append(os.path.basename(fn))
else:
data_files.append(fn)
else:
data_files.append(fn)
options = dict(
packages=packages,
plist=plist,
iconfile=iconfile,
argv_emulation=True,
)
for k,v in list(options.items()):
if not v:
del options[k]
if is_make_setup:
make_setup(args, scripts, data_files, options)
else:
build(args, scripts, data_files, options)
def make_setup(args, scripts, data_files, options):
optmap = get_option_map()
cmd_options = get_cmd_options()
while args:
cmd = args.pop(0)
if cmd in cmd_options:
cmd = optmap[cmd]
options[cmd.replace('-', '_')] = args.pop(0)
elif '=' in cmd:
cmd, val = cmd.split('=', 1)
options[cmd.lstrip('-').replace('-', '_')] = val
else:
cmd = optmap.get(cmd, cmd)
options[cmd.lstrip('-').replace('-', '_')] = True
if os.path.exists('setup.py'):
res = ''
while res.lower() not in ('y', 'n'):
res = raw_input('Existing setup.py detected, replace? [Y/n] ')
if not res:
break
if res == 'n':
print('aborted!')
return
f = open('setup.py', 'w')
tvars = tuple(map(pprint.pformat, (scripts, data_files, options)))
f.write(SETUP_TEMPLATE % tvars)
f.flush()
f.close()
print('Wrote setup.py')
def build(args, scripts, data_files, options):
old_argv = sys.argv
sys.argv = [sys.argv[0], 'py2app'] + args
old_path = sys.path
path_insert = set()
for script in scripts:
path_insert.add(os.path.dirname(script))
sys.path = list(path_insert) + old_path
old_dir = os.getcwd()
tempdir = tempfile.mkdtemp()
os.chdir(tempdir)
try:
d = setup(
app=scripts,
data_files=data_files,
options={'py2app': options},
)
for target in d.app:
copy_tree(
target.appdir,
os.path.join(
os.path.dirname(target.script),
os.path.basename(target.appdir),
),
preserve_symlinks=True,
)
finally:
os.chdir(old_dir)
shutil.rmtree(tempdir, ignore_errors=True)
sys.argv = old_argv
sys.path = old_path
if __name__ == '__main__':
main()
| gpl-2.0 |
goddino/libjingle | trunk/third_party/yasm/source/patched-yasm/tools/python-yasm/tests/test_symrec.py | 32 | 2976 | from tests import TestCase, add
from yasm import SymbolTable, Expression, YasmError
class TSymbolTable(TestCase):
def setUp(self):
self.symtab = SymbolTable()
def test_keys(self):
self.assertEquals(len(self.symtab.keys()), 0)
self.symtab.declare("foo", None, 0)
keys = self.symtab.keys()
self.assertEquals(len(keys), 1)
self.assertEquals(keys[0], "foo")
def test_contains(self):
self.assert_("foo" not in self.symtab)
self.symtab.declare("foo", None, 0)
self.assert_("foo" in self.symtab)
def test_exception(self):
expr = Expression('+', 1, 2)
self.symtab.define_equ("foo", expr, 0)
self.assertRaises(YasmError, self.symtab.define_equ, "foo", expr, 0)
self.symtab.define_equ("bar", expr, 0) # cleared
self.assertRaises(YasmError, self.symtab.define_special, "bar",
'global')
def test_iters(self):
tab = self.symtab
tab.declare("foo", None, 0)
tab.declare("bar", None, 0)
tab.declare("baz", None, 0)
# while ordering is not known, it must be consistent
self.assertEquals(list(tab.keys()), list(tab.iterkeys()))
self.assertEquals(list(tab.values()), list(tab.itervalues()))
self.assertEquals(list(tab.items()), list(tab.iteritems()))
self.assertEquals(list(tab.iteritems()), zip(tab.keys(), tab.values()))
add(TSymbolTable)
class TSymbolAttr(TestCase):
def setUp(self):
self.symtab = SymbolTable()
self.declsym = self.symtab.declare("foo", None, 0)
def test_visibility(self):
sym = self.symtab.declare("local1", None, 0)
self.assertEquals(sym.visibility, set())
sym = self.symtab.declare("local2", '', 0)
self.assertEquals(sym.visibility, set())
sym = self.symtab.declare("local3", 'local', 0)
self.assertEquals(sym.visibility, set())
sym = self.symtab.declare("global", 'global', 0)
self.assertEquals(sym.visibility, set(['global']))
sym = self.symtab.declare("common", 'common', 0)
self.assertEquals(sym.visibility, set(['common']))
sym = self.symtab.declare("extern", 'extern', 0)
self.assertEquals(sym.visibility, set(['extern']))
sym = self.symtab.declare("dlocal", 'dlocal', 0)
self.assertEquals(sym.visibility, set(['dlocal']))
self.assertRaises(ValueError,
lambda: self.symtab.declare("extern2", 'foo', 0))
def test_name(self):
self.assertEquals(self.declsym.name, "foo")
def test_equ(self):
self.assertRaises(AttributeError, lambda: self.declsym.equ)
def test_label(self):
self.assertRaises(AttributeError, lambda: self.declsym.label)
def test_is_special(self):
self.assertEquals(self.declsym.is_special, False)
def test_is_curpos(self):
self.assertEquals(self.declsym.is_curpos, False)
add(TSymbolAttr)
| bsd-3-clause |
stanlyxiang/incubator-hawq | tools/bin/gppylib/gpMgmttest/__init__.py | 12 | 3044 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest2 as unittest
import time
class GpMgmtTestRunner(unittest.TextTestRunner):
def _makeResult(self):
return GpMgmtTextTestResult(self.stream, self.descriptions, self.verbosity)
class GpMgmtTextTestResult(unittest.TextTestResult):
def __init__(self, stream, descriptions, verbosity):
super(GpMgmtTextTestResult, self).__init__(stream, descriptions, verbosity)
self.verbosity = verbosity
self.startTime = 0
def getDescription(self, test):
case_name, full_name = test.__str__().split()
suite_name, class_name = full_name.strip('()').rsplit('.',1)
if self.verbosity > 1:
if test.shortDescription():
return 'Test Suite Name|%s|Test Case Name|%s|Test Details|%s' % (suite_name, case_name, test.shortDescription())
else:
return 'Test Suite Name|%s|Test Case Name|%s|Test Details|' % (suite_name, case_name)
def startTest(self, test):
super(GpMgmtTextTestResult, self).startTest(test)
self.startTime = test.start_time = time.time()
def addSuccess(self, test):
test.end_time = time.time()
self._show_run_time()
self.stream.write('|Test Status|')
super(GpMgmtTextTestResult, self).addSuccess(test)
def addError(self, test, err):
test.end_time = time.time()
self._show_run_time()
self.stream.write('|Test Status|')
super(GpMgmtTextTestResult, self).addError(test, err)
def addFailure(self, test, err):
test.end_time = time.time()
self._show_run_time()
self.stream.write('|Test Status|')
super(GpMgmtTextTestResult, self).addFailure(test, err)
def addSkip(self, test, err):
self._show_run_time()
self.stream.write('|Test Status|')
super(GpMgmtTextTestResult, self).addSkip(test, err)
def addExpectedFailure(self, test, err):
self.end_time = time.time()
self._show_run_time()
self.stream.write('|Test Status|')
super(GpMgmtTextTestResult, self).addExpectedFailure(test, err)
def _show_run_time(self):
etime = time.time()
elapsed = etime - self.startTime
self.stream.write('(%4.2f ms)' % (elapsed*1000))
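# Illustrative usage -- a sketch; SomeMgmtTestCase is a hypothetical test case class,
# and the suite is assumed to be built with the unittest2 loader imported above.
#
#   suite = unittest.TestLoader().loadTestsFromTestCase(SomeMgmtTestCase)
#   GpMgmtTestRunner(verbosity=2).run(suite)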
| apache-2.0 |
frioux/offlineimap | offlineimap/mbnames.py | 10 | 3676 | # Mailbox name generator
#
# Copyright (C) 2002-2015 John Goerzen & contributors
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import os.path
import re # for folderfilter
from threading import Lock
boxes = {}
localroots = {}
config = None
accounts = None
mblock = Lock()
def init(conf, accts):
global config, accounts
config = conf
accounts = accts
def add(accountname, foldername, localfolders):
if not accountname in boxes:
boxes[accountname] = []
localroots[accountname] = localfolders
if not foldername in boxes[accountname]:
boxes[accountname].append(foldername)
def write(allcomplete):
incremental = config.getdefaultboolean("mbnames", "incremental", False)
# Skip writing if we don't want incremental writing and we're not done.
if not incremental and not allcomplete:
return
# Skip writing if we want incremental writing and we're done.
if incremental and allcomplete:
return
# See if we're ready to write it out.
for account in accounts:
if account not in boxes:
return
__genmbnames()
def __genmbnames():
"""Takes a configparser object and a boxlist, which is a list of hashes
containing 'accountname' and 'foldername' keys."""
xforms = [os.path.expanduser, os.path.expandvars]
mblock.acquire()
try:
localeval = config.getlocaleval()
if not config.getdefaultboolean("mbnames", "enabled", 0):
return
path = config.apply_xforms(config.get("mbnames", "filename"), xforms)
file = open(path, "wt")
file.write(localeval.eval(config.get("mbnames", "header")))
folderfilter = lambda accountname, foldername: 1
if config.has_option("mbnames", "folderfilter"):
folderfilter = localeval.eval(config.get("mbnames", "folderfilter"),
{'re': re})
mb_sort_keyfunc = lambda d: (d['accountname'], d['foldername'])
if config.has_option("mbnames", "sort_keyfunc"):
mb_sort_keyfunc = localeval.eval(config.get("mbnames", "sort_keyfunc"),
{'re': re})
itemlist = []
for accountname in boxes.keys():
localroot = localroots[accountname]
for foldername in boxes[accountname]:
if folderfilter(accountname, foldername):
itemlist.append({'accountname': accountname,
'foldername': foldername,
'localfolders': localroot})
itemlist.sort(key = mb_sort_keyfunc)
format_string = config.get("mbnames", "peritem", raw=1)
itemlist = [format_string % d for d in itemlist]
file.write(localeval.eval(config.get("mbnames", "sep")).join(itemlist))
file.write(localeval.eval(config.get("mbnames", "footer")))
file.close()
finally:
mblock.release()
| apache-2.0 |
llonchj/sentry | tests/sentry/utils/auth/tests.py | 30 | 1779 | from __future__ import absolute_import
from django.core.urlresolvers import reverse
from django.http import HttpRequest
from sentry.models import User
from sentry.testutils import TestCase
from sentry.utils.auth import EmailAuthBackend, get_login_redirect
class EmailAuthBackendTest(TestCase):
def setUp(self):
self.user = User(username="foo", email="[email protected]")
self.user.set_password("bar")
self.user.save()
@property
def backend(self):
return EmailAuthBackend()
def test_can_authenticate_with_username(self):
result = self.backend.authenticate(username='foo', password='bar')
self.assertEquals(result, self.user)
def test_can_authenticate_with_email(self):
result = self.backend.authenticate(username='[email protected]', password='bar')
self.assertEquals(result, self.user)
def test_does_not_authenticate_with_invalid_password(self):
result = self.backend.authenticate(username='foo', password='pizza')
self.assertEquals(result, None)
class GetLoginRedirectTest(TestCase):
def make_request(self, next=None):
request = HttpRequest()
request.session = {}
request.user = self.user
if next:
request.session['_next'] = next
return request
def test_schema_uses_default(self):
result = get_login_redirect(self.make_request('http://example.com'))
assert result == reverse('sentry')
def test_login_uses_default(self):
result = get_login_redirect(self.make_request(reverse('sentry-login')))
assert result == reverse('sentry')
def test_no_value_uses_default(self):
result = get_login_redirect(self.make_request())
assert result == reverse('sentry')
| bsd-3-clause |
jburger424/MediaQueueHCI | m-q-env/lib/python3.4/site-packages/pip/_vendor/requests/packages/chardet/mbcssm.py | 982 | 19608 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
# BIG5
BIG5_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
4,4,4,4,4,4,4,4, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
4,3,3,3,3,3,3,3, # a0 - a7
3,3,3,3,3,3,3,3, # a8 - af
3,3,3,3,3,3,3,3, # b0 - b7
3,3,3,3,3,3,3,3, # b8 - bf
3,3,3,3,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
BIG5_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart#10-17
)
Big5CharLenTable = (0, 1, 1, 2, 0)
Big5SMModel = {'classTable': BIG5_cls,
'classFactor': 5,
'stateTable': BIG5_st,
'charLenTable': Big5CharLenTable,
'name': 'Big5'}
# CP949
CP949_cls = (
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f
1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f
1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f
1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f
4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f
1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f
5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f
0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f
6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f
6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af
7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf
7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef
2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff
)
CP949_st = (
#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
eError,eStart, 3,eError,eStart,eStart, 4, 5,eError, 6, # eStart
eError,eError,eError,eError,eError,eError,eError,eError,eError,eError, # eError
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe, # eItsMe
eError,eError,eStart,eStart,eError,eError,eError,eStart,eStart,eStart, # 3
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 4
eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 5
eError,eStart,eStart,eStart,eStart,eError,eError,eStart,eStart,eStart, # 6
)
CP949CharLenTable = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
CP949SMModel = {'classTable': CP949_cls,
'classFactor': 10,
'stateTable': CP949_st,
'charLenTable': CP949CharLenTable,
'name': 'CP949'}
# EUC-JP
EUCJP_cls = (
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,5,5, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,5,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,4,4,4,4,4,4,4, # 30 - 37
4,4,4,4,4,4,4,4, # 38 - 3f
4,4,4,4,4,4,4,4, # 40 - 47
4,4,4,4,4,4,4,4, # 48 - 4f
4,4,4,4,4,4,4,4, # 50 - 57
4,4,4,4,4,4,4,4, # 58 - 5f
4,4,4,4,4,4,4,4, # 60 - 67
4,4,4,4,4,4,4,4, # 68 - 6f
4,4,4,4,4,4,4,4, # 70 - 77
4,4,4,4,4,4,4,4, # 78 - 7f
5,5,5,5,5,5,5,5, # 80 - 87
5,5,5,5,5,5,1,3, # 88 - 8f
5,5,5,5,5,5,5,5, # 90 - 97
5,5,5,5,5,5,5,5, # 98 - 9f
5,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,0,5 # f8 - ff
)
EUCJP_st = (
3, 4, 3, 5,eStart,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17
eError,eError,eStart,eError,eError,eError, 3,eError,#18-1f
3,eError,eError,eError,eStart,eStart,eStart,eStart#20-27
)
EUCJPCharLenTable = (2, 2, 2, 3, 1, 0)
EUCJPSMModel = {'classTable': EUCJP_cls,
'classFactor': 6,
'stateTable': EUCJP_st,
'charLenTable': EUCJPCharLenTable,
'name': 'EUC-JP'}
# EUC-KR
EUCKR_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,3,3,3, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,3,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,0 # f8 - ff
)
EUCKR_st = (
eError,eStart, 3,eError,eError,eError,eError,eError,#00-07
eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart #08-0f
)
EUCKRCharLenTable = (0, 1, 2, 0)
EUCKRSMModel = {'classTable': EUCKR_cls,
'classFactor': 4,
'stateTable': EUCKR_st,
'charLenTable': EUCKRCharLenTable,
'name': 'EUC-KR'}
# EUC-TW
EUCTW_cls = (
2,2,2,2,2,2,2,2, # 00 - 07
2,2,2,2,2,2,0,0, # 08 - 0f
2,2,2,2,2,2,2,2, # 10 - 17
2,2,2,0,2,2,2,2, # 18 - 1f
2,2,2,2,2,2,2,2, # 20 - 27
2,2,2,2,2,2,2,2, # 28 - 2f
2,2,2,2,2,2,2,2, # 30 - 37
2,2,2,2,2,2,2,2, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,2, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,6,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,3,4,4,4,4,4,4, # a0 - a7
5,5,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,3,1,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
EUCTW_st = (
eError,eError,eStart, 3, 3, 3, 4,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17
eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f
5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27
eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3)
EUCTWSMModel = {'classTable': EUCTW_cls,
'classFactor': 7,
'stateTable': EUCTW_st,
'charLenTable': EUCTWCharLenTable,
'name': 'x-euc-tw'}
# GB2312
GB2312_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
3,3,3,3,3,3,3,3, # 30 - 37
3,3,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,4, # 78 - 7f
5,6,6,6,6,6,6,6, # 80 - 87
6,6,6,6,6,6,6,6, # 88 - 8f
6,6,6,6,6,6,6,6, # 90 - 97
6,6,6,6,6,6,6,6, # 98 - 9f
6,6,6,6,6,6,6,6, # a0 - a7
6,6,6,6,6,6,6,6, # a8 - af
6,6,6,6,6,6,6,6, # b0 - b7
6,6,6,6,6,6,6,6, # b8 - bf
6,6,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
6,6,6,6,6,6,6,6, # e0 - e7
6,6,6,6,6,6,6,6, # e8 - ef
6,6,6,6,6,6,6,6, # f0 - f7
6,6,6,6,6,6,6,0 # f8 - ff
)
GB2312_st = (
eError,eStart,eStart,eStart,eStart,eStart, 3,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17
4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f
eError,eError, 5,eError,eError,eError,eItsMe,eError,#20-27
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validating
# each code range there as well. So it is safe to set it to be
# 2 here.
GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2)
GB2312SMModel = {'classTable': GB2312_cls,
'classFactor': 7,
'stateTable': GB2312_st,
'charLenTable': GB2312CharLenTable,
'name': 'GB2312'}
# Shift_JIS
SJIS_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,3,3,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
#0xa0 is illegal in sjis encoding, but some pages do
#contain such bytes. We need to be more forgiving of such errors.
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
4,4,4,4,4,4,4,4, # f0 - f7
4,4,4,4,4,0,0,0 # f8 - ff
)
SJIS_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart #10-17
)
SJISCharLenTable = (0, 1, 1, 2, 0, 0)
SJISSMModel = {'classTable': SJIS_cls,
'classFactor': 6,
'stateTable': SJIS_st,
'charLenTable': SJISCharLenTable,
'name': 'Shift_JIS'}
# UCS2-BE
UCS2BE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2BE_st = (
5, 7, 7,eError, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 6, 6, 6, 6,eError,eError,#10-17
6, 6, 6, 6, 6,eItsMe, 6, 6,#18-1f
6, 6, 6, 6, 5, 7, 7,eError,#20-27
5, 8, 6, 6,eError, 6, 6, 6,#28-2f
6, 6, 6, 6,eError,eError,eStart,eStart #30-37
)
UCS2BECharLenTable = (2, 2, 2, 0, 2, 2)
UCS2BESMModel = {'classTable': UCS2BE_cls,
'classFactor': 6,
'stateTable': UCS2BE_st,
'charLenTable': UCS2BECharLenTable,
'name': 'UTF-16BE'}
# UCS2-LE
UCS2LE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2LE_st = (
6, 6, 7, 6, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 5, 5, 5,eError,eItsMe,eError,#10-17
5, 5, 5,eError, 5,eError, 6, 6,#18-1f
7, 6, 8, 8, 5, 5, 5,eError,#20-27
5, 5, 5,eError,eError,eError, 5, 5,#28-2f
5, 5, 5,eError, 5,eError,eStart,eStart #30-37
)
UCS2LECharLenTable = (2, 2, 2, 2, 2, 2)
UCS2LESMModel = {'classTable': UCS2LE_cls,
'classFactor': 6,
'stateTable': UCS2LE_st,
'charLenTable': UCS2LECharLenTable,
'name': 'UTF-16LE'}
# UTF-8
UTF8_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
2,2,2,2,3,3,3,3, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
5,5,5,5,5,5,5,5, # a0 - a7
5,5,5,5,5,5,5,5, # a8 - af
5,5,5,5,5,5,5,5, # b0 - b7
5,5,5,5,5,5,5,5, # b8 - bf
0,0,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
7,8,8,8,8,8,8,8, # e0 - e7
8,8,8,8,8,9,8,8, # e8 - ef
10,11,11,11,11,11,11,11, # f0 - f7
12,13,13,13,14,15,0,0 # f8 - ff
)
UTF8_st = (
eError,eStart,eError,eError,eError,eError, 12, 10,#00-07
9, 11, 8, 7, 6, 5, 4, 3,#08-0f
eError,eError,eError,eError,eError,eError,eError,eError,#10-17
eError,eError,eError,eError,eError,eError,eError,eError,#18-1f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f
eError,eError, 5, 5, 5, 5,eError,eError,#30-37
eError,eError,eError,eError,eError,eError,eError,eError,#38-3f
eError,eError,eError, 5, 5, 5,eError,eError,#40-47
eError,eError,eError,eError,eError,eError,eError,eError,#48-4f
eError,eError, 7, 7, 7, 7,eError,eError,#50-57
eError,eError,eError,eError,eError,eError,eError,eError,#58-5f
eError,eError,eError,eError, 7, 7,eError,eError,#60-67
eError,eError,eError,eError,eError,eError,eError,eError,#68-6f
eError,eError, 9, 9, 9, 9,eError,eError,#70-77
eError,eError,eError,eError,eError,eError,eError,eError,#78-7f
eError,eError,eError,eError,eError, 9,eError,eError,#80-87
eError,eError,eError,eError,eError,eError,eError,eError,#88-8f
eError,eError, 12, 12, 12, 12,eError,eError,#90-97
eError,eError,eError,eError,eError,eError,eError,eError,#98-9f
eError,eError,eError,eError,eError, 12,eError,eError,#a0-a7
eError,eError,eError,eError,eError,eError,eError,eError,#a8-af
eError,eError, 12, 12, 12,eError,eError,eError,#b0-b7
eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf
eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7
eError,eError,eError,eError,eError,eError,eError,eError #c8-cf
)
UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
UTF8SMModel = {'classTable': UTF8_cls,
'classFactor': 16,
'stateTable': UTF8_st,
'charLenTable': UTF8CharLenTable,
'name': 'UTF-8'}
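# A minimal hand-written sketch (an assumption, not part of this module) of how
# one of these model dicts can be stepped: each byte indexes classTable to get a
# character class, and stateTable is a flattened (current_state x classFactor)
# matrix giving the next state.
#
#   def _walk(model, data, state=eStart):
#       for byte in bytearray(data):
#           byte_class = model['classTable'][byte]
#           state = model['stateTable'][state * model['classFactor'] + byte_class]
#           if state in (eError, eItsMe):
#               break
#       return state
#
# For UTF8SMModel a well-formed multi-byte character walks back to eStart,
# while an invalid byte sequence lands on eError.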
# flake8: noqa
| mit |
sporksmith/polygraph | experiments/workloads/generate_workloads.py | 2 | 2794 | #!/usr/bin/env python
# Polygraph (release 0.1)
# Signature generation algorithms for polymorphic worms
#
# Copyright (c) 2004-2005, Intel Corporation
# All Rights Reserved
#
# This software is distributed under the terms of the Eclipse Public
# License, Version 1.0 which can be found in the file named LICENSE.
# ANY USE, REPRODUCTION OR DISTRIBUTION OF THIS SOFTWARE CONSTITUTES
# RECIPIENT'S ACCEPTANCE OF THIS AGREEMENT
def create_workload(generator, filename):
import cPickle
workload = [sample for sample in generator]
f = open(filename, 'w')
cPickle.dump(workload, f, cPickle.HIGHEST_PROTOCOL)
f.close()
def create_noise_workload(tracefile, count, filename):
# get total number of streams in the trace
import polygraph.trace_crunching.stream_trace as stream_trace
s = stream_trace.StreamTrace(tracefile)
# select which streams to use
import random
indices = range(s.numstreams())
random.shuffle(indices)
indices = indices[:count]
indices.sort()
# get those streams
workload = []
for i in indices:
s.seek(i)
workload.append(s.next())
if s.numstreams() < count:
print '*' * 80
print 'WARNING: Only %d streams in %s, need %d to' % \
(s.numstreams(), tracefile, count)
print 'generate noise workload. Will kludge by duplicating'
print 'streams as necessary.'
print '*' * 80
workload *= (count / s.numstreams()) + 1
workload = workload[:count]
random.shuffle(workload)
# write to file
import cPickle
f = open(filename, 'w')
cPickle.dump(workload, f, cPickle.HIGHEST_PROTOCOL)
f.close()
if __name__ == '__main__':
# these should correspond to the largest workload needed
trials=5
dynamic_range=range(2,10)+range(10,50,5)+range(50,101,10)
addtl = 1000
number = trials * sum(dynamic_range) + addtl
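# With the defaults above this works out to (illustrative arithmetic):
# sum(dynamic_range) = 44 + 220 + 450 = 714 streams per trial set,
# so number = 5 * 714 + 1000 = 4570 samples per workload.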
#http noise
import sys
sys.path.append('../')
import config
create_noise_workload(config.traces[80]['eval'], number,'http_noise.pickle')
#dns noise
create_noise_workload(config.traces[53]['eval'], number, 'dns_noise.pickle')
#atphttpd workload
import polygraph.worm_gen.atphttpd as atphttpd
create_workload(atphttpd.ATPhttpd().generate(number), 'atphttpd.pickle')
#apache knacker workload
import polygraph.worm_gen.apache_host as apache_host
create_workload(apache_host.ApacheHost().generate(number), 'apache.pickle')
#lion (dns tsig) workload
import polygraph.worm_gen.bindTSIG as bindTSIG
create_workload(bindTSIG.bindTSIG().generate(number), 'tsig.pickle')
#clet workload
import polygraph.worm_gen.clet as clet
create_workload(clet.Clet().generate(number), 'clet.pickle')
| epl-1.0 |
iychoi/syndicate | old/ms/django_lib/gatewayforms.py | 2 | 4283 | '''
John Whelchel
Summer 2013
Forms used just for gateways (AG, RG, and UG).
'''
from django import forms
from django_lib.forms import LONGEST_CHAR_FIELD, LONGEST_PASS_FIELD, LONGEST_JSON_FIELD, ReadOnlyWidget
LARGEST_PORT = 65535
class ModifyGatewayConfig(forms.Form):
json_config = forms.FileField(required=False,
label="Gateway Configuration"
)
class ChangeVolume(forms.Form):
volume_name = forms.CharField(label="New Volume name",
max_length=LONGEST_CHAR_FIELD)
class ModifyGatewayLocation(forms.Form):
host = forms.CharField(label="New Gateway host",
max_length = LONGEST_CHAR_FIELD)
port = forms.IntegerField(label="New Port number",
max_value=LARGEST_PORT)
class GatewayRemoveVolume(forms.Form):
volume_name = forms.CharField(label="Volume name",
widget=ReadOnlyWidget(),
required=False,
max_length=LONGEST_CHAR_FIELD)
remove = forms.BooleanField(label="Remove",
required=False)
class GatewayAddVolume(forms.Form):
volume_name = forms.CharField(label="Volume name",
max_length=LONGEST_CHAR_FIELD)
class DeleteGateway(forms.Form):
confirm_delete = forms.BooleanField(required=True,
label="Yes, I understand that this action is permament and my gateway will be gone.")
g_password = forms.CharField(label="Gateway password",
max_length=LONGEST_PASS_FIELD,
widget=forms.PasswordInput,
help_text="You must also own this gateway to delete it.")
class CreateGateway(forms.Form):
g_name = forms.CharField(label="Gateway name",
initial="My Gateway",
max_length=LONGEST_CHAR_FIELD,
help_text="Your gateway's name cannot be changed later.")
g_password = forms.CharField(label="Gateway password",
max_length=LONGEST_CHAR_FIELD,
widget=forms.PasswordInput)
host = forms.CharField(label="Host name",
max_length=LONGEST_CHAR_FIELD,)
port = forms.IntegerField(label="Port number",
max_value=LARGEST_PORT)
class CreateUG(CreateGateway):
volume_name = forms.CharField(label="Volume name (optional)",
required=False,
max_length=LONGEST_CHAR_FIELD)
read_write = forms.BooleanField(required=False,
label="UG can write to other gateways.")
class CreateAG(CreateGateway):
json_config = forms.FileField(required=False,
label="Gateway Configuration",
help_text="If no file is specified, blank config will be used.")
json_config_text = forms.CharField(required=False,
max_length=LONGEST_JSON_FIELD,
widget=forms.Textarea,
label="Gateway Configuration (alternate)",
help_text="This can also be used to manually config the gateway with text in JSON format. The upload file will take priority however.")
class CreateRG(CreateGateway):
json_config = forms.FileField(required=False,
label="Gateway Configuration",
help_text="If no file is specified, blank config will be used.")
json_config_text = forms.CharField(required=False,
max_length=LONGEST_JSON_FIELD,
widget=forms.Textarea,
label="Gateway Configuration (alternate)",
help_text="This can also be used to manually config the gateway with text in JSON format. The upload file will take priority however.")
private = forms.BooleanField(required=False,
label="Replica Gateway is private. It can only be attached to volumes owned by you.") | apache-2.0 |
OmnesRes/onco_lnc | mrna/cox/LGG/patient_info.py | 1 | 7037 | ## A script for extracting info about the patients used in the analysis
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This call will only work if you are running python from the command line.
##If you are not running from the command line manually type in your paths.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
f=open(os.path.join(BASE_DIR,'tcga_data','LGG','clinical','nationwidechildrens.org_clinical_follow_up_v1.0_lgg.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file) contains the most recent
## follow up data. This code checks if the patient has already been loaded into the list, and if so, takes the more recent data.
## This required an empty value in the list initialization.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
if clinical1[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[death_column]):
clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
elif re.search('^[0-9]+$',i[alive_column]):
clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
else:
pass
else:
if re.search('^[0-9]+$',i[death_column]):
clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
elif re.search('^[0-9]+$',i[alive_column]):
clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
else:
pass
## Removing the empty value.
clinical=clinical1[1:]
## Grade, sex, and age information were taken from the "clinical_patient" file. A dictionary was created for sex and grade.
more_clinical={}
grade_dict={}
grade_dict['G2']=2
grade_dict['G3']=3
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','LGG','clinical','nationwidechildrens.org_clinical_patient_lgg.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
grade_column=columns.index('tumor_grade')
sex_column=columns.index('gender')
age_column=columns.index('age_at_initial_pathologic_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
clinical4=[]
data=[i.split('\t') for i in f]
for i in data:
try:
more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
if re.search('^[0-9]+$',i[death_column]):
clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
elif re.search('^[0-9]+$',i[alive_column]):
clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
else:
pass
except:
pass
new_clinical=[]
##It is possible that the clinical data in the clinical_patient file is more up to date than the follow_up files
##All the clinical data is merged, keeping whichever entry is the most up to date
for i in clinical4:
if i[0] not in [j[0] for j in clinical]:
new_clinical.append(i)
else:
if i[1]<=clinical[[j[0] for j in clinical].index(i[0])][1]:
new_clinical.append(clinical[[j[0] for j in clinical].index(i[0])])
else:
new_clinical.append(i)
##also do the reverse since clinical can contain patients not included in clinical4
for i in clinical:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in new_clinical if i[1]>0]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
final_clinical=[]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','LGG','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
## The normalized data files are used
if 'genes.normalized_results' in i[0]:
if i[1].split('-')[3][:-1]=='01':
x=''.join([k+j for k,j in zip(['','-','-'],i[1].split('-')[:3])])
TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
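## Illustrative example of the mapping built above (the barcode is made up):
## a file listed for sample 'TCGA-AB-1234-01A-...' ends up under the patient
## key 'TCGA-AB-1234', so TCGA_to_mrna['TCGA-AB-1234'] collects all of that
## patient's primary-tumor mRNA files.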
clinical_and_files=[]
## I only care about patients that have both complete clinical information and mRNA files
for i in final_clinical:
if TCGA_to_mrna.has_key(i[0]):
## The mRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
else:
pass
##print average age at diagnosis
age=np.mean([i[5] for i in clinical_and_files])
##print number of males
males=len([i for i in clinical_and_files if i[4]==0])
##print number of females
females=len([i for i in clinical_and_files if i[4]==1])
##to get the median survival we need to call survfit from r
##prepare variables for R
ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files])
##need to create a dummy variable group
ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files])
##need a vector for deaths
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files])
res=ro.r('survfit(Surv(times,died) ~ as.factor(group))')
#the number of events(deaths) is the fourth column of the output
deaths=str(res).split('\n')[-2].strip().split()[3]
#the median survival time is the fifth column of the output
median=str(res).split('\n')[-2].strip().split()[4]
##write data to a file
f=open('patient_info.txt','w')
f.write('Average Age')
f.write('\t')
f.write('Males')
f.write('\t')
f.write('Females')
f.write('\t')
f.write('Deaths')
f.write('\t')
f.write('Median Survival')
f.write('\n')
f.write(str(age))
f.write('\t')
f.write(str(males))
f.write('\t')
f.write(str(females))
f.write('\t')
f.write(deaths)
f.write('\t')
f.write(median)
f.close()
| mit |
shootstar/ctest | ceilometer/compute/manager.py | 2 | 3043 | # -*- encoding: utf-8 -*-
#
# Copyright © 2012-2013 eNovance <[email protected]>
#
# Author: Julien Danjou <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
from ceilometer import agent
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer import extension_manager
from ceilometer import nova_client
from ceilometer.openstack.common import log
LOG = log.getLogger(__name__)
class PollingTask(agent.PollingTask):
def poll_and_publish_instances(self, instances):
with self.publish_context as publisher:
for instance in instances:
if getattr(instance, 'OS-EXT-STS:vm_state', None) != 'error':
# TODO(yjiang5) passing counters to get_counters to avoid
# polling all counters one by one
for pollster in self.pollsters:
try:
LOG.info("Polling pollster %s", pollster.name)
publisher(list(pollster.obj.get_counters(
self.manager,
instance)))
except Exception as err:
LOG.warning('Continue after error from %s: %s',
pollster.name, err)
LOG.exception(err)
def poll_and_publish(self):
self.poll_and_publish_instances(
self.manager.nv.instance_get_all_by_host(cfg.CONF.host))
class AgentManager(agent.AgentManager):
def __init__(self):
super(AgentManager, self).__init__(
extension_manager.ActivatedExtensionManager(
namespace='ceilometer.poll.compute',
disabled_names=cfg.CONF.disabled_compute_pollsters,
),
)
self._inspector = virt_inspector.get_hypervisor_inspector()
self.nv = nova_client.Client()
def create_polling_task(self):
return PollingTask(self)
def setup_notifier_task(self):
"""For nova notifier usage."""
task = PollingTask(self)
for pollster in self.pollster_manager.extensions:
task.add(
pollster,
self.pipeline_manager.pipelines)
self.notifier_task = task
def poll_instance(self, context, instance):
"""Poll one instance."""
self.notifier_task.poll_and_publish_instances([instance])
@property
def inspector(self):
return self._inspector
| apache-2.0 |
sshleifer/object_detection_kitti | inception/inception/slim/variables.py | 23 | 10358 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains convenience wrappers for creating variables in TF-Slim.
The variables module is typically used for defining model variables from the
ops routines (see slim.ops). Such variables are used for training, evaluation
and inference of models.
All the variables created through this module would be added to the
MODEL_VARIABLES collection, if you create a model variable outside slim, it can
be added with slim.variables.add_variable(external_variable, reuse).
Usage:
weights_initializer = tf.truncated_normal_initializer(stddev=0.01)
l2_regularizer = lambda t: losses.l2_loss(t, weight=0.0005)
weights = variables.variable('weights',
shape=[100, 100],
initializer=weights_initializer,
regularizer=l2_regularizer,
device='/cpu:0')
biases = variables.variable('biases',
shape=[100],
initializer=tf.zeros_initializer(),
device='/cpu:0')
# More complex example.
net = slim.ops.conv2d(input, 32, [3, 3], scope='conv1')
net = slim.ops.conv2d(net, 64, [3, 3], scope='conv2')
with slim.arg_scope([variables.variable], restore=False):
net = slim.ops.conv2d(net, 64, [3, 3], scope='conv3')
# Get all model variables from all the layers.
model_variables = slim.variables.get_variables()
# Get all model variables from a specific layer, i.e. 'conv1'.
conv1_variables = slim.variables.get_variables('conv1')
# Get all weights from all the layers.
weights = slim.variables.get_variables_by_name('weights')
# Get all bias from all the layers.
biases = slim.variables.get_variables_by_name('biases')
# Get all variables to restore.
# (i.e. only those created by 'conv1' and 'conv2')
variables_to_restore = slim.variables.get_variables_to_restore()
************************************************
* Initializing model variables from a checkpoint
************************************************
# Create some variables.
v1 = slim.variables.variable(name="v1", ..., restore=False)
v2 = slim.variables.variable(name="v2", ...) # By default restore=True
...
# The list of variables to restore should only contain 'v2'.
variables_to_restore = slim.variables.get_variables_to_restore()
restorer = tf.train.Saver(variables_to_restore)
with tf.Session() as sess:
# Restore variables from disk.
restorer.restore(sess, "/tmp/model.ckpt")
print("Model restored.")
# Do some work with the model
...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from inception.slim import scopes
# Collection containing all the variables created using slim.variables
MODEL_VARIABLES = '_model_variables_'
# Collection containing the slim.variables that are created with restore=True.
VARIABLES_TO_RESTORE = '_variables_to_restore_'
def add_variable(var, restore=True):
"""Adds a variable to the MODEL_VARIABLES collection.
Optionally it will add the variable to the VARIABLES_TO_RESTORE collection.
Args:
var: a variable.
restore: whether the variable should be added to the
VARIABLES_TO_RESTORE collection.
"""
collections = [MODEL_VARIABLES]
if restore:
collections.append(VARIABLES_TO_RESTORE)
for collection in collections:
if var not in tf.get_collection(collection):
tf.add_to_collection(collection, var)
def get_variables(scope=None, suffix=None):
"""Gets the list of variables, filtered by scope and/or suffix.
Args:
scope: an optional scope for filtering the variables to return.
suffix: an optional suffix for filtering the variables to return.
Returns:
a copied list of variables with scope and suffix.
"""
candidates = tf.get_collection(MODEL_VARIABLES, scope)[:]
if suffix is not None:
candidates = [var for var in candidates if var.op.name.endswith(suffix)]
return candidates
def get_variables_to_restore():
"""Gets the list of variables to restore.
Returns:
a copied list of variables.
"""
return tf.get_collection(VARIABLES_TO_RESTORE)[:]
def get_variables_by_name(given_name, scope=None):
"""Gets the list of variables that were given that name.
Args:
given_name: name given to the variable without scope.
scope: an optional scope for filtering the variables to return.
Returns:
a copied list of variables with the given name and prefix.
"""
return get_variables(scope=scope, suffix=given_name)
def get_unique_variable(name):
"""Gets the variable uniquely identified by that name.
Args:
name: a name that uniquely identifies the variable.
Returns:
a tensorflow variable.
Raises:
ValueError: if no variable uniquely identified by the name exists.
"""
candidates = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, name)
if not candidates:
raise ValueError("Couldn't find variable %s" % name)
for candidate in candidates:
if candidate.op.name == name:
return candidate
raise ValueError('Variable %s does not uniquely identify a variable' % name)
class VariableDeviceChooser(object):
"""Slim device chooser for variables.
When using a parameter server it will assign them in a round-robin fashion.
When not using a parameter server it allows GPU:0 placement otherwise CPU:0.
"""
def __init__(self,
num_parameter_servers=0,
ps_device='/job:ps',
placement='CPU:0'):
"""Initialize VariableDeviceChooser.
Args:
num_parameter_servers: number of parameter servers.
ps_device: string representing the parameter server device.
placement: string representing the placement of the variable either CPU:0
or GPU:0. When using parameter servers forced to CPU:0.
"""
self._num_ps = num_parameter_servers
self._ps_device = ps_device
self._placement = placement if num_parameter_servers == 0 else 'CPU:0'
self._next_task_id = 0
def __call__(self, op):
device_string = ''
if self._num_ps > 0:
task_id = self._next_task_id
self._next_task_id = (self._next_task_id + 1) % self._num_ps
device_string = '%s/task:%d' % (self._ps_device, task_id)
device_string += '/%s' % self._placement
return device_string
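# A minimal usage sketch (an assumption, mirroring the docstring above): the
# chooser can be passed anywhere a device string or device function is accepted.
#
#   chooser = VariableDeviceChooser(num_parameter_servers=2)
#   weights = variable('weights', shape=[100, 100], device=chooser)
#   biases = variable('biases', shape=[100], device=chooser)
#
# With two parameter servers, successive variables land on
# '/job:ps/task:0/CPU:0', '/job:ps/task:1/CPU:0', '/job:ps/task:0/CPU:0', ...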
# TODO(sguada) Remove once get_variable is able to colocate op.devices.
def variable_device(device, name):
"""Fix the variable device to colocate its ops."""
if callable(device):
var_name = tf.get_variable_scope().name + '/' + name
var_def = tf.NodeDef(name=var_name, op='Variable')
device = device(var_def)
if device is None:
device = ''
return device
@scopes.add_arg_scope
def global_step(device=''):
"""Returns the global step variable.
Args:
device: Optional device to place the variable. It can be an string or a
function that is called to get the device for the variable.
Returns:
the tensor representing the global step variable.
"""
global_step_ref = tf.get_collection(tf.GraphKeys.GLOBAL_STEP)
if global_step_ref:
return global_step_ref[0]
else:
collections = [
VARIABLES_TO_RESTORE,
tf.GraphKeys.GLOBAL_VARIABLES,
tf.GraphKeys.GLOBAL_STEP,
]
# Get the device for the variable.
with tf.device(variable_device(device, 'global_step')):
return tf.get_variable('global_step', shape=[], dtype=tf.int64,
initializer=tf.zeros_initializer(),
trainable=False, collections=collections)
@scopes.add_arg_scope
def variable(name, shape=None, dtype=tf.float32, initializer=None,
regularizer=None, trainable=True, collections=None, device='',
restore=True):
"""Gets an existing variable with these parameters or creates a new one.
It also adds itself to a group with its name.
Args:
name: the name of the new or existing variable.
shape: shape of the new or existing variable.
dtype: type of the new or existing variable (defaults to `DT_FLOAT`).
initializer: initializer for the variable if one is created.
regularizer: a (Tensor -> Tensor or None) function; the result of
applying it on a newly created variable will be added to the collection
GraphKeys.REGULARIZATION_LOSSES and can be used for regularization.
trainable: If `True` also add the variable to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
collections: A list of collection names to which the Variable will be added.
Note that the variable is always also added to the tf.GraphKeys.GLOBAL_VARIABLES
and MODEL_VARIABLES collections.
device: Optional device to place the variable. It can be an string or a
function that is called to get the device for the variable.
restore: whether the variable should be added to the
VARIABLES_TO_RESTORE collection.
Returns:
The created or existing variable.
"""
collections = list(collections or [])
# Make sure variables are added to tf.GraphKeys.GLOBAL_VARIABLES and MODEL_VARIABLES
collections += [tf.GraphKeys.GLOBAL_VARIABLES, MODEL_VARIABLES]
# Add to VARIABLES_TO_RESTORE if necessary
if restore:
collections.append(VARIABLES_TO_RESTORE)
# Remove duplicates
collections = set(collections)
# Get the device for the variable.
with tf.device(variable_device(device, name)):
return tf.get_variable(name, shape=shape, dtype=dtype,
initializer=initializer, regularizer=regularizer,
trainable=trainable, collections=collections)
| apache-2.0 |
PingaxAnalytics/koob_auth | testproject/ptvs_virtualenv_proxy.py | 1 | 4166 | # ############################################################################
#
# Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
# ###########################################################################
import datetime
import os
import sys
import traceback
if sys.version_info[0] == 3:
def to_str(value):
return value.decode(sys.getfilesystemencoding())
def execfile(path, global_dict):
"""Execute a file"""
with open(path, 'r') as f:
code = f.read()
code = code.replace('\r\n', '\n') + '\n'
exec (code, global_dict)
else:
def to_str(value):
return value.encode(sys.getfilesystemencoding())
def log(txt):
"""Logs fatal errors to a log file if WSGI_LOG env var is defined"""
log_file = os.environ.get('WSGI_LOG')
if log_file:
f = open(log_file, 'a+')
try:
f.write('%s: %s' % (datetime.datetime.now(), txt))
finally:
f.close()
ptvsd_secret = os.getenv('WSGI_PTVSD_SECRET')
if ptvsd_secret:
log('Enabling ptvsd ...\n')
try:
import ptvsd
try:
ptvsd.enable_attach(ptvsd_secret)
log('ptvsd enabled.\n')
except:
log('ptvsd.enable_attach failed\n')
except ImportError:
log('error importing ptvsd.\n');
def get_wsgi_handler(handler_name):
if not handler_name:
raise Exception('WSGI_ALT_VIRTUALENV_HANDLER env var must be set')
if not isinstance(handler_name, str):
handler_name = to_str(handler_name)
module_name, _, callable_name = handler_name.rpartition('.')
should_call = callable_name.endswith('()')
callable_name = callable_name[:-2] if should_call else callable_name
name_list = [(callable_name, should_call)]
handler = None
last_tb = ''
while module_name:
try:
handler = __import__(module_name, fromlist=[name_list[0][0]])
last_tb = ''
for name, should_call in name_list:
handler = getattr(handler, name)
if should_call:
handler = handler()
break
except ImportError:
module_name, _, callable_name = module_name.rpartition('.')
should_call = callable_name.endswith('()')
callable_name = callable_name[:-2] if should_call else callable_name
name_list.insert(0, (callable_name, should_call))
handler = None
last_tb = ': ' + traceback.format_exc()
if handler is None:
raise ValueError('"%s" could not be imported%s' % (handler_name, last_tb))
return handler
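# Illustrative handler strings this resolver accepts (the names are examples,
# not requirements of this file):
#   'django.core.wsgi.get_wsgi_application()'  -> import the module, call the factory
#   'myapp.wsgi.application'                   -> import the module, use the attribute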
activate_this = os.getenv('WSGI_ALT_VIRTUALENV_ACTIVATE_THIS')
if not activate_this:
raise Exception('WSGI_ALT_VIRTUALENV_ACTIVATE_THIS is not set')
def get_virtualenv_handler():
log('Activating virtualenv with %s\n' % activate_this)
execfile(activate_this, dict(__file__=activate_this))
log('Getting handler %s\n' % os.getenv('WSGI_ALT_VIRTUALENV_HANDLER'))
handler = get_wsgi_handler(os.getenv('WSGI_ALT_VIRTUALENV_HANDLER'))
log('Got handler: %r\n' % handler)
return handler
def get_venv_handler():
log('Activating venv with executable at %s\n' % activate_this)
import site
sys.executable = activate_this
old_sys_path, sys.path = sys.path, []
site.main()
sys.path.insert(0, '')
for item in old_sys_path:
if item not in sys.path:
sys.path.append(item)
log('Getting handler %s\n' % os.getenv('WSGI_ALT_VIRTUALENV_HANDLER'))
handler = get_wsgi_handler(os.getenv('WSGI_ALT_VIRTUALENV_HANDLER'))
log('Got handler: %r\n' % handler)
return handler | mit |
MiniSEC/GRR_clone | client/client_actions/linux/ko_patcher.py | 6 | 7766 | #!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
"""A kernel module rewriter.
This is a hack that rewrites kernel modules such that they can be loaded on
kernels they were not compiled for.
"""
import os
import platform
import struct
import sys
import logging
from grr.lib import flags
class KernelObjectPatcher(object):
"""The kernel object patching class."""
ELF_MAGIC = "\x7F\x45\x4C\x46"
def __init__(self, log=False):
self.log = log
def GetSectionOffsets(self, file_data):
"""Returns offsets and lengths of all the sections of this elf file."""
if file_data[:4] != self.ELF_MAGIC:
raise RuntimeError("Not an elf file.")
section_header_offset = struct.unpack("<Q", file_data[40:40+8])[0]
(section_header_size, num_section_headers,
string_table) = struct.unpack("<HHH", file_data[58:58+6])
# Read the string table first.
start = section_header_offset + string_table * section_header_size
header_data = file_data[start:start + section_header_size]
offset, size = struct.unpack("<IIQQQQIIQQ", header_data)[4:6]
string_data = file_data[offset:offset+size]
sections = {}
for start in xrange(section_header_offset,
section_header_offset + (
num_section_headers * section_header_size),
section_header_size):
header_data = file_data[start: start + section_header_size]
header = struct.unpack("<IIQQQQIIQQ", header_data)
name_offset, data_offset, data_size = header[0], header[4], header[5]
name = string_data[name_offset:string_data.find("\x00", name_offset)]
if data_size:
sections[name] = (data_offset, data_size)
return sections
def ParseVersionSection(self, version_data):
"""Returns the checksums found for all the imports."""
checksums = {}
while version_data:
act_version = version_data[:0x40]
version_data = version_data[0x40:]
function = act_version[8:]
chksum = act_version[:8]
checksums[function] = chksum
return checksums
def GetImportedVersions(self, file_data, sections):
if "__versions" not in sections:
return {}
start, length = sections["__versions"]
version_data = file_data[start:start+length]
return self.ParseVersionSection(version_data)
def GetModuleVersion(self, file_data, sections):
info_start, info_length = sections[".modinfo"]
modinfo = file_data[info_start:info_start + info_length]
for line in modinfo.split("\x00"):
if line.startswith("vermagic"):
return line[len("vermagic") + 1:]
msg = "Could not find vermagic string."
logging.info(msg)
raise RuntimeError(msg)
def _RewriteModinfo(self, modinfo, obj_kernel_version, this_kernel_version,
info_strings=None, to_remove=None):
new_modinfo = ""
for line in modinfo.split("\x00"):
if not line:
continue
if to_remove and line.split("=")[0] == to_remove:
continue
if info_strings is not None:
info_strings.add(line.split("=")[0])
if line.startswith("vermagic"):
line = line.replace(obj_kernel_version, this_kernel_version)
new_modinfo += line + "\x00"
return new_modinfo
def RewriteModinfo(self, file_data, sections, obj_kernel_version,
this_kernel_version):
"""This rewrites the modinfo section and updates the kernel version."""
info_start, info_length = sections[".modinfo"]
modinfo = file_data[info_start:info_start + info_length]
info_strings = set()
new_modinfo = self._RewriteModinfo(modinfo, obj_kernel_version,
this_kernel_version, info_strings)
if len(new_modinfo) <= info_length:
new_modinfo += "\x00" * (info_length - len(new_modinfo))
return new_modinfo
logging.info("Rewritten modinfo section is too big.")
info_strings -= set(["vermagic", "srcversion", "depends"])
try:
to_remove = info_strings.pop()
except KeyError:
msg = "Could not remove anything from modinfo, giving up."
logging.info(msg)
raise RuntimeError(msg)
logging.info("Will try to remove %s from modinfo.", to_remove)
return self._RewriteModinfo(modinfo, obj_kernel_version,
this_kernel_version, to_remove=to_remove)
def GetKnownImports(self, needed_versions):
"""Parses the driver directory to find valid import checksums."""
needed_versions = set(needed_versions)
found_versions = {}
driver_path = "/lib/modules/%s/kernel/drivers" % platform.uname()[2]
num_files = 0
for (directory, _, files) in os.walk(driver_path):
for filename in files:
if filename[-3:] == ".ko":
try:
fd = open("%s/%s" % (directory, filename), "rb")
num_files += 1
data = fd.read()
sections = self.GetSectionOffsets(data)
versions = self.GetImportedVersions(data, sections)
found_versions.update(versions)
if set(found_versions.keys()) >= needed_versions:
logging.info("All imports found, gathered data from %d modules.",
num_files)
return found_versions
except IOError:
pass
missing = needed_versions - set(found_versions.keys())
msg = "Imports %s could not be found." % ",".join(missing)
logging.info(msg)
raise RuntimeError(msg)
def ReplaceSection(self, file_data, offset, new_section_data):
result = file_data[:offset]
result += new_section_data
result += file_data[offset + len(new_section_data):]
return result
def Patch(self, file_data, force_patch=False):
try:
sections = self.GetSectionOffsets(file_data)
obj_version = self.GetModuleVersion(file_data, sections)
obj_kernel_version = obj_version.split(" ")[0]
this_kernel_version = platform.uname()[2]
logging.info("Module version is %s, kernel version is %s.",
obj_kernel_version, this_kernel_version)
if obj_kernel_version == this_kernel_version and not force_patch:
return file_data
needed_imports = self.GetImportedVersions(file_data, sections)
good_imports = self.GetKnownImports(needed_imports)
rewritten_version_data = ""
for function in needed_imports.keys():
if needed_imports[function] == good_imports[function]:
logging.info("Matching checksum for %s.",
function.replace("\x00", ""))
else:
logging.info("Checksum mismatch for %s.",
function.replace("\x00", ""))
rewritten_version_data += good_imports[function] + function
rewritten_modinfo_data = self.RewriteModinfo(
file_data, sections, obj_kernel_version, this_kernel_version)
file_data = self.ReplaceSection(file_data, sections["__versions"][0],
rewritten_version_data)
file_data = self.ReplaceSection(file_data, sections[".modinfo"][0],
rewritten_modinfo_data)
return file_data
except (RuntimeError, KeyError) as e:
logging.info(str(e))
# Something didn't work, we can just use the data we were sent.
return file_data
def main(_):
if len(sys.argv) < 3:
print "Usage: python %s <kernel_module> <outfile>" % sys.argv[0]
exit()
in_fd = open(sys.argv[1], "rb")
out_data = KernelObjectPatcher(log=True).Patch(in_fd.read(), force_patch=True)
with open(sys.argv[2], "wb") as out_fd:
out_fd.write(out_data)
logging.info("Kernel Object patched.")
if __name__ == "__main__":
flags.StartMain(main)
| apache-2.0 |
40223117cda/w16cdaa | static/Brython3.1.3-20150514-095342/Lib/site-packages/pygame/display.py | 603 | 25179 | #!/usr/bin/env python
'''Pygame module to control the display window and screen.
This module offers control over the pygame display. Pygame has a single display
Surface that is either contained in a window or runs full screen. Once you
create the display you treat it as a regular Surface. Changes are not
immediately visible onscreen, you must choose one of the two flipping functions
to update the actual display.
The pygame display can actually be initialized in one of several modes. By
default the display is a basic software driven framebuffer. You can request
special modules like hardware acceleration and OpenGL support. These are
controlled by flags passed to pygame.display.set_mode().
Pygame can only have a single display active at any time. Creating a new one
with pygame.display.set_mode() will close the previous display. If precise
control is needed over the pixel format or display resolutions, use the
functions pygame.display.mode_ok(), pygame.display.list_modes(), and
pygame.display.Info() to query information about the display.
Once the display Surface is created, the functions from this module
affect the single existing display. The Surface becomes invalid if the module
is uninitialized. If a new display mode is set, the existing Surface will
automatically switch to operate on the new display.
When the display mode is set, several events are placed on the pygame
event queue. pygame.QUIT is sent when the user has requested the program
to shutdown. The window will receive pygame.ACTIVEEVENT events as the
display gains and loses input focus. If the display is set with the
pygame.RESIZABLE flag, pygame.VIDEORESIZE events will be sent when the
user adjusts the window dimensions. Hardware displays that draw directly
to the screen will get pygame.VIDEOEXPOSE events when portions of the
window must be redrawn.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import sys
from SDL import *
import pygame.base
import pygame.pkgdata
import pygame.surface
#brython
import pygame.constants
from browser import window
#from javascript import console
_display_surface = None
_icon_was_set = 0
_icon_defaultname = 'pygame_icon.bmp'
_init_video=False
def __PYGAMEinit__():
pygame.base.register_quit(_display_autoquit)
def _display_autoquit():
global _display_surface
_display_surface = None
def init():
'''Initialize the display module.
Initializes the pygame display module. The display module cannot do
anything until it is initialized. This is usually handled for you
automatically when you call the higher level `pygame.init`.
Pygame will select from one of several internal display backends when it
is initialized. The display mode will be chosen depending on the platform
and permissions of the current user. Before the display module is initialized
the environment variable SDL_VIDEODRIVER can be set to control which
backend is used. The systems with multiple choices are listed here.
Windows
windib, directx
Unix
x11, dga, fbcon, directfb, ggi, vgl, svgalib, aalib
On some platforms it is possible to embed the pygame display into an already
existing window. To do this, the environment variable SDL_WINDOWID must be
set to a string containing the window id or handle. The environment variable
is checked when the pygame display is initialized. Be aware that there can
be many strange side effects when running in an embedded display.
It is harmless to call this more than once, repeated calls have no effect.
'''
pygame.base._video_autoinit()
__PYGAMEinit__()
def quit():
'''Uninitialize the display module.
This will shut down the entire display module. This means any active
displays will be closed. This will also be handled automatically when the
program exits.
It is harmless to call this more than once, repeated calls have no effect.
'''
pygame.base._video_autoquit()
_display_autoquit()
def get_init():
'''Get status of display module initialization.
:rtype: bool
:return: True if SDL's video system is currently initialized.
'''
return SDL_WasInit(SDL_INIT_VIDEO) != 0
def set_mode(resolution, flags=0, depth=0):
'''Initialize a window or screen for display.
This function will create a display Surface. The arguments passed in are
requests for a display type. The actual created display will be the best
possible match supported by the system.
The `resolution` argument is a pair of numbers representing the width and
height. The `flags` argument is a collection of additional options.
The `depth` argument represents the number of bits to use for color.
The Surface that gets returned can be drawn to like a regular Surface but
changes will eventually be seen on the monitor.
It is usually best to not pass the depth argument. It will default to the
best and fastest color depth for the system. If your game requires a
specific color format you can control the depth with this argument. Pygame
will emulate an unavailable color depth which can be slow.
When requesting fullscreen display modes, sometimes an exact match for the
requested resolution cannot be made. In these situations pygame will select
the closest compatible match. The returned surface will still always match
the requested resolution.
The flags argument controls which type of display you want. There are
several to choose from, and you can even combine multiple types using the
bitwise or operator, (the pipe "|" character). If you pass 0 or no flags
argument it will default to a software driven window. Here are the display
flags you will want to choose from:
pygame.FULLSCREEN
create a fullscreen display
pygame.DOUBLEBUF
recommended for HWSURFACE or OPENGL
pygame.HWSURFACE
hardware accelerated, only in FULLSCREEN
pygame.OPENGL
create an opengl renderable display
pygame.RESIZABLE
display window should be sizeable
pygame.NOFRAME
display window will have no border or controls
:Parameters:
- `resolution`: int, int
- `flags`: int
- `depth`: int
:rtype: `Surface`
'''
global _display_surface
w, h = resolution
if w <= 0 or h <= 0:
raise pygame.base.error('Cannot set 0 sized display mode')
if not SDL_WasInit(SDL_INIT_VIDEO):
init()
if flags & SDL_OPENGL:
if flags & SDL_DOUBLEBUF:
flags &= ~SDL_DOUBLEBUF
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1)
else:
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 0)
if depth:
SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, depth)
surf = SDL_SetVideoMode(w, h, depth, flags)
if SDL_GL_GetAttribute(SDL_GL_DOUBLEBUFFER):
surf.flags |= SDL_DOUBLEBUF
else:
if not depth:
flags |= SDL_ANYFORMAT
surf = SDL_SetVideoMode(w, h, depth, flags)
title, icontitle = SDL_WM_GetCaption()
if not title:
SDL_WM_SetCaption('pygame window', 'pygame')
SDL_PumpEvents()
if _display_surface:
_display_surface._surf = surf
else:
#_display_surface = pygame.surface.Surface(surf=surf)
_display_surface = pygame.surface.Surface(dim=(w,h))
document['pydiv'] <= _display_surface.canvas
if sys.platform != 'darwin':
if not _icon_was_set:
try:
file = pygame.pkgdata.getResource(_icon_defaultname)
iconsurf = pygame.image.load(file)
SDL_SetColorKey(iconsurf._surf, SDL_SRCCOLORKEY, 0)
set_icon(iconsurf)
except IOError:
# Not worth dying over.
pass
return _display_surface
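# Typical usage (a sketch of the standard pygame API, not specific to this port):
#
#   screen = pygame.display.set_mode((640, 480))
#   screen.fill((0, 0, 0))
#   pygame.display.flip()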
def get_surface():
'''Get current display surface.
Returns a `Surface` object representing the current display. Will
return None if called before the display mode is set.
:rtype: `Surface`
'''
return _display_surface
def flip():
'''Update the full display surface to the screen.
This will update the contents of the entire display. If your display mode
is using the flags pygame.HWSURFACE and pygame.DOUBLEBUF, this will wait
for a vertical retrace and swap the surfaces. If you are using a different
type of display mode, it will simply update the entire contents of the
surface.
When using an pygame.OPENGL display mode this will perform a gl buffer
swap.
'''
pass
_video_init_check()
screen = SDL_GetVideoSurface()
if not screen:
raise pygame.base.error('Display mode not set')
if screen.flags & SDL_OPENGL:
SDL_GL_SwapBuffers()
else:
SDL_Flip(screen)
def _crop_rect(w, h, rect):
if rect.x >= w or rect.y >= h or \
rect.x + rect.w <= 0 or rect.y + rect.h <= 0:
return None
rect.x = max(rect.x, 0)
rect.y = max(rect.y, 0)
rect.w = min(rect.x + rect.w, w) - rect.x
rect.h = min(rect.y + rect.h, h) - rect.y
return rect
def update(*rectangle):
'''Update portions of the screen for software displays.
This function is like an optimized version of pygame.display.flip() for
software displays. It allows only a portion of the screen to be updated,
instead of the entire area. If no argument is passed it updates the entire
Surface area like `flip`.
You can pass the function a single rectangle, or a sequence of rectangles.
It is more efficient to pass many rectangles at once than to call update
multiple times with a single rectangle or a partial list of rectangles. If passing
a sequence of rectangles it is safe to include None values in the list,
which will be skipped.
This call cannot be used on pygame.OPENGL displays and will generate an
exception.
:Parameters:
`rectangle` : Rect or sequence of Rect
Area(s) to update
'''
# Undocumented: also allows argument tuple to represent one rect;
# e.g. update(0, 0, 10, 10) or update((0, 0), (10, 10))
_video_init_check()
screen = SDL_GetVideoSurface()
if not screen:
raise pygame.base.error('Display mode not set')
if screen.flags & SDL_OPENGL:
raise pygame.base.error('Cannot update an OPENGL display')
if not rectangle:
SDL_UpdateRect(screen, 0, 0, 0, 0)
else:
#w, h = screen.w, screen.h  # SDL attribute names; the brython Surface exposes width/height
w, h = screen.width, screen.height
try:
rect = pygame.rect._rect_from_object(rectangle)._r
rect = _crop_rect(w, h, rect)
if rect:
SDL_UpdateRect(screen, rect.x, rect.y, rect.w, rect.h)
except TypeError:
rectangle = rectangle[0]
rects = [_crop_rect(w, h, pygame.rect._rect_from_object(r)._r) \
for r in rectangle if r]
SDL_UpdateRects(screen, rects)
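# Example calls (sketch): update the whole screen, one rect, or several rects.
#
#   pygame.display.update()
#   pygame.display.update(pygame.Rect(0, 0, 10, 10))
#   pygame.display.update([pygame.Rect(0, 0, 10, 10), None, (20, 20, 5, 5)])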
def get_driver():
'''Get name of the pygame display backend.
Pygame chooses one of many available display backends when it is
initialized. This returns the internal name used for the display backend.
This can be used to provide limited information about what display
capabilities might be accelerated.
:rtype: str
'''
_video_init_check()
return SDL_VideoDriverName()
def Info():
'''Create a video display information object.
Creates a simple object containing several attributes to describe the
current graphics environment. If this is called before
`set_mode` some platforms can provide information about the default
display mode. This can also be called after setting the display mode to
verify specific display options were satisfied.
:see: `VideoInfo`
:rtype: `VideoInfo`
'''
_video_init_check()
return VideoInfo()
class VideoInfo:
'''Video display information.
:Ivariables:
`hw` : bool
True if the display is hardware accelerated.
`wm` : bool
True if windowed display modes can be used.
`video_mem` : int
The amount of video memory on the display, in megabytes. 0 if
unknown.
`bitsize` : int
Number of bits used to store each pixel.
`bytesize` : int
Number of bytes used to store each pixel.
`masks` : (int, int, int, int)
RGBA component mask.
`shifts` : (int, int, int, int)
RGBA component shift amounts.
`losses` : (int, int, int, int)
Number of bits lost from a 32 bit depth for each RGBA component.
`blit_hw` : bool
True if hardware Surface blitting is accelerated
`blit_hw_CC` : bool
True if hardware Surface colorkey blitting is accelerated
`blit_hw_A` : bool
True if hardware Surface pixel alpha blitting is accelerated
`blit_sw` : bool
True if software Surface blitting is accelerated
`blit_sw_CC` : bool
True if software Surface colorkey blitting is accelerated
`blit_sw_A` : bool
True if software Surface pixel alpha blitting is accelerated
'''
def __init__(self):
#brython
#info = SDL_GetVideoInfo()
info=None
if not info:
raise pygame.base.error('Could not retrieve video info')
self.hw = info.hw_available
self.wm = info.wm_available
self.blit_hw = info.blit_hw
self.blit_hw_CC = info.blit_hw_CC
self.blit_hw_A = info.blit_hw_A
self.blit_sw = info.blit_sw
self.blit_sw_CC = info.blit_sw_CC
self.blit_sw_A = info.blit_sw_A
self.blit_fill = info.blit_fill
self.video_mem = info.video_mem
self.bitsize = info.vfmt.BitsPerPixel
self.bytesize = info.vfmt.BytesPerPixel
self.masks = (info.vfmt.Rmask, info.vfmt.Gmask,
info.vfmt.Bmask, info.vfmt.Amask)
self.shifts = (info.vfmt.Rshift, info.vfmt.Gshift,
info.vfmt.Bshift, info.vfmt.Ashift)
self.losses = (info.vfmt.Rloss, info.vfmt.Gloss,
info.vfmt.Bloss, info.vfmt.Aloss)
def __str__(self):
return ('<VideoInfo(hw = %d, wm = %d,video_mem = %d\n' + \
' blit_hw = %d, blit_hw_CC = %d, blit_hw_A = %d,\n'
' blit_sw = %d, blit_sw_CC = %d, blit_sw_A = %d,\n'
' bitsize = %d, bytesize = %d,\n'
' masks = (%d, %d, %d, %d),\n'
' shifts = (%d, %d, %d, %d),\n'
' losses = (%d, %d, %d, %d)>\n') % \
(self.hw, self.wm, self.video_mem,
self.blit_hw, self.blit_hw_CC, self.blit_hw_A,
self.blit_sw, self.blit_sw_CC, self.blit_sw_A,
self.bitsize, self.bytesize,
self.masks[0], self.masks[1], self.masks[2], self.masks[3],
self.shifts[0], self.shifts[1], self.shifts[2], self.shifts[3],
self.losses[0], self.losses[1], self.losses[2], self.losses[3])
def __repr__(self):
return str(self)
def get_wm_info():
'''Get settings from the system window manager.
:note: Currently unimplemented, returns an empty dict.
:rtype: dict
'''
_video_init_check()
return {}
def list_modes(depth=0, flags=pygame.constants.FULLSCREEN):
'''Get list of available fullscreen modes.
This function returns a list of possible dimensions for a specified color
depth. The return value will be an empty list if no display modes are
available with the given arguments. A return value of -1 means that any
requested resolution should work (this is likely the case for windowed
modes). Mode sizes are sorted from biggest to smallest.
If depth is 0, SDL will choose the current/best color depth for the
display. The flags defaults to pygame.FULLSCREEN, but you may need to add
additional flags for specific fullscreen modes.
:rtype: list of (int, int), or -1
:return: list of (width, height) pairs, or -1 if any mode is suitable.
'''
_video_init_check()
#brython
#format = SDL_PixelFormat()
#format.BitsPerPixel = depth
#brython
#if not format.BitsPerPixel:
# format.BitsPerPixel = SDL_GetVideoInfo().vfmt.BitsPerPixel
#brython
    #rects = SDL_ListModes(format, flags)
    # SDL_ListModes is disabled above in this brython port; report -1 so that
    # callers treat any requested resolution as acceptable (the windowed case).
    rects = -1
    if rects == -1:
return -1
return [(r.w, r.h) for r in rects]
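# Illustrative usage sketch (set_mode is assumed to be the function defined
# elsewhere in this module): pick the largest fullscreen mode, or fall back to
# an arbitrary windowed size when -1 says any resolution should work.
#
#   modes = list_modes()
#   if modes == -1:
#       set_mode((640, 480))
#   else:
#       set_mode(modes[0], pygame.constants.FULLSCREEN)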
def mode_ok(size, flags=0, depth=0):
'''Pick the best color depth for a display mode
This function uses the same arguments as pygame.display.set_mode(). It is
    used to determine if a requested display mode is available. It will return
0 if the display mode cannot be set. Otherwise it will return a pixel
depth that best matches the display asked for.
Usually the depth argument is not passed, but some platforms can support
multiple display depths. If passed it will hint to which depth is a better
match.
The most useful flags to pass will be pygame.HWSURFACE, pygame.DOUBLEBUF,
and maybe pygame.FULLSCREEN. The function will return 0 if these display
flags cannot be set.
:rtype: int
:return: depth, in bits per pixel, or 0 if the requested mode cannot be
set.
'''
_video_init_check()
if not depth:
depth = SDL_GetVideoInfo().vfmt.BitsPerPixel
return SDL_VideoModeOK(size[0], size[1], depth, flags)
def gl_set_attribute(flag, value):
'''Set special OpenGL attributes.
When calling `pygame.display.set_mode` with the OPENGL flag,
pygame automatically handles setting the OpenGL attributes like
color and doublebuffering. OpenGL offers several other attributes
you may want control over. Pass one of these attributes as the
flag, and its appropriate value.
This must be called before `pygame.display.set_mode`.
The OPENGL flags are: GL_ALPHA_SIZE, GL_DEPTH_SIZE, GL_STENCIL_SIZE,
GL_ACCUM_RED_SIZE, GL_ACCUM_GREEN_SIZE, GL_ACCUM_BLUE_SIZE,
GL_ACCUM_ALPHA_SIZE GL_MULTISAMPLEBUFFERS, GL_MULTISAMPLESAMPLES,
GL_STEREO.
:Parameters:
- `flag`: int
- `value`: int
'''
_video_init_check()
SDL_GL_SetAttribute(flag, value)
def gl_get_attribute(flag):
'''Get special OpenGL attributes.
After calling `pygame.display.set_mode` with the OPENGL flag
you will likely want to check the value of any special OpenGL
attributes you requested. You will not always get what you
requested.
See `gl_set_attribute` for a list of flags.
:Parameters:
- `flag`: int
:rtype: int
'''
_video_init_check()
return SDL_GL_GetAttribute(flag)
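# Illustrative sketch (assuming GL_DEPTH_SIZE and OPENGL are exposed via
# pygame.constants, as FULLSCREEN is above): request a 24-bit depth buffer
# before creating an OpenGL display, then read back what was actually granted.
#
#   gl_set_attribute(pygame.constants.GL_DEPTH_SIZE, 24)
#   set_mode((640, 480), pygame.constants.OPENGL)
#   depth_bits = gl_get_attribute(pygame.constants.GL_DEPTH_SIZE)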
def get_active():
'''Get state of display mode
    Returns True if the current display is active on the screen. This is
    done with the call to ``pygame.display.set_mode()``. It is
    potentially subject to the activity of a running window manager.
    Calling `set_mode` will change all existing display surfaces
    to reference the new display mode. The old display surface will
be lost after this call.
'''
brython
return SDL_GetAppState() & SDL_APPACTIVE != 0
def iconify():
'''Iconify the display surface.
    Request that the window for the display surface be iconified or hidden. Not
    all systems and displays support an iconified display. The function will
    return True if successful.
When the display is iconified pygame.display.get_active() will return
False. The event queue should receive a pygame.APPACTIVE event when the
window has been iconified.
:rtype: bool
:return: True on success
'''
_video_init_check()
try:
SDL_WM_IconifyWindow()
return True
except SDL_Exception:
return False
def toggle_fullscreen():
'''Switch between fullscreen and windowed displays.
Switches the display window between windowed and fullscreen modes. This
function only works under the unix x11 video driver. For most situations
it is better to call pygame.display.set_mode() with new display flags.
:rtype: bool
'''
_video_init_check()
screen = SDL_GetVideoSurface()
try:
SDL_WM_ToggleFullScreen(screen)
return True
except SDL_Exception:
return False
def set_gamma(red, green=None, blue=None):
'''Change the hardware gamma ramps.
Set the red, green, and blue gamma values on the display hardware. If the
green and blue arguments are not passed, they will both be the same as
    red. Not all systems and hardware support gamma ramps; if the function
succeeds it will return True.
A gamma value of 1.0 creates a linear color table. Lower values will
darken the display and higher values will brighten.
:Parameters:
`red` : float
Red gamma value
`green` : float
Green gamma value
`blue` : float
Blue gamma value
:rtype: bool
'''
brython
_video_init_check()
if not green or not blue:
green = red
blue = red
try:
SDL_SetGamma(red, green, blue)
return True
except SDL_Exception:
return False
def set_gamma_ramp(red, green, blue):
'''Change the hardware gamma ramps with a custom lookup.
Set the red, green, and blue gamma ramps with an explicit lookup table.
    Each argument should be a sequence of 256 integers. The integers should
    range between 0 and 0xffff. Not all systems and hardware support gamma
    ramps; if the function succeeds it will return True.
:Parameters:
`red` : sequence of int
Sequence of 256 ints in range [0, 0xffff] giving red component
lookup.
`green` : sequence of int
Sequence of 256 ints in range [0, 0xffff] giving green component
lookup.
`blue` : sequence of int
Sequence of 256 ints in range [0, 0xffff] giving blue component
lookup.
:rtype: bool
'''
_video_init_check()
try:
SDL_SetGammaRamp(red, green, blue)
return True
except SDL_Exception:
return False
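# Illustrative sketch: an identity ramp is 256 values spread over [0, 0xffff],
# one table per colour channel, as described in the docstring above.
#
#   ramp = [int(i * 0xffff / 255) for i in range(256)]
#   set_gamma_ramp(ramp, ramp, ramp)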
def set_icon(surface):
'''Change the system image for the display window.
Sets the runtime icon the system will use to represent the display window.
All windows default to a simple pygame logo for the window icon.
You can pass any surface, but most systems want a smaller image around
32x32. The image can have colorkey transparency which will be passed to
the system.
Some systems do not allow the window icon to change after it has been
shown. This function can be called before `set_mode` to
create the icon before the display mode is set.
:Parameters:
`surface` : `Surface`
Surface containing image to set.
'''
global _icon_was_set
pygame.base._video_autoinit()
SDL_WM_SetIcon(surface._surf, None)
_icon_was_set = 1
def set_caption(title, icontitle=None):
'''Set the current window caption.
If the display has a window title, this function will change the name on
the window. Some systems support an alternate shorter title to be used for
minimized displays.
:Parameters:
`title` : unicode
Window caption
`icontitle` : unicode
Icon caption, if supported
'''
if not icontitle:
icontitle = title
SDL_WM_SetCaption(title, icontitle)
def get_caption():
'''Get the current window caption.
Returns the title and icontitle for the display Surface. These will often
be the same value.
:rtype: (unicode, unicode)
:return: title, icontitle
'''
# XXX deviation from pygame, don't return () if title == None
#return SDL_WM_GetCaption()
return "", ""
def set_palette(palette=None):
'''Set the display color palette for indexed displays.
This will change the video display color palette for 8bit displays. This
does not change the palette for the actual display Surface, only the
palette that is used to display the Surface. If no palette argument is
passed, the system default palette will be restored. The palette is a
sequence of RGB triplets.
:Parameters:
`palette` : sequence of (int, int, int)
Sequence having at most 256 RGB triplets.
'''
_video_init_check()
surf = SDL_GetVideoSurface()
if not surf:
raise pygame.base.error('No display mode is set')
if surf.format.BytesPerPixel != 1 or not surf.format._palette:
raise pygame.base.error('Display mode is not colormapped')
    if not palette:
        SDL_SetPalette(surf, SDL_PHYSPAL, surf.format.palette.colors, 0)
        return
length = min(surf.format.palette.ncolors, len(palette))
colors = [SDL_Color(r, g, b) for r, g, b in palette[:length]]
SDL_SetPalette(surf, SDL_PHYSPAL, colors, 0)
def _video_init_check():
if not SDL_WasInit(SDL_INIT_VIDEO):
raise pygame.base.error('video system not initialized')
| gpl-3.0 |
airbnb/streamalert | streamalert/apps/_apps/aliyun.py | 1 | 8258 | """
Copyright 2018-present Airbnb, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from datetime import datetime
import json
import re
from aliyunsdkcore.client import AcsClient
from aliyunsdkcore.acs_exception.exceptions import ServerException, ClientException
from aliyunsdkactiontrail.request.v20171204 import LookupEventsRequest
from . import AppIntegration, StreamAlertApp, get_logger
LOGGER = get_logger(__name__)
@StreamAlertApp
class AliyunApp(AppIntegration):
"""An app that collects events from the ActionTrail feature of Aliyun.
Results are paginated, with a NextToken field provided that is used in subsequent queries.
The result of a successful api call is json whose outermost schema is
{
"EndTime": <end of the time range of events>,
"NextToken": <the token to use to request the next page of data>,
"RequestId": <the ID of the request>,
"StartTime": <start of the time range of events>,
"Events": [entries],
}
If there are no more events in the queried range, the 'NextToken' element is not present.
Resource:
https://www.alibabacloud.com/help/doc-detail/28849.htm
"""
# The maximum number of results to be returned. Valid values: 0 to 50.
_MAX_RESULTS = 50
    # In aliyun sdk PR https://github.com/aliyun/aliyun-openapi-python-sdk/pull/216, the timeout
    # was split into separate connection and read timeouts, and the default connection timeout
    # was lowered from 10 to 5 seconds. We have noticed connections to the server timing out
    # more often recently, so increasing the default timeouts is helpful.
_CONNECT_TIMEOUT = 15
_READ_TIMEOUT = 15
def __init__(self, event, context):
super(AliyunApp, self).__init__(event, context)
auth = self._config.auth
self.client = AcsClient(auth['access_key_id'], auth['access_key_secret'], auth['region_id'])
self.request = LookupEventsRequest.LookupEventsRequest()
self.request.set_MaxResults(self._MAX_RESULTS)
self.request.set_StartTime(self._config.last_timestamp)
# Source code can be found here https://github.com/aliyun/aliyun-openapi-python-sdk/
# blob/master/aliyun-python-sdk-actiontrail/aliyunsdkactiontrail/request/v20171204/
# LookupEventsRequest.py
self.request.set_EndTime(datetime.utcnow().strftime(self.date_formatter()))
self.request.set_connect_timeout(self._CONNECT_TIMEOUT)
self.request.set_read_timeout(self._READ_TIMEOUT)
@classmethod
def _type(cls):
return 'actiontrail'
@classmethod
def service(cls):
return 'aliyun'
@classmethod
def date_formatter(cls):
"""Return a format string for a date, ie: 2014-05-26T12:00:00Z
This format is consistent with the format used by the Aliyun API:
https://www.alibabacloud.com/help/doc-detail/28849.htm
"""
return '%Y-%m-%dT%H:%M:%SZ'
def _gather_logs(self):
"""Fetch ActionTrail events and return a list of events
Example response from do_action_with_exception method
{
'EndTime': '2019-08-22T04:41:32Z',
'NextToken': '2',
'RequestId': '562D9C08-E766-4038-B49F-B0D2BE1980FE',
'StartTime': '2019-08-01T04:31:52Z',
'Events': [{
'eventId': '60.152_1566447558068_1247',
'eventVersion': '1',
'acsRegion': 'cn-hangzhou',
'additionalEventData': {
'mfaChecked': 'true',
'callbackUrl': 'https://home.console.aliyun.com/'
},
'eventType': 'ConsoleSignin',
'errorMessage': 'success',
'eventTime': '2019-08-22T04:19:18Z',
'eventName': 'ConsoleSignin',
'userIdentity': {
'userName': 'dead_joke',
'type': 'ram-user',
'principalId': '222222222222222222',
'accountId': '1111111111111111'
},
'eventSource': 'signin.aliyun.com',
'requestId': '60.152_1566447558068_1247',
'userAgent': 'some browser version',
'sourceIpAddress': '1.1.1.1',
'serviceName': 'AasSub'
}, {
'eventId': '029B39F0-5E23-4931-B4C9-BA72C7261ADF',
...
'eventTime': '2019-08-21T22:26:09Z',
...
}]
}
"""
try:
response = self.client.do_action_with_exception(self.request)
json_response = json.loads(response)
            # Note: the ActionTrail API returns events in sorted order, latest
            # events first. There is still a small chance that not all logs are fetched
            # when more logs remain to be pulled but the lambda function timeout is
            # reached; the remaining logs will be lost because the last_timestamp
            # is updated to "EndTime" during the first lambda function call.
            #
            # To lower the possibility of data loss, it is suggested to use a longer timeout
            # for the lambda function (aliyun app) and to schedule the app more frequently,
            # e.g. every 10 mins
self._last_timestamp = json_response['EndTime']
if 'NextToken' in json_response:
self._more_to_poll = True
self.request.set_NextToken(json_response['NextToken'])
else:
self._more_to_poll = False
return json_response['Events']
except (ServerException, ClientException) as e:
LOGGER.exception("%s error occurred", e.get_error_type())
raise
@classmethod
def _required_auth_info(cls):
"""Required credentials for access to the resources"""
def region_validator(region):
"""Region names pulled from https://www.alibabacloud.com/help/doc-detail/40654.htm"""
if region in {'cn-qingdao', 'cn-beijing', 'cn-zhangjiakou', 'cn-huhehaote',
'cn-hangzhou', 'cn-shanghai', 'cn-shenzhen', 'cn-hongkong',
'ap-southeast-1', 'ap-southeast-2', 'ap-southeast-3', 'ap-southeast-5',
'ap-northeast-1', 'ap-south-1', 'us-west-1', 'us-east-1',
'eu-central-1', 'me-east-1'}:
return region
return False
return {
'access_key_id': {
'description': ('The access key id generated for a RAM user. This '
'should be a string of alphanumeric characters.'),
'format': re.compile(r'.*')
},
'access_key_secret': {
'description': ('The access key secret generated for a RAM user. This '
'should be a string of alphanumeric characters.'),
'format': re.compile(r'.*')
},
'region_id': {
'description': ('The region for the Aliyun API. This should be '
'a string like \'ap-northeast-1\'.'),
'format': region_validator
},
}
@classmethod
def _sleep_seconds(cls):
"""Return the number of seconds this polling function should sleep for
between requests to avoid failed requests. The Aliyun documentation doesn't
list limits on the requests portion of the actionTrail feature, so the only
limit is the general limit on Aliyun API requests, which is no more than
100 per second. We can set this value to 0 safely.
Resource:
https://www.alibabacloud.com/help/doc-detail/29474.htm
Returns:
int: Number of seconds the polling function should sleep for
"""
return 0
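# Illustrative sketch (placeholder credentials and region): the same ActionTrail
# query performed by AliyunApp can be issued directly with the SDK objects
# imported above.
#
#   client = AcsClient('<access_key_id>', '<access_key_secret>', 'ap-northeast-1')
#   request = LookupEventsRequest.LookupEventsRequest()
#   request.set_MaxResults(50)
#   request.set_StartTime('2019-08-01T00:00:00Z')
#   request.set_EndTime(datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%SZ'))
#   response = json.loads(client.do_action_with_exception(request))
#   events, next_token = response['Events'], response.get('NextToken')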
| apache-2.0 |
bingwen/shadowsocks | setup.py | 929 | 1321 | import codecs
from setuptools import setup
with codecs.open('README.rst', encoding='utf-8') as f:
long_description = f.read()
setup(
name="shadowsocks",
version="2.8.2",
license='http://www.apache.org/licenses/LICENSE-2.0',
    description="A fast tunnel proxy that helps you get through firewalls",
author='clowwindy',
author_email='[email protected]',
url='https://github.com/shadowsocks/shadowsocks',
packages=['shadowsocks', 'shadowsocks.crypto'],
package_data={
'shadowsocks': ['README.rst', 'LICENSE']
},
install_requires=[],
entry_points="""
[console_scripts]
sslocal = shadowsocks.local:main
ssserver = shadowsocks.server:main
""",
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Internet :: Proxy Servers',
],
long_description=long_description,
)
| apache-2.0 |
agaffney/ansible | test/support/windows-integration/plugins/modules/win_acl.py | 85 | 4112 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Phil Schwartz <[email protected]>
# Copyright: (c) 2015, Trond Hindenes
# Copyright: (c) 2015, Hans-Joachim Kliemeck <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = r'''
---
module: win_acl
version_added: "2.0"
short_description: Set file/directory/registry permissions for a system user or group
description:
- Add or remove rights/permissions for a given user or group for the specified
      file, folder, registry key or AppPool identities.
options:
path:
description:
- The path to the file or directory.
type: str
required: yes
user:
description:
- User or Group to add specified rights to act on src file/folder or
registry key.
type: str
required: yes
state:
description:
- Specify whether to add C(present) or remove C(absent) the specified access rule.
type: str
choices: [ absent, present ]
default: present
type:
description:
- Specify whether to allow or deny the rights specified.
type: str
required: yes
choices: [ allow, deny ]
rights:
description:
- The rights/permissions that are to be allowed/denied for the specified
user or group for the item at C(path).
- If C(path) is a file or directory, rights can be any right under MSDN
FileSystemRights U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.filesystemrights.aspx).
- If C(path) is a registry key, rights can be any right under MSDN
RegistryRights U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.registryrights.aspx).
type: str
required: yes
inherit:
description:
- Inherit flags on the ACL rules.
- Can be specified as a comma separated list, e.g. C(ContainerInherit),
C(ObjectInherit).
- For more information on the choices see MSDN InheritanceFlags enumeration
at U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.inheritanceflags.aspx).
- Defaults to C(ContainerInherit, ObjectInherit) for Directories.
type: str
choices: [ ContainerInherit, ObjectInherit ]
propagation:
description:
- Propagation flag on the ACL rules.
- For more information on the choices see MSDN PropagationFlags enumeration
at U(https://msdn.microsoft.com/en-us/library/system.security.accesscontrol.propagationflags.aspx).
type: str
choices: [ InheritOnly, None, NoPropagateInherit ]
default: "None"
notes:
- If adding ACL's for AppPool identities (available since 2.3), the Windows
Feature "Web-Scripting-Tools" must be enabled.
seealso:
- module: win_acl_inheritance
- module: win_file
- module: win_owner
- module: win_stat
author:
- Phil Schwartz (@schwartzmx)
- Trond Hindenes (@trondhindenes)
- Hans-Joachim Kliemeck (@h0nIg)
'''
EXAMPLES = r'''
- name: Restrict write and execute access to User Fed-Phil
win_acl:
user: Fed-Phil
path: C:\Important\Executable.exe
type: deny
rights: ExecuteFile,Write
- name: Add IIS_IUSRS allow rights
win_acl:
path: C:\inetpub\wwwroot\MySite
user: IIS_IUSRS
rights: FullControl
type: allow
state: present
inherit: ContainerInherit, ObjectInherit
propagation: 'None'
- name: Set registry key right
win_acl:
path: HKCU:\Bovine\Key
user: BUILTIN\Users
rights: EnumerateSubKeys
type: allow
state: present
inherit: ContainerInherit, ObjectInherit
propagation: 'None'
- name: Remove FullControl AccessRule for IIS_IUSRS
win_acl:
path: C:\inetpub\wwwroot\MySite
user: IIS_IUSRS
rights: FullControl
type: allow
state: absent
inherit: ContainerInherit, ObjectInherit
propagation: 'None'
- name: Deny Intern
win_acl:
path: C:\Administrator\Documents
user: Intern
rights: Read,Write,Modify,FullControl,Delete
type: deny
state: present
'''
| gpl-3.0 |
fiunchinho/ansible-modules-extras | notification/telegram.py | 42 | 2670 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2016, Artem Feofanov <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
module: telegram
version_added: "2.2"
author: "Artem Feofanov (@tyouxa)"
short_description: module for sending notifications via telegram
description:
- Send notifications via telegram bot, to a verified group or user
notes:
- You will require a telegram account and create telegram bot to use this module.
options:
msg:
description:
- What message you wish to send.
required: true
token:
description:
- Token identifying your telegram bot.
required: true
chat_id:
description:
- Telegram group or user chat_id
required: true
"""
EXAMPLES = """
send a message to chat in playbook
- telegram: token=bot9999999:XXXXXXXXXXXXXXXXXXXXXXX
chat_id=000000
msg="Ansible task finished"
"""
RETURN = """
msg:
description: The message you attempted to send
returned: success
type: string
sample: "Ansible task finished"
"""
import urllib
def main():
module = AnsibleModule(
argument_spec = dict(
token = dict(type='str',required=True,no_log=True),
chat_id = dict(type='str',required=True,no_log=True),
msg = dict(type='str',required=True)),
supports_check_mode=True
)
token = urllib.quote(module.params.get('token'))
chat_id = urllib.quote(module.params.get('chat_id'))
msg = urllib.quote(module.params.get('msg'))
url = 'https://api.telegram.org/' + token + '/sendMessage?text=' + msg + '&chat_id=' + chat_id
if module.check_mode:
module.exit_json(changed=False)
response, info = fetch_url(module, url)
if info['status'] == 200:
module.exit_json(changed=True)
else:
module.fail_json(msg="failed to send message, return status=%s" % str(info['status']))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
if __name__ == '__main__':
main()
| gpl-3.0 |
saurabh6790/pow-app | accounts/doctype/sales_invoice/pos.py | 29 | 1618 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
@webnotes.whitelist()
def get_items(price_list, sales_or_purchase, item=None, item_group=None):
condition = ""
args = {"price_list": price_list}
if sales_or_purchase == "Sales":
condition = "i.is_sales_item='Yes'"
else:
condition = "i.is_purchase_item='Yes'"
if item_group and item_group != "All Item Groups":
condition += " and i.item_group='%s'" % item_group
if item:
condition += " and CONCAT(i.name, i.item_name) like %(name)s"
args["name"] = "%%%s%%" % item
return webnotes.conn.sql("""select i.name, i.item_name, i.image,
item_det.ref_rate, item_det.currency
from `tabItem` i LEFT JOIN
(select item_code, ref_rate, currency from
`tabItem Price` where price_list=%s) item_det
ON
item_det.item_code=i.name
where
%s""" % ('%(price_list)s', condition), args, as_dict=1)
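# Example call (hypothetical price list and search term): fetch sales items
# from the "Standard Selling" price list whose name contains "apple".
#
#   get_items("Standard Selling", "Sales", item="apple", item_group="All Item Groups")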
@webnotes.whitelist()
def get_item_code(barcode_serial_no):
input_via = "serial_no"
item_code = webnotes.conn.sql("""select name, item_code from `tabSerial No` where
name=%s""", (barcode_serial_no), as_dict=1)
if not item_code:
input_via = "barcode"
item_code = webnotes.conn.sql("""select name from `tabItem` where barcode=%s""",
(barcode_serial_no), as_dict=1)
if item_code:
return item_code, input_via
else:
webnotes.throw("Invalid Barcode / Serial No")
@webnotes.whitelist()
def get_mode_of_payment():
return webnotes.conn.sql("""select name from `tabMode of Payment`""", as_dict=1) | agpl-3.0 |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/webdriver/pylib/test/selenium/webdriver/common/webserver.py | 20 | 4133 | # Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A simple web server for testing purpose.
It serves the testing html pages that are needed by the webdriver unit tests."""
import logging
import os
import socket
import threading
import urllib
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
def updir():
dirname = os.path.dirname
return dirname(dirname(__file__))
LOGGER = logging.getLogger(__name__)
WEBDRIVER = os.environ.get("WEBDRIVER", updir())
HTML_ROOT = os.path.join(WEBDRIVER, "../../../../../../common/src/web")
if not os.path.isdir(HTML_ROOT):
message = ("Can't find 'common_web' directory, try setting WEBDRIVER"
" environment variable WEBDRIVER:" + WEBDRIVER + " HTML_ROOT:" + HTML_ROOT )
LOGGER.error(message)
assert 0, message
DEFAULT_PORT = 8000
class HtmlOnlyHandler(BaseHTTPRequestHandler):
"""Http handler."""
def do_GET(self):
"""GET method handler."""
try:
path = self.path[1:].split('?')[0]
html = open(os.path.join(HTML_ROOT, path))
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(html.read())
html.close()
except IOError:
self.send_error(404, 'File Not Found: %s' % path)
def log_message(self, format, *args):
"""Override default to avoid trashing stderr"""
pass
class SimpleWebServer(object):
"""A very basic web server."""
def __init__(self, port=DEFAULT_PORT):
self.stop_serving = False
port = port
while True:
try:
self.server = HTTPServer(
('', port), HtmlOnlyHandler)
self.port = port
break
except socket.error:
                LOGGER.debug("port %d is in use, trying the next one"
% port)
port += 1
self.thread = threading.Thread(target=self._run_web_server)
def _run_web_server(self):
"""Runs the server loop."""
LOGGER.debug("web server started")
while not self.stop_serving:
self.server.handle_request()
self.server.server_close()
def start(self):
"""Starts the server."""
self.thread.start()
def stop(self):
"""Stops the server."""
self.stop_serving = True
try:
# This is to force stop the server loop
urllib.URLopener().open("http://localhost:%d" % self.port)
except Exception:
pass
LOGGER.info("Shutting down the webserver")
self.thread.join()
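# Minimal usage sketch (the page name is a placeholder for one of the test
# pages under HTML_ROOT): serve in the background, fetch a page, then stop.
#
#   server = SimpleWebServer()
#   server.start()
#   page = urllib.urlopen("http://localhost:%d/xhtmlTest.html" % server.port).read()
#   server.stop()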
def main(argv=None):
from optparse import OptionParser
from time import sleep
if argv is None:
import sys
argv = sys.argv
parser = OptionParser("%prog [options]")
parser.add_option("-p", "--port", dest="port", type="int",
help="port to listen (default: %s)" % DEFAULT_PORT,
default=DEFAULT_PORT)
opts, args = parser.parse_args(argv[1:])
if args:
parser.error("wrong number of arguments") # Will exit
server = SimpleWebServer(opts.port)
server.start()
print "Server started on port %s, hit CTRL-C to quit" % opts.port
try:
while 1:
sleep(0.1)
except KeyboardInterrupt:
pass
if __name__ == "__main__":
main()
| mit |
adalekin/django-balancer | docs/conf.py | 4 | 7200 | # -*- coding: utf-8 -*-
#
# django-balancer documentation build configuration file, created by
# sphinx-quickstart on Mon Oct 18 10:17:32 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
DOCS_BASE = os.path.dirname(__file__)
sys.path.insert(0, os.path.abspath(os.path.join(DOCS_BASE, '..')))
import balancer
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'django-balancer'
copyright = u'2010, Brandon Konkle'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = balancer.get_version(short=True)
# The full version, including alpha/beta/rc tags.
release = balancer.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-balancerdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'django-balancer.tex', u'django-balancer Documentation',
u'Brandon Konkle', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'django-balancer', u'django-balancer Documentation',
[u'Brandon Konkle'], 1)
]
| bsd-3-clause |
htygithub/bokeh | bokeh/state.py | 2 | 8592 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2015, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
""" Encapsulate implicit state that is useful for Bokeh plotting APIs.
Generating output for Bokeh plots requires coordinating several things:
:class:`Documents <bokeh.document>`
Group together Bokeh models that may be shared between plots (e.g.,
range or data source objects) into one common namespace.
:class:`Resources <bokeh.resources>`
Control how JavaScript and CSS for the client library BokehJS are
included and used in the generated output.
It is certainly possible to handle the configuration of these objects
manually, and several examples of this can be found in ``examples/glyphs``.
When developing sophisticated applications, it may be necessary or
desirable to work at this level. However, for general use this would
quickly become burdensome. The ``bokeh.state`` module provides a ``State``
class that encapsulates these objects and ensures their proper configuration.
"""
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Stdlib imports
from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
import os
# Third-party imports
# Bokeh imports
from .document import Document
from .resources import Resources
from .client import DEFAULT_SESSION_ID
from bokeh.resources import DEFAULT_SERVER_HTTP_URL
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
class State(object):
""" Manage state related to controlling Bokeh output.
Attributes:
document (:class:`bokeh.document.Document`): a default document to use
file (dict) : default filename, resources, etc. for file output
This dictionary has the following form::
{
'filename' : # filename to use when saving
'resources' : # resources configuration
'title' : # a title for the HTML document
}
notebook (bool) : whether to generate notebook output
session_id (str) : a default session ID for Bokeh server output
autoadd (bool) : whether certain functions automatically add roots to the document
autosave (bool) : whether certain functions automatically save the file
autopush (bool): whether certain functions automatically push to the server
"""
def __init__(self):
# TODO (havocp) right now there's no way to turn off autoadd
self._autoadd = True
self.reset()
@property
def document(self):
return self._document
@document.setter
def document(self, doc):
self._document = doc
@property
def file(self):
return self._file
@property
def notebook(self):
return self._notebook
@property
def session_id(self):
return self._session_id
@property
def server_url(self):
return self._server_url
@property
def autoadd(self):
return self._autoadd
@property
def autosave(self):
return self._autosave
@property
def autopush(self):
return self._autopush
def _reset_keeping_doc(self):
self._file = None
self._notebook = False
self._session_id = None
self._server_url = None
self._autosave = False
self._autopush = False
def _reset_with_doc(self, doc):
self._document = doc
self._reset_keeping_doc()
def reset(self):
''' Deactivate all currently active output modes and set curdoc() to a fresh empty Document.
Subsequent calls to show() will not render until a new output mode is
activated.
Returns:
None
'''
self._reset_with_doc(Document())
def output_file(self, filename, title="Bokeh Plot", autosave=False, mode="cdn", root_dir=None):
"""Output to a standalone HTML file.
Does not change the current Document from curdoc(). File,
server, and notebook output may be active at the same
time, so this does not clear the effects of
output_server() or output_notebook().
Args:
filename (str) : a filename for saving the HTML document
title (str, optional) : a title for the HTML document
autosave (bool, optional) : whether to automatically save (default: False)
If True, then Bokeh plotting APIs may opt to automatically
save the file more frequently (e.g., after any plotting
command). If False, then the file is only saved upon calling
:func:`show` or :func:`save`.
mode (str, optional) : how to include BokehJS (default: ``'cdn'``)
One of: ``'inline'``, ``'cdn'``, ``'relative(-dev)'`` or
``'absolute(-dev)'``. See :class:`bokeh.resources.Resources` for more details.
root_dir (str, optional) : root directory to use for 'absolute' resources. (default: None)
This value is ignored for other resource types, e.g. ``INLINE`` or
``CDN``.
.. warning::
This output file will be overwritten on every save, e.g., each time
show() or save() is invoked, or any time a Bokeh plotting API
causes a save, if ``autosave`` is True.
"""
self._file = {
'filename' : filename,
'resources' : Resources(mode=mode, root_dir=root_dir),
'title' : title
}
self._autosave = autosave
if os.path.isfile(filename):
logger.info("Session output file '%s' already exists, will be overwritten." % filename)
def output_notebook(self):
"""Generate output in Jupyter/IPython notebook cells.
This does not clear the effects of output_file() or
output_server(), it only adds an additional output
destination (publishing to IPython Notebook). If
output_server() has been called, the notebook output cell
will be loaded from a Bokeh server; otherwise, Bokeh
publishes HTML to the notebook directly.
Returns:
None
"""
self._notebook = True
def output_server(self, session_id=DEFAULT_SESSION_ID, url="default", autopush=False):
"""Store Bokeh plots and objects on a Bokeh server.
File, server, and notebook output may be active at the
same time, so this does not clear the effects of
output_file() or output_notebook(). output_server()
changes the behavior of output_notebook(), so the notebook
will load output cells from the server rather than
receiving them as inline HTML.
Args:
session_id (str) : Name of session to push on Bokeh server
Any existing session with the same name will be overwritten.
url (str, optional) : base URL of the Bokeh server (default: "default")
If "default" use the default localhost URL.
autopush (bool, optional) : whether to automatically push (default: False)
If True, then Bokeh plotting APIs may opt to automatically
push the document more frequently (e.g., after any plotting
command). If False, then the document is only pushed upon calling
:func:`show` or :func:`push`.
Returns:
None
.. warning::
Calling this function will replace any existing server-side document in the named session.
"""
if url == "default":
url = DEFAULT_SERVER_HTTP_URL
self._session_id = session_id
self._server_url = url
self._autopush = autopush
| bsd-3-clause |
sysadmin75/ansible-modules-core | cloud/rackspace/rax_dns_record.py | 16 | 11455 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# This is a DOCUMENTATION stub specific to this module, it extends
# a documentation fragment located in ansible.utils.module_docs_fragments
DOCUMENTATION = '''
---
module: rax_dns_record
short_description: Manage DNS records on Rackspace Cloud DNS
description:
- Manage DNS records on Rackspace Cloud DNS
version_added: 1.5
options:
comment:
description:
- Brief description of the domain. Maximum length of 160 characters
data:
description:
- IP address for A/AAAA record, FQDN for CNAME/MX/NS, or text data for
SRV/TXT
required: True
domain:
description:
- Domain name to create the record in. This is an invalid option when
type=PTR
loadbalancer:
description:
- Load Balancer ID to create a PTR record for. Only used with type=PTR
version_added: 1.7
name:
description:
- FQDN record name to create
required: True
overwrite:
description:
- Add new records if data doesn't match, instead of updating existing
record with matching name. If there are already multiple records with
matching name and overwrite=true, this module will fail.
default: true
version_added: 2.1
priority:
description:
- Required for MX and SRV records, but forbidden for other record types.
If specified, must be an integer from 0 to 65535.
server:
description:
- Server ID to create a PTR record for. Only used with type=PTR
version_added: 1.7
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
ttl:
description:
- Time to live of record in seconds
default: 3600
type:
description:
- DNS record type
choices:
- A
- AAAA
- CNAME
- MX
- NS
- SRV
- TXT
- PTR
required: true
notes:
- "It is recommended that plays utilizing this module be run with
C(serial: 1) to avoid exceeding the API request limit imposed by
the Rackspace CloudDNS API"
- To manipulate a C(PTR) record either C(loadbalancer) or C(server) must be
supplied
- As of version 1.7, the C(type) field is required and no longer defaults to an C(A) record.
- C(PTR) record support was added in version 1.7
author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''
EXAMPLES = '''
- name: Create DNS Records
hosts: all
gather_facts: False
tasks:
- name: Create A record
local_action:
module: rax_dns_record
credentials: ~/.raxpub
domain: example.org
name: www.example.org
data: "{{ rax_accessipv4 }}"
type: A
register: a_record
- name: Create PTR record
local_action:
module: rax_dns_record
credentials: ~/.raxpub
server: "{{ rax_id }}"
name: "{{ inventory_hostname }}"
region: DFW
register: ptr_record
'''
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
def rax_dns_record_ptr(module, data=None, comment=None, loadbalancer=None,
name=None, server=None, state='present', ttl=7200):
changed = False
results = []
dns = pyrax.cloud_dns
if not dns:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if loadbalancer:
item = rax_find_loadbalancer(module, pyrax, loadbalancer)
elif server:
item = rax_find_server(module, pyrax, server)
if state == 'present':
current = dns.list_ptr_records(item)
for record in current:
if record.data == data:
if record.ttl != ttl or record.name != name:
try:
dns.update_ptr_record(item, record, name, data, ttl)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
record.ttl = ttl
record.name = name
results.append(rax_to_dict(record))
break
else:
results.append(rax_to_dict(record))
break
if not results:
record = dict(name=name, type='PTR', data=data, ttl=ttl,
comment=comment)
try:
results = dns.add_ptr_records(item, [record])
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, records=results)
elif state == 'absent':
current = dns.list_ptr_records(item)
for record in current:
if record.data == data:
results.append(rax_to_dict(record))
break
if results:
try:
dns.delete_ptr_records(item, data)
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, records=results)
def rax_dns_record(module, comment=None, data=None, domain=None, name=None,
overwrite=True, priority=None, record_type='A',
state='present', ttl=7200):
"""Function for manipulating record types other than PTR"""
changed = False
dns = pyrax.cloud_dns
if not dns:
module.fail_json(msg='Failed to instantiate client. This '
'typically indicates an invalid region or an '
'incorrectly capitalized region name.')
if state == 'present':
if not priority and record_type in ['MX', 'SRV']:
module.fail_json(msg='A "priority" attribute is required for '
'creating a MX or SRV record')
try:
domain = dns.find(name=domain)
except Exception, e:
module.fail_json(msg='%s' % e.message)
try:
if overwrite:
record = domain.find_record(record_type, name=name)
else:
record = domain.find_record(record_type, name=name, data=data)
except pyrax.exceptions.DomainRecordNotUnique, e:
module.fail_json(msg='overwrite=true and there are multiple matching records')
except pyrax.exceptions.DomainRecordNotFound, e:
try:
record_data = {
'type': record_type,
'name': name,
'data': data,
'ttl': ttl
}
if comment:
record_data.update(dict(comment=comment))
if priority and record_type.upper() in ['MX', 'SRV']:
record_data.update(dict(priority=priority))
record = domain.add_records([record_data])[0]
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
update = {}
if comment != getattr(record, 'comment', None):
update['comment'] = comment
if ttl != getattr(record, 'ttl', None):
update['ttl'] = ttl
if priority != getattr(record, 'priority', None):
update['priority'] = priority
if data != getattr(record, 'data', None):
update['data'] = data
if update:
try:
record.update(**update)
changed = True
record.get()
except Exception, e:
module.fail_json(msg='%s' % e.message)
elif state == 'absent':
try:
domain = dns.find(name=domain)
except Exception, e:
module.fail_json(msg='%s' % e.message)
try:
record = domain.find_record(record_type, name=name, data=data)
except pyrax.exceptions.DomainRecordNotFound, e:
record = {}
pass
except pyrax.exceptions.DomainRecordNotUnique, e:
module.fail_json(msg='%s' % e.message)
if record:
try:
record.delete()
changed = True
except Exception, e:
module.fail_json(msg='%s' % e.message)
module.exit_json(changed=changed, record=rax_to_dict(record))
def main():
argument_spec = rax_argument_spec()
argument_spec.update(
dict(
comment=dict(),
data=dict(required=True),
domain=dict(),
loadbalancer=dict(),
name=dict(required=True),
overwrite=dict(type='bool', default=True),
priority=dict(type='int'),
server=dict(),
state=dict(default='present', choices=['present', 'absent']),
ttl=dict(type='int', default=3600),
type=dict(required=True, choices=['A', 'AAAA', 'CNAME', 'MX', 'NS',
'SRV', 'TXT', 'PTR'])
)
)
module = AnsibleModule(
argument_spec=argument_spec,
required_together=rax_required_together(),
mutually_exclusive=[
['server', 'loadbalancer', 'domain'],
],
required_one_of=[
['server', 'loadbalancer', 'domain'],
],
)
if not HAS_PYRAX:
module.fail_json(msg='pyrax is required for this module')
comment = module.params.get('comment')
data = module.params.get('data')
domain = module.params.get('domain')
loadbalancer = module.params.get('loadbalancer')
name = module.params.get('name')
overwrite = module.params.get('overwrite')
priority = module.params.get('priority')
server = module.params.get('server')
state = module.params.get('state')
ttl = module.params.get('ttl')
record_type = module.params.get('type')
setup_rax_module(module, pyrax, False)
if record_type.upper() == 'PTR':
if not server and not loadbalancer:
module.fail_json(msg='one of the following is required: '
'server,loadbalancer')
rax_dns_record_ptr(module, data=data, comment=comment,
loadbalancer=loadbalancer, name=name, server=server,
state=state, ttl=ttl)
else:
rax_dns_record(module, comment=comment, data=data, domain=domain,
name=name, overwrite=overwrite, priority=priority,
record_type=record_type, state=state, ttl=ttl)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.rax import *
### invoke the module
main()
| gpl-3.0 |
sebalix/OpenUpgrade | addons/base_action_rule/__init__.py | 438 | 1098 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_action_rule
import test_models
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ghostsquad/pytest | testing/test_assertinterpret.py | 30 | 8937 | "PYTEST_DONT_REWRITE"
import pytest, py
from _pytest.assertion import util
def exvalue():
return py.std.sys.exc_info()[1]
def f():
return 2
def test_not_being_rewritten():
assert "@py_builtins" not in globals()
def test_assert():
try:
assert f() == 3
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 == 3\n')
def test_assert_with_explicit_message():
try:
assert f() == 3, "hello"
except AssertionError:
e = exvalue()
assert e.msg == 'hello'
def test_assert_within_finally():
excinfo = pytest.raises(ZeroDivisionError, """
try:
1/0
finally:
i = 42
""")
s = excinfo.exconly()
assert py.std.re.search("division.+by zero", s) is not None
#def g():
# A.f()
#excinfo = getexcinfo(TypeError, g)
#msg = getmsg(excinfo)
#assert msg.find("must be called with A") != -1
def test_assert_multiline_1():
try:
assert (f() ==
3)
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 == 3\n')
def test_assert_multiline_2():
try:
assert (f() == (4,
3)[-1])
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith('assert 2 ==')
def test_in():
try:
assert "hi" in [1, 2]
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 'hi' in")
def test_is():
try:
assert 1 is 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 is 2")
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_attrib():
class Foo(object):
b = 1
i = Foo()
try:
assert i.b == 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 == 2")
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_attrib_inst():
class Foo(object):
b = 1
try:
assert Foo().b == 2
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 1 == 2")
def test_len():
l = list(range(42))
try:
assert len(l) == 100
except AssertionError:
e = exvalue()
s = str(e)
assert s.startswith("assert 42 == 100")
assert "where 42 = len([" in s
def test_assert_non_string_message():
class A:
def __str__(self):
return "hello"
try:
assert 0 == 1, A()
except AssertionError:
e = exvalue()
assert e.msg == "hello"
def test_assert_keyword_arg():
def f(x=3):
return False
try:
assert f(x=5)
except AssertionError:
e = exvalue()
assert "x=5" in e.msg
def test_private_class_variable():
class X:
def __init__(self):
self.__v = 41
def m(self):
assert self.__v == 42
try:
X().m()
except AssertionError:
e = exvalue()
assert "== 42" in e.msg
# These tests should both fail, but should fail nicely...
class WeirdRepr:
def __repr__(self):
return '<WeirdRepr\nsecond line>'
def bug_test_assert_repr():
v = WeirdRepr()
try:
assert v == 1
except AssertionError:
e = exvalue()
assert e.msg.find('WeirdRepr') != -1
assert e.msg.find('second line') != -1
assert 0
def test_assert_non_string():
try:
assert 0, ['list']
except AssertionError:
e = exvalue()
assert e.msg.find("list") != -1
def test_assert_implicit_multiline():
try:
x = [1,2,3]
assert x != [1,
2, 3]
except AssertionError:
e = exvalue()
assert e.msg.find('assert [1, 2, 3] !=') != -1
def test_assert_with_brokenrepr_arg():
class BrokenRepr:
def __repr__(self): 0 / 0
e = AssertionError(BrokenRepr())
if e.msg.find("broken __repr__") == -1:
pytest.fail("broken __repr__ not handle correctly")
def test_multiple_statements_per_line():
try:
a = 1; assert a == 2
except AssertionError:
e = exvalue()
assert "assert 1 == 2" in e.msg
def test_power():
try:
assert 2**3 == 7
except AssertionError:
e = exvalue()
assert "assert (2 ** 3) == 7" in e.msg
class TestView:
def setup_class(cls):
cls.View = pytest.importorskip("_pytest.assertion.oldinterpret").View
def test_class_dispatch(self):
### Use a custom class hierarchy with existing instances
class Picklable(self.View):
pass
class Simple(Picklable):
__view__ = object
def pickle(self):
return repr(self.__obj__)
class Seq(Picklable):
__view__ = list, tuple, dict
def pickle(self):
return ';'.join(
[Picklable(item).pickle() for item in self.__obj__])
class Dict(Seq):
__view__ = dict
def pickle(self):
return Seq.pickle(self) + '!' + Seq(self.values()).pickle()
assert Picklable(123).pickle() == '123'
assert Picklable([1,[2,3],4]).pickle() == '1;2;3;4'
assert Picklable({1:2}).pickle() == '1!2'
def test_viewtype_class_hierarchy(self):
# Use a custom class hierarchy based on attributes of existing instances
class Operation:
"Existing class that I don't want to change."
def __init__(self, opname, *args):
self.opname = opname
self.args = args
existing = [Operation('+', 4, 5),
Operation('getitem', '', 'join'),
Operation('setattr', 'x', 'y', 3),
Operation('-', 12, 1)]
class PyOp(self.View):
def __viewkey__(self):
return self.opname
def generate(self):
return '%s(%s)' % (self.opname, ', '.join(map(repr, self.args)))
class PyBinaryOp(PyOp):
__view__ = ('+', '-', '*', '/')
def generate(self):
return '%s %s %s' % (self.args[0], self.opname, self.args[1])
codelines = [PyOp(op).generate() for op in existing]
assert codelines == ["4 + 5", "getitem('', 'join')",
"setattr('x', 'y', 3)", "12 - 1"]
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_assert_customizable_reprcompare(monkeypatch):
monkeypatch.setattr(util, '_reprcompare', lambda *args: 'hello')
try:
assert 3 == 4
except AssertionError:
e = exvalue()
s = str(e)
assert "hello" in s
def test_assert_long_source_1():
try:
assert len == [
(None, ['somet text', 'more text']),
]
except AssertionError:
e = exvalue()
s = str(e)
assert 're-run' not in s
assert 'somet text' in s
def test_assert_long_source_2():
try:
assert(len == [
(None, ['somet text', 'more text']),
])
except AssertionError:
e = exvalue()
s = str(e)
assert 're-run' not in s
assert 'somet text' in s
def test_assert_raise_alias(testdir):
testdir.makepyfile("""
"PYTEST_DONT_REWRITE"
import sys
EX = AssertionError
def test_hello():
raise EX("hello"
"multi"
"line")
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*def test_hello*",
"*raise EX*",
"*1 failed*",
])
@pytest.mark.skipif("sys.version_info < (2,5)")
def test_assert_raise_subclass():
class SomeEx(AssertionError):
def __init__(self, *args):
super(SomeEx, self).__init__()
try:
raise SomeEx("hello")
except AssertionError:
s = str(exvalue())
assert 're-run' not in s
assert 'could not determine' in s
def test_assert_raises_in_nonzero_of_object_pytest_issue10():
class A(object):
def __nonzero__(self):
raise ValueError(42)
def __lt__(self, other):
return A()
def __repr__(self):
return "<MY42 object>"
def myany(x):
return True
try:
assert not(myany(A() < 0))
except AssertionError:
e = exvalue()
s = str(e)
assert "<MY42 object> < 0" in s
@pytest.mark.skipif("sys.version_info >= (2,6)")
def test_oldinterpret_importation():
# we had a cyclic import there
# requires pytest on sys.path
res = py.std.subprocess.call([
py.std.sys.executable, '-c', str(py.code.Source("""
try:
from _pytest.assertion.newinterpret import interpret
except ImportError:
from _pytest.assertion.oldinterpret import interpret
"""))
])
assert res == 0
| mit |
sparklyballs/XBMC-Headless | xbmcdata/addons/service.xbmc.versioncheck/lib/common.py | 82 | 7008 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Team-XBMC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import unicodedata
import xbmc
import xbmcaddon
import xbmcgui
import xbmcvfs
__addon__ = xbmcaddon.Addon()
__addonversion__ = __addon__.getAddonInfo('version')
__addonname__ = __addon__.getAddonInfo('name')
__addonpath__ = __addon__.getAddonInfo('path').decode('utf-8')
__addonprofile__ = xbmc.translatePath( __addon__.getAddonInfo('profile') ).decode('utf-8')
__icon__ = __addon__.getAddonInfo('icon')
# Fixes unicode problems
def string_unicode(text, encoding='utf-8'):
try:
text = unicode( text, encoding )
except:
pass
return text
def normalize_string(text):
try:
text = unicodedata.normalize('NFKD', string_unicode(text)).encode('ascii', 'ignore')
except:
pass
return text
def localise(id):
string = normalize_string(__addon__.getLocalizedString(id))
return string
def log(txt):
if isinstance (txt,str):
txt = txt.decode("utf-8")
message = u'%s: %s' % ("Version Check", txt)
xbmc.log(msg=message.encode("utf-8"), level=xbmc.LOGDEBUG)
def get_password_from_user():
keyboard = xbmc.Keyboard("", __addonname__ + "," +localise(32022), True)
keyboard.doModal()
if (keyboard.isConfirmed()):
pwd = keyboard.getText()
return pwd
def message_upgrade_success():
xbmc.executebuiltin("XBMC.Notification(%s, %s, %d, %s)" %(__addonname__,
localise(32013),
15000,
__icon__))
def message_restart():
if dialog_yesno(32014):
xbmc.executebuiltin("RestartApp")
def dialog_yesno(line1 = 0, line2 = 0):
return xbmcgui.Dialog().yesno(__addonname__,
localise(line1),
localise(line2))
def upgrade_message(msg, oldversion, upgrade, msg_current, msg_available):
# Don't show while watching a video
while(xbmc.Player().isPlayingVideo() and not xbmc.abortRequested):
xbmc.sleep(1000)
i = 0
while(i < 5 and not xbmc.abortRequested):
xbmc.sleep(1000)
i += 1
if __addon__.getSetting("lastnotified_version") < __addonversion__:
xbmcgui.Dialog().ok(__addonname__,
localise(msg),
localise(32001),
localise(32002))
#__addon__.setSetting("lastnotified_version", __addonversion__)
else:
log("Already notified one time for upgrading.")
def upgrade_message2( version_installed, version_available, version_stable, oldversion, upgrade,):
# shorten releasecandidate to rc
if version_installed['tag'] == 'releasecandidate':
version_installed['tag'] = 'rc'
if version_available['tag'] == 'releasecandidate':
version_available['tag'] = 'rc'
# convert json-rpc result to strings for usage
msg_current = '%i.%i %s%s' %(version_installed['major'],
version_installed['minor'],
version_installed['tag'],
version_installed.get('tagversion',''))
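    # Illustrative note (added comment, values are hypothetical): a JSON-RPC result
    # such as {'major': 13, 'minor': 2, 'tag': 'beta', 'tagversion': '1'} makes the
    # line above produce msg_current == '13.2 beta1'.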
msg_available = version_available['major'] + '.' + version_available['minor'] + ' ' + version_available['tag'] + version_available.get('tagversion','')
msg_stable = version_stable['major'] + '.' + version_stable['minor'] + ' ' + version_stable['tag'] + version_stable.get('tagversion','')
msg = localise(32034) %(msg_current, msg_available)
# Don't show notify while watching a video
while(xbmc.Player().isPlayingVideo() and not xbmc.abortRequested):
xbmc.sleep(1000)
i = 0
while(i < 10 and not xbmc.abortRequested):
xbmc.sleep(1000)
i += 1
# hack: convert current version number to stable string
# so users don't get notified again. remove in future
if __addon__.getSetting("lastnotified_version") == '0.1.24':
__addon__.setSetting("lastnotified_stable", msg_stable)
# Show different dialogs depending if there's a newer stable available.
# Also split them between xbmc and kodi notifications to reduce possible confusion.
# People will find out once they visit the website.
# For stable only notify once and when there's a newer stable available.
# Ignore any add-on updates as those only count for != stable
if oldversion == 'stable' and __addon__.getSetting("lastnotified_stable") != msg_stable:
if xbmcaddon.Addon('xbmc.addon').getAddonInfo('version') < "13.9.0":
xbmcgui.Dialog().ok(__addonname__,
msg,
localise(32030),
localise(32031))
else:
xbmcgui.Dialog().ok(__addonname__,
msg,
localise(32032),
localise(32033))
__addon__.setSetting("lastnotified_stable", msg_stable)
elif oldversion != 'stable' and __addon__.getSetting("lastnotified_version") != msg_available:
if xbmcaddon.Addon('xbmc.addon').getAddonInfo('version') < "13.9.0":
# point them to xbmc.org
xbmcgui.Dialog().ok(__addonname__,
msg,
localise(32035),
localise(32031))
else:
#use kodi.tv
xbmcgui.Dialog().ok(__addonname__,
msg,
localise(32035),
localise(32033))
# older skins don't support a text field in the OK dialog.
# let's use split lines for now. see code above.
'''
msg = localise(32034) %(msg_current, msg_available)
if oldversion == 'stable':
msg = msg + ' ' + localise(32030)
else:
msg = msg + ' ' + localise(32035)
msg = msg + ' ' + localise(32031)
xbmcgui.Dialog().ok(__addonname__, msg)
#__addon__.setSetting("lastnotified_version", __addonversion__)
'''
__addon__.setSetting("lastnotified_version", msg_available)
else:
log("Already notified one time for upgrading.") | mit |
apurvbhartia/gnuradio-routing | gr-wxgui/src/python/forms/__init__.py | 16 | 4222 | #
# Copyright 2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
The following classes will be available through gnuradio.wxgui.forms:
"""
########################################################################
# External Converters
########################################################################
from converters import \
eval_converter, str_converter, \
float_converter, int_converter
########################################################################
# External Forms
########################################################################
from forms import \
radio_buttons, drop_down, notebook, \
button, toggle_button, single_button, \
check_box, text_box, static_text, \
slider, log_slider, gauge, \
make_bold, DataEvent, EVT_DATA
########################################################################
# Helpful widgets
########################################################################
import wx
class static_box_sizer(wx.StaticBoxSizer):
"""
A box sizer with label and border.
@param parent the parent widget
@param sizer add this widget to sizer if provided (optional)
@param proportion the proportion when added to the sizer (default=0)
@param flag the flag argument when added to the sizer (default=wx.EXPAND)
@param label title label for this widget (optional)
@param bold true to boldify the label
@param orient the sizer orientation wx.VERTICAL or wx.HORIZONTAL (default=wx.VERTICAL)
"""
def __init__(self, parent, label='', bold=False, sizer=None, orient=wx.VERTICAL, proportion=0, flag=wx.EXPAND):
box = wx.StaticBox(parent=parent, label=label)
if bold: make_bold(box)
wx.StaticBoxSizer.__init__(self, box=box, orient=orient)
if sizer: sizer.Add(self, proportion, flag)
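# Example usage (illustrative only; 'panel' and 'main_sizer' are assumed names):
#   box = static_box_sizer(parent=panel, sizer=main_sizer, label='RX Options', bold=True)
# The sizer adds itself to main_sizer and controls can then be Add()-ed to box.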
class incr_decr_buttons(wx.BoxSizer):
"""
    A horizontal box sizer with an increment and a decrement button.
@param parent the parent widget
@param sizer add this widget to sizer if provided (optional)
@param proportion the proportion when added to the sizer (default=0)
@param flag the flag argument when added to the sizer (default=wx.EXPAND)
@param label title label for this widget (optional)
@param on_incr the callback for pressing the + button
@param on_decr the callback for pressing the - button
"""
def __init__(self, parent, on_incr, on_decr, label='', sizer=None, proportion=0, flag=wx.EXPAND):
"""
@param parent the parent window
@param on_incr the event handler for increment
@param on_decr the event handler for decrement
"""
wx.BoxSizer.__init__(self, wx.HORIZONTAL)
buttons_box = wx.BoxSizer(wx.HORIZONTAL)
self._incr_button = wx.Button(parent, label='+', style=wx.BU_EXACTFIT)
self._incr_button.Bind(wx.EVT_BUTTON, on_incr)
buttons_box.Add(self._incr_button, 0, wx.ALIGN_CENTER_VERTICAL)
self._decr_button = wx.Button(parent, label=' - ', style=wx.BU_EXACTFIT)
self._decr_button.Bind(wx.EVT_BUTTON, on_decr)
buttons_box.Add(self._decr_button, 0, wx.ALIGN_CENTER_VERTICAL)
if label: #add label
self.Add(wx.StaticText(parent, label='%s: '%label), 1, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_LEFT)
self.Add(buttons_box, 0, wx.ALIGN_CENTER_VERTICAL | wx.ALIGN_RIGHT)
else: self.Add(buttons_box, 0, wx.ALIGN_CENTER_VERTICAL)
if sizer: sizer.Add(self, proportion, flag)
def Disable(self, disable=True): self.Enable(not disable)
def Enable(self, enable=True):
if enable:
self._incr_button.Enable()
self._decr_button.Enable()
else:
self._incr_button.Disable()
self._decr_button.Disable()
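# Example usage (illustrative only; 'panel', 'control_sizer' and the callbacks are assumed names):
#   buttons = incr_decr_buttons(parent=panel, on_incr=self.on_plus, on_decr=self.on_minus,
#                               label='gain', sizer=control_sizer)
# Disable()/Enable() then grey out or re-activate both buttons together.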
| gpl-3.0 |
oasiswork/odoo | openerp/addons/base/tests/test_qweb.py | 289 | 4814 | # -*- coding: utf-8 -*-
import cgi
import json
import os.path
import glob
import re
import collections
from lxml import etree
import openerp.addons.base.ir.ir_qweb
import openerp.modules
from openerp.tests import common
from openerp.addons.base.ir import ir_qweb
class TestQWebTField(common.TransactionCase):
def setUp(self):
super(TestQWebTField, self).setUp()
self.engine = self.registry('ir.qweb')
def context(self, values):
return ir_qweb.QWebContext(
self.cr, self.uid, values, context={'inherit_branding': True})
def test_trivial(self):
field = etree.Element('span', {'t-field': u'company.name'})
Companies = self.registry('res.company')
company_id = Companies.create(self.cr, self.uid, {
'name': "My Test Company"
})
result = self.engine.render_node(field, self.context({
'company': Companies.browse(self.cr, self.uid, company_id),
}))
self.assertEqual(
result,
'<span data-oe-model="res.company" data-oe-id="%d" '
'data-oe-field="name" data-oe-type="char" '
'data-oe-expression="company.name">%s</span>' % (
company_id,
"My Test Company",))
def test_i18n(self):
field = etree.Element('span', {'t-field': u'company.name'})
Companies = self.registry('res.company')
s = u"Testing «ταБЬℓσ»: 1<2 & 4+1>3, now 20% off!"
company_id = Companies.create(self.cr, self.uid, {
'name': s,
})
result = self.engine.render_node(field, self.context({
'company': Companies.browse(self.cr, self.uid, company_id),
}))
self.assertEqual(
result,
'<span data-oe-model="res.company" data-oe-id="%d" '
'data-oe-field="name" data-oe-type="char" '
'data-oe-expression="company.name">%s</span>' % (
company_id,
cgi.escape(s.encode('utf-8')),))
def test_reject_crummy_tags(self):
field = etree.Element('td', {'t-field': u'company.name'})
with self.assertRaisesRegexp(
AssertionError,
r'^RTE widgets do not work correctly'):
self.engine.render_node(field, self.context({
'company': None
}))
def test_reject_t_tag(self):
field = etree.Element('t', {'t-field': u'company.name'})
with self.assertRaisesRegexp(
AssertionError,
r'^t-field can not be used on a t element'):
self.engine.render_node(field, self.context({
'company': None
}))
class TestQWeb(common.TransactionCase):
matcher = re.compile('^qweb-test-(.*)\.xml$')
@classmethod
def get_cases(cls):
path = cls.qweb_test_file_path()
return (
cls("test_qweb_{}".format(cls.matcher.match(f).group(1)))
for f in os.listdir(path)
# js inheritance
if f != 'qweb-test-extend.xml'
if cls.matcher.match(f)
)
@classmethod
def qweb_test_file_path(cls):
path = os.path.dirname(
openerp.modules.get_module_resource(
'web', 'static', 'lib', 'qweb', 'qweb2.js'))
return path
def __getattr__(self, item):
if not item.startswith('test_qweb_'):
raise AttributeError("No {} on {}".format(item, self))
f = 'qweb-test-{}.xml'.format(item[10:])
path = self.qweb_test_file_path()
return lambda: self.run_test_file(os.path.join(path, f))
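    # Illustrative example (added comment, filename is hypothetical): a fixture named
    # 'qweb-test-foreach.xml' is reported by get_cases() as 'test_qweb_foreach', and
    # __getattr__ maps that name back to run_test_file(.../qweb-test-foreach.xml).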
def run_test_file(self, path):
context = openerp.addons.base.ir.ir_qweb.QWebContext(self.cr, self.uid, {})
qweb = self.env['ir.qweb']
doc = etree.parse(path).getroot()
qweb.load_document(doc, None, context)
for template in context.templates:
if template.startswith('_'): continue
param = doc.find('params[@id="{}"]'.format(template))
# OrderedDict to ensure JSON mappings are iterated in source order
# so output is predictable & repeatable
params = {} if param is None else json.loads(param.text, object_pairs_hook=collections.OrderedDict)
ctx = context.copy()
ctx.update(params)
result = doc.find('result[@id="{}"]'.format(template)).text
self.assertEqual(
qweb.render(template, qwebcontext=ctx).strip(),
(result or u'').strip().encode('utf-8'),
template
)
def load_tests(loader, suite, _):
# can't override TestQWeb.__dir__ because dir() called on *class* not
# instance
suite.addTests(TestQWeb.get_cases())
return suite
| agpl-3.0 |
carsongee/edx-platform | cms/djangoapps/contentstore/management/commands/course_id_clash.py | 18 | 2127 | """
Script for finding all courses whose org/name pairs == other courses when ignoring case
"""
from django.core.management.base import BaseCommand
from xmodule.modulestore.django import modulestore
from xmodule.modulestore import ModuleStoreEnum
#
# To run from command line: ./manage.py cms --settings dev course_id_clash
#
class Command(BaseCommand):
"""
Script for finding all courses in the Mongo Modulestore whose org/name pairs == other courses when ignoring case
"""
help = 'List all courses ids in the Mongo Modulestore which may collide when ignoring case'
def handle(self, *args, **options):
mstore = modulestore()._get_modulestore_by_type(ModuleStoreEnum.Type.mongo) # pylint: disable=protected-access
if hasattr(mstore, 'collection'):
map_fn = '''
function () {
emit(this._id.org.toLowerCase()+this._id.course.toLowerCase(), {target: this._id});
}
'''
reduce_fn = '''
function (idpair, matches) {
var result = {target: []};
matches.forEach(function (match) {
result.target.push(match.target);
});
return result;
}
'''
finalize = '''
function(key, reduced) {
if (Array.isArray(reduced.target)) {
return reduced;
}
else {return null;}
}
'''
results = mstore.collection.map_reduce(
map_fn, reduce_fn, {'inline': True}, query={'_id.category': 'course'}, finalize=finalize
)
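            # Illustrative note (added comment; course ids are hypothetical): courses
            # 'MITx/6.002x/2012' and 'mitX/6.002X/2013' both emit the key 'mitx6.002x'
            # in map_fn above, so they would be reported together as a potential clash.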
results = results.get('results')
for entry in results:
if entry.get('value') is not None:
print '{:-^40}'.format(entry.get('_id'))
for course_id in entry.get('value').get('target'):
print ' {}/{}/{}'.format(course_id.get('org'), course_id.get('course'), course_id.get('name'))
| agpl-3.0 |
codingcommando/tmtp | standalone/New_Jeans2.py | 1 | 23537 | #!/usr/bin/env python
# New_Jeans2.py
# PatternMaker: Susan Spencer Conklin
# pants shell pattern
from tmtpl.constants import *
from tmtpl.pattern import *
from tmtpl.client import Client
from math import sqrt
class PatternDesign():
def __init__(self):
self.styledefs={}
self.markerdefs={}
return
def pattern(self):
"""
Method defining a pattern design. This is where the designer places
all elements of the design definition
"""
# All measurements are converted to pixels
# x increases towards right, y increases towards bottom of drawing - Quadrant is 'upside down'
# All angles are in radians
# angles start with 0 at '3:00', & move clockwise b/c quadrant is 'upside down'
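        # Illustrative note (added comment): with this orientation pPoint(center, riseLine)
        # is drawn below pPoint(center, waistLine), because larger y values sit closer to the hem.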
cd = self.cd #client data is prefaced with cd
printer = '36" wide carriage plotter'
companyName = 'Seamly Patterns' # mandatory
designerName = 'Susan Spencer' # mandatory
patternName = 'pants Foundation' # mandatory
patternNumber = 'WS010-xj1-1' # mandatory
doc = setupPattern(self, cd, printer, companyName, designerName, patternName, patternNumber)
riseLine = cd.side_rise + (1*IN) # 1" sitting ease from hipline to riseline
hipLine = cd.front_hip_length # don't add 1" extra
hemLine = riseLine + cd.inseam
kneeLine = riseLine + cd.inseam/2. - (1*IN) # kneeline is 1" above midleg
# TODO - choose if using thick fabric
#seamEase = (1/16.0) * IN # 1/16" seam ease for thick fabric, 0 if not
seamEase = 0
waistLine = (1*IN) # Jeans waist is 1" lower than actual waist
frontDartWidth = 0.5*IN
frontDartLength = hipLine/2.
backDartWidth = 0.75*IN
backDartLength = hipLine*2/3.
waistBand = 1*IN # Height of waistBand
backKneeWidth = 10*IN
backHemWidth = 8*IN
frontKneeWidth = 8*IN
frontHemWidth = 7*IN
# pattern object
pants = Pattern('pants')
pants.styledefs.update(self.styledefs)
pants.markerdefs.update(self.markerdefs)
doc.add(pants)
# pants Front A
pants.add(PatternPiece('pattern', 'front', letter='A', fabric=2, interfacing=0, lining=0))
A = pants.front
top = 0.001 # can't use 0 in some calculations
side = 0.001
center = max(cd.front_waist_width*0.5, cd.front_hip_width*0.5)
width = center + cd.front_crotch_extension
creaseLine = width/2.0
TOPLEFT = pPoint(side, top)
TOPRIGHT = pPoint(center, top)
#a = pPoint(center, waistLine) # center waist
a = pPoint(center, riseLine - cd.front_rise - 1*IN) # center waist
#b = pPoint(center - cd.front_waist_width*0.5 - frontDartWidth - 2*seamEase, top) # side waist
radius = cd.front_waist_width*0.5 + frontDartWidth
Solution = pntIntersectLineCircleP(a, radius, TOPLEFT, TOPRIGHT) # returns pnt.intersections, pnt.p1, pnt.p2
if Solution.intersections == 1:
b = Solution.p1
elif Solution.intersections == 2:
if Solution.p1.x < a.x :
b = Solution.p1
else:
b = Solution.p2
#TODO - change angle of dart to be perpendicular to line ab
#pnt = pMidpointP(a, b) # dart center at waist along line ab
#c = pPoint(pnt.x, pnt.y + 0.25*IN) # lower dart center by 1/4in
c = pPointP(pMidpointP(a, b)) # dart center at waist along line ab
d = pPoint(c.x + frontDartWidth/2.0, c.y) # dart inside at waist
e = pPoint(c.x - frontDartWidth/2.0, c.y) # dart outside at waist
f = pPoint(c.x, c.y + frontDartLength) # dart point
angle = angleOfLineP(f, d) + angleOfVectorP(c, f, d)
g = pntFromDistanceAndAngleP(f, frontDartLength, angle) # on angle of sewn dart fold, after folded toward center
h = pPoint(center, riseLine/2.0) # center front 'pivot' point from crotch curve to front fly
i = pPoint(side, hipLine) # side hip
j = pPoint(center, hipLine) # center hip
k = pPoint(side, riseLine) # side rise
l = pPoint(center, riseLine) # center rise
m = pntFromDistanceAndAngleP(l, (1.25*IN), angleOfDegree(315.0)) # center crotch curve
n = pPoint(l.x + cd.front_crotch_extension, riseLine) # center crotch point
o = pPoint(creaseLine - frontKneeWidth/2.0, kneeLine) # inside knee
p = pPoint(creaseLine + frontKneeWidth/2.0, kneeLine) # outside knee
q = pPoint(creaseLine - frontHemWidth/2.0, hemLine) # inside hem
r = pPoint(creaseLine + frontHemWidth/2.0, hemLine) # outside hem
pnt1 = pntOnLineP(a, h, waistBand)
pnt2 = pntOnLineP(d, f, waistBand)
pnt3 = pntOnLineP(e, f, waistBand)
pnt4 = pntOnLineP(b, i, waistBand)
t1 = pntIntersectLinesP(pnt1, pnt2, a, h) # waistBand at center
u1 = pntIntersectLinesP(pnt1, pnt2, d, f) # waistBand at inside dart
v1 = pntIntersectLinesP(pnt3, pnt4, e, f) # waistBand at outside dart
w1 = pntIntersectLinesP(pnt3, pnt4, b, i) # waistBand at side
Side = rPoint(A, 'Side', side, top)
Center = rPoint(A, 'Center', center, top)
Inseam = rPoint(A, 'Inseam', width, top)
# front waist AW
AW1 = rPointP(A, 'AW1', a) # center waist
AW2 = rPointP(A, 'AW2', d) # inside dart
AW4 = rPointP(A, 'AW4', e) # outside dart
AW5 = rPointP(A, 'AW5', b) # side waist
# front waist control points
AW2_c1 = cPointP(A, 'AW2_c1', pntFromDistanceAndAngleP(AW1, lineLengthP(AW1, AW2)/3.0, angleOfLineP(j, AW1) - angleOfDegree(90))) # b/w AW1 & AW2
AW2_c2 = cPointP(A, 'AW2_c2', pntFromDistanceAndAngleP(AW2, lineLengthP(AW1, AW2)/3.0, angleOfLineP(f, AW2) + angleOfDegree(90))) # b/w AW1 & AW2
AW5_c1 = cPointP(A, 'AW5_c1', pntFromDistanceAndAngleP(AW4, lineLengthP(AW4, AW5)/3.0, angleOfLineP(f, AW4) - angleOfDegree(90))) # b/w AW4 & AW5
AW5_c2 = cPointP(A, 'AW5_c2', pntFromDistanceAndAngleP(AW5, lineLengthP(AW4, AW5)/3.0, angleOfLineP(i, AW5) + angleOfDegree(90))) # b/w AW4 & AW5
u1_c1 = cPointP(A, 'u1_c1', pntFromDistanceAndAngleP(t1, lineLengthP(t1, u1)/3.0, angleOfLineP(t1, AW1) - angleOfDegree(90))) # b/w t1 & u1
u1_c2 = cPointP(A, 'u1_c2', pntFromDistanceAndAngleP(u1, lineLengthP(t1, u1)/3.0, angleOfLineP(f, u1) + angleOfDegree(90))) # b/w t1 & u1
w1_c1 = cPointP(A, 'w1_c1', pntFromDistanceAndAngleP(v1, lineLengthP(v1, w1)/3.0, angleOfLineP(f, v1) - angleOfDegree(90))) # b/w v1 & w1
w1_c2 = cPointP(A, 'w1_c2', pntFromDistanceAndAngleP(w1, lineLengthP(v1, w1)/3.0, angleOfLineP(w1, AW5) + angleOfDegree(90))) # b/w v1 & w1
pnt1 = rPointP(A, 'pnt1', pntIntersectLinesP(f, g, AW2, AW2_c2)) # where sewn dart fold should cross waistline before folding
pnt2 = rPointP(A, 'pnt2', pntFromDistanceAndAngleP(AW4, lineLengthP(AW4, pnt1), angleOfLineP(AW2, pnt1) - angleOfVectorP(c, f, d)))
pnt3 = rPointP(A, 'pnt3', pntIntersectLinesP(f, pnt1, AW4, pnt2))
AW3 = rPointP(A, 'AW3', pntOnLineP(f, c, lineLengthP(f, pnt3))) # extend dart center up to make sewn dart fold cross waistline
# front dart AD
AD1 = rPointP(A, 'AD1', f) # dart point
AD2 = rPointP(A, 'AD2', pntOffLineP(d, AD1, SEAM_ALLOWANCE)) # inside dart at cuttingline
AD3 = rPointP(A, 'AD3', pntOffLineP(e, AD1, SEAM_ALLOWANCE)) # outside dart at cuttingline
# front side seam AS
AS1 = rPointP(A, 'AS1', i)
AS2 = rPointP(A, 'AS2', o)
AS3 = rPointP(A, 'AS3', q)
# front side seam control points cAS
# control points next to AS1 form a vertical line at AS1.x, control point nearest AS2 is along line of hem to knee so that seam curves continuously into straight seam from knee to hem
distance = lineLengthP(AS1, AW5)/4.0 # shorter control point line = flatter curve between waist & hip
AS1_c2 = cPoint(A, 'AS1_c2', AS1.x, AS1.y - distance) # b/w AW5 & AS1
angle = angleOfLineP(AW5, AS1_c2)
AS1_c1 = cPointP(A, 'AS1_c1', pntFromDistanceAndAngleP(AW5, distance, angle)) # b/w AW5 & AS1
distance = lineLengthP(AS1, AS2)/3.0
AS2_c1 = cPoint(A, 'AS2_c1', AS1.x, AS1.y + distance) # b/w AS1 & AS2
angle = angleOfLineP(AS3, AS2)
AS2_c2 = cPointP(A, 'AS2_c2', pntFromDistanceAndAngleP(AS2, distance, angle)) #b/w AS1 & AS2
# front inseam AI
AI1 = rPointP(A, 'AI1', r)
AI2 = rPointP(A, 'AI2', p)
AI3 = rPointP(A, 'AI3', n)
# front inseam control points cAI
AI3_c1 = cPointP(A, 'AI3_c1', pntOffLineP(AI2, AI1, lineLengthP(AI2, AI3)/3.0)) #b/w AI2 & AI3
AI3_c2 = cPointP(A, 'AI3_c2', pntOnLineP(AI3, AI3_c1, lineLengthP(AI2, AI3)/3.0)) #b/w AI2 & AI3
#front center seam AC
AC1 = rPointP(A, 'AC1', m)
AC2 = rPointP(A, 'AC2', h)
# front center seam control points cAC
AC2_c2 = cPointP(A, 'AC2_c2', pntOffLineP(AC2, AW1, lineLengthP(l, AC2)*(5/8.0)))
pnts = pointList(AI3, AC1, AC2_c2)
c1, c2 = controlPoints('FrontCenterSeam', pnts)
AC1_c1, AC1_c2 = cPointP(A, 'AC1_c1', c1[0]), cPointP(A, 'AC1_c2', c2[0]) #b/w AI3 & AC1
AC2_c1 = cPointP(A, 'AC2_c1', c1[1]) #b/w AC1 & AC2
#front grainline AG & label location
AG1 = rPoint(A, 'AG1', creaseLine, hipLine)
AG2 = rPoint(A, 'AG2', creaseLine, hemLine - 2.0*IN)
# front label location
A.label_x, A.label_y = creaseLine, hipLine - 2.0*IN
#grid path
grid = path()
addToPath(grid, 'M', Side, 'L', k, 'L', n, 'L', Inseam, 'L', Side, 'M', AS1, 'L', j, 'M', Center, 'L', l , 'L', m)
addToPath(grid, 'M', AW1, 'L', AW5,'M', AW1, 'L', AW2, 'M', AW4, 'L', AW5, 'M', t1, 'L', u1, 'M', v1, 'L', w1) # waist grid lines
# dart 'd' path
dartLine = path()
addToPath(dartLine, 'M', AD2, 'L', AD1, 'L', AD3)
# seamline 's' & cuttingline 'c' paths
seamLine = path()
cuttingLine = path()
for p in (seamLine, cuttingLine):
addToPath(p, 'M', AW1, 'C', AW2_c1, AW2_c2, AW2, 'L', AW3, 'L', AW4, 'C', AW5_c1, AW5_c2, AW5) # waist
addToPath(p, 'C', AS1_c1, AS1_c2, AS1, 'C', AS2_c1, AS2_c2, AS2, 'L', AS3) # side
addToPath(p, 'L', AI1, 'L', AI2, 'C', AI3_c1, AI3_c2, AI3) # inseam
addToPath(p, 'C', AC1_c1, AC1_c2, AC1, 'C', AC2_c1, AC2_c2, AC2, 'L', AW1) # center
# add grainline, dart, seamline & cuttingline paths to pattern
addGrainLine(A, AG1, AG2)
addGridLine(A, grid)
addDartLine(A, dartLine)
addSeamLine(A, seamLine)
addCuttingLine(A, cuttingLine)
# pants Back 'B'
#TODO - change angle of dart to be perpendicular to waistline
#TODO - use side_rise and back_rise to create reference grid
#TODO - use back_hip_length and crotch waist-to-waist measurements
pants.add(PatternPiece('pattern', 'back', letter='B', fabric=2, interfacing=0, lining=0))
B = pants.back
top = 0.001
crotch = 0.001
center = cd.back_crotch_extension
width = center + max(cd.back_hip_width*0.5, cd.back_waist_width*0.5)
side = width
creaseLine = width/2.0
Inseam = rPoint(B, 'Inseam', crotch, top)
Center = rPoint(B, 'Center', center, top)
Width = rPoint(B, 'Width', width, top)
Side = rPointP(B, 'Side', Width)
a = pPoint(center + (1+(1/8.))*IN, top - (1.*IN)) # center waist
b = pPoint(center + cd.back_waist_width*0.5 + backDartWidth, top) # side waist
pnt = pntOnLineP(a, b, lineLengthP(a, b)/2.0)
c = pPoint(pnt.x, pnt.y + (1/4.0)*IN) # dart center at waist along line ab
d = pPoint(c.x - backDartWidth/2.0, c.y) # dart inside at waist
e = pPoint(c.x + backDartWidth/2.0, c.y) # dart outside at waist
f = pPoint(c.x, c.y + backDartLength) # dart point
angle = angleOfLineP(f, d) - angleOfVectorP(c, f, d)
g = pntFromDistanceAndAngleP(f, backDartLength, angle) # on angle of sewn dart fold, after folded toward center
h = pPoint(center, riseLine/2.0) # center front 'pivot' point from crotch curve to front fly
i = pPoint(side, hipLine) # side hip
j = pPoint(center, hipLine) # center hip
k = pPoint(side, riseLine) # side rise
l = pPoint(center, riseLine) # center rise
m = pntFromDistanceAndAngleP(l, (1.25*IN), angleOfDegree(225.0)) # center crotch curve
n = pPoint(crotch, riseLine) # center crotch point
o = pPoint(creaseLine - backKneeWidth/2.0, kneeLine) # inside knee
p = pPoint(creaseLine + backKneeWidth/2.0, kneeLine) # outside knee
q = pPoint(creaseLine - backHemWidth/2.0, hemLine) # inside hem
r = pPoint(creaseLine + backHemWidth/2.0, hemLine) # outside hem
pnt1 = pPoint(a.x, a.y + waistBand)
pnt2 = pPoint(d.x, d.y + waistBand)
pnt3 = pPoint(e.x, e.y + waistBand)
pnt4 = pPoint(b.x, b.y + waistBand)
t2 = rPointP(B, 't2', pntIntersectLinesP(pnt1, pnt2, a, h)) # waistBand at center
u2 = rPointP(B, 'u2', pntIntersectLinesP(pnt1, pnt2, d, f)) # waistBand at inside dart
v2 = rPointP(B, 'v2', pntIntersectLinesP(pnt3, pnt4, e, f)) # waistBand at outside dart
w2 = rPointP(B, 'w2', pntIntersectLinesP(pnt3, pnt4, b, i)) # waistBand at side
# back waist BW
BW1 = rPointP(B, 'BW1', a) # center waist
BW2 = rPointP(B, 'BW2', d) # inside dart
BW4 = rPointP(B, 'BW4', e) # outside dart
BW5 = rPointP(B, 'BW5', b) # side waist
# back waist control points
BW2_c1 = cPointP(B, 'BW2_c1', pntFromDistanceAndAngleP(BW1, lineLengthP(BW1, BW2)/3.0, angleOfLineP(j, BW1) + angleOfDegree(90)))
BW2_c2 = cPointP(B, 'BW2_c2', pntFromDistanceAndAngleP(BW2, lineLengthP(BW1, BW2)/3.0, angleOfLineP(f, BW2) - angleOfDegree(90)))
BW5_c1 = cPointP(B, 'BW5_c1', pntFromDistanceAndAngleP(BW4, lineLengthP(BW4, BW5)/3.0, angleOfLineP(f, BW4) + angleOfDegree(90)))
BW5_c2 = cPointP(B, 'BW5_c2', pntFromDistanceAndAngleP(BW5, lineLengthP(BW4, BW5)/3.0, angleOfLineP(i, BW5) - angleOfDegree(90)))
u2_c1 = cPointP(B, 'u2_c1', pntFromDistanceAndAngleP(t2, lineLengthP(t2, u2)/3.0, angleOfLineP(t2, BW1) + angleOfDegree(90))) # b/w t2 & u2
u2_c2 = cPointP(B, 'u2_c2', pntFromDistanceAndAngleP(u2, lineLengthP(t2, u2)/3.0, angleOfLineP(u2, BW2) - angleOfDegree(90))) # b/w t2 & u2
w2_c1 = cPointP(B, 'w2_c1', pntFromDistanceAndAngleP(v2, lineLengthP(v2, w2)/3.0, angleOfLineP(f, v2) + angleOfDegree(90))) # b/w v2 & w2
w2_c2 = cPointP(B, 'w2_c2', pntFromDistanceAndAngleP(w2, lineLengthP(v2, w2)/3.0, angleOfLineP(w2, BW5) - angleOfDegree(90))) # b/w v2 & w2
# back dart BD
pnt1 = rPointP(B, 'pnt1', pntIntersectLinesP(f, g, BW2, BW2_c2)) # where sewn dart fold should cross waistline before folding
pnt2 = rPointP(B, 'pnt2', pntFromDistanceAndAngleP(BW4, lineLengthP(BW4, pnt1), angleOfLineP(BW2, pnt1) + angleOfVectorP(c, f, d)))
pnt3 = rPointP(B, 'pnt3', pntIntersectLinesP(f, pnt1, BW4, pnt2))
BW3 = rPointP(B, 'BW3', pntOnLineP(f, c, lineLengthP(f, pnt3))) # extend dart center up to make sewn dart fold cross waistline
BD1 = rPointP(B, 'BD1', f) # dart point
BD2 = rPointP(B, 'BD2', pntOffLineP(d, BD1, SEAM_ALLOWANCE)) # inside dart at cuttingline
BD3 = rPointP(B, 'BD3', pntOffLineP(e, BD1, SEAM_ALLOWANCE)) # outside dart at cuttingline
# back side seam BS
BS1 = rPointP(B, 'BS1', i) # side hip
BS2 = rPointP(B, 'BS2', p) # outside knee
BS3 = rPointP(B, 'BS3', r) # outside hem
# back side seam control points
# control points at hip are vertical
distance = lineLengthP(BS1, BW5)/4.0# shorter control point line = flatter curve between waist & hip
BS1_c2 = cPoint(B, 'BS1_c2', BS1.x, BS1.y - distance) # b/w BW5 & BS1
angle = angleOfLineP(BW5, BS1_c2)
BS1_c1 = cPointP(B, 'BS1_c1', pntFromDistanceAndAngleP(BW5, distance, angle)) # b/w BW5 & BS1
distance = lineLengthP(BS1, BS2)/3.0
BS2_c1 = cPoint(B, 'BS2_c1', BS1.x, BS1.y + distance) # b/w BS1 & BS2
angle = angleOfLineP(BS3, BS2)
BS2_c2 = cPointP(B, 'BS2_c2', pntFromDistanceAndAngleP(BS2, distance, angle)) #b/w BS1 & BS2
# back inseam BI
BI1 = rPointP(B, 'BI1', q) # inseam hem
BI2 = rPointP(B, 'BI2', o) # inseam knee
BI3 = rPointP(B, 'BI3', n) # crotch point
# back inseam control points
BI3_c1 = cPointP(B, 'BI3_c1', pntOffLineP(BI2, BI1, lineLengthP(BI2, BI3)/3.0)) #b/w BI2 & BI3
        BI3_c2 = cPointP(B, 'BI3_c2', pntOnLineP(BI3, BI3_c1, lineLengthP(BI2, BI3)/3.0)) #b/w BI2 & BI3
# back center seam BC
BC1 = rPointP(B, 'BC1', m) # crotch curve
BC2 = rPointP(B, 'BC2', j) # center hip
# back center seam control points
BC2_c2 = cPointP(B, 'BC2_c2', pntOffLineP(BC2, BW1, lineLengthP(l, BC2)/3.0))
BC2_c1 = cPointP(B, 'BC2_c1', pntFromDistanceAndAngleP(BC1, lineLengthP(BC1, BC2_c2)/3.0, angleOfLineP(BI3, BC2))) # b/w BC1 & BC2
distance = lineLengthP(BI3, BC1)/3.0
BC1_c1 = cPoint(B, 'BC1_c1', BI3.x + distance, BI3.y) #b/w BI3 & BC1
BC1_c2 = cPointP(B, 'BC1_c2', pntFromDistanceAndAngleP(BC1, distance, angleOfLineP(BC2, BI3)))
# back grainline BG
BG1 = rPoint(B, 'BG1', creaseLine, hipLine) # grainline end 1
BG2 = rPoint(B, 'BG2', creaseLine, hemLine - 2.0*IN) # grainline end 2
# back label location
B.label_x, B.label_y = creaseLine, (hipLine - 2.0*IN) # label location
# grid
grid = path()
addToPath(grid, 'M', Inseam, 'L', Width, 'L', k, 'L', n, 'L', Inseam, 'M', Center, 'L', l, 'M', i, 'L', j) # horizontal & vertical: torso box, centerline, hipline
addToPath(grid, 'M', l, 'L', m, 'M', BW1, 'L', BW5, 'M', BD2, 'L', BD1, 'L', BD3) # diagonal: crotch curve, waistline, dartline
addToPath(grid, 'M',BW1, 'L', BW2, 'M', BW4, 'L', BW5, 'M', t2, 'L', u2, 'M', v2, 'L', w2) # line to create waistband pattern piece
# dart 'd' path
dartLine = path()
addToPath(dartLine, 'M', BD2, 'L', BD1, 'L', BD3)
# seamline 's' & cuttingline 'c' paths
seamLine = path()
cuttingLine = path()
for p in (seamLine, cuttingLine):
addToPath(p, 'M', BW1, 'C', BW2_c1, BW2_c2, BW2, 'L', BW3, 'L', BW4, 'C', BW5_c1, BW5_c2, BW5) # waist
addToPath(p, 'C', BS1_c1, BS1_c2, BS1, 'C', BS2_c1, BS2_c2, BS2, 'L', BS3) # side
addToPath(p, 'L', BI1, 'L', BI2, 'C', BI3_c1, BI3_c2, BI3) # inseam
addToPath(p, 'C', BC1_c1, BC1_c2, BC1, 'C', BC2_c1, BC2_c2, BC2, 'L', BW1) # center
# add grid, dart, grainline, seamline & cuttingline paths to pattern
addGrainLine(B, BG1, BG2)
addGridLine(B, grid)
addDartLine(B, dartLine)
addSeamLine(B, seamLine)
addCuttingLine(B, cuttingLine)
# Waistfacing 'C'
pants.add(PatternPiece('pattern', 'Waistfacing', letter='C', fabric=0, interfacing=2, lining=2))
C = pants.Waistfacing
top = 0.0
width = cd.front_waist_width*0.5 + cd.back_waist_width*0.5
# Waistfacing front center section
# lower section
CW1 = rPointP(C, 'CW1', t1)
CW2 = rPointP(C, 'CW2', u1)
# upper section
CW9 = rPointP(C, 'CW9', AW2)
CW10 = rPointP(C, 'CW10', AW1)
# Waistfacing front side section
connectorPoints = pointList(CW9, CW2) # 2 connector points from waistfacing above, upper = CW9, lower = CW2
moveObject = pointList(AW4, v1, w1, AW5) # front side section, outside of dart. 1st 2 points connect to connectorPoints
new_pnts = connectObjects(connectorPoints, moveObject) # translate & rotate front side section
# skip AW4/new_pnts[0] & v1/new_pnts[1], same as CW9 & CW2
CW3 = rPointP(C, 'CW3', new_pnts[2]) # lower points
CW8 = rPointP(C, 'CW8', new_pnts[3]) # upper points
# Waistfacing back side section
connectorPoints = pointList(CW8, CW3) # 2 connector points from waistfacing above, upper = CW8, lower = CW3
moveObject = pointList(BW5, w2, v2, BW4)
new_pnts = connectObjects(connectorPoints, moveObject)
# skip BW5/new_pnts[0] & w2/new_pnts[1], same as CW8 & CW3
CW4 = rPointP(C, 'CW4', new_pnts[2]) # lower points
CW7 = rPointP(C, 'CW7', new_pnts[3]) # upper points
# Waistfacing back center section
connectorPoints = pointList(CW7, CW4) # 2 connector points from waistfacing above, upper = CW7, lower = CW4
moveObject = pointList(BW2, u2, t2, BW1)
new_pnts = connectObjects(connectorPoints, moveObject)
# skip BW2/new_pnts[0] & u2/new_pnts[1], same as CW7 & CW4
CW5 = rPointP(C, 'CW5', new_pnts[2]) # lower points
CW6 = rPointP(C, 'CW6', new_pnts[3]) # upper points
# Waistfacing control points
# lower
pnts = pointList(CW1, CW2, CW3, CW4, CW5)
c1, c2 = controlPoints('WaistfacingLower', pnts)
CW2_c1, CW2_c2 = cPointP(C, 'CW2_c1', c1[0]), cPointP(C, 'CW2_c2', c2[0]) # b/w CW1 & CW2
CW3_c1, CW3_c2 = cPointP(C, 'CW3_c1', c1[1]), cPointP(C, 'CW3_c2', c2[1]) # b/w CW2 & CW3
CW4_c1, CW4_c2 = cPointP(C, 'CW4_c1', c1[2]), cPointP(C, 'CW4_c2', c2[2]) # b/w CW2 & CW4
CW5_c1, CW5_c2 = cPointP(C, 'CW5_c1', c1[3]), cPointP(C, 'CW5_c2', c2[3]) # b/w CW4 & CW5
# upper
pnts = pointList(CW6, CW7, CW8, CW9, CW10)
c1, c2 = controlPoints('WaistfacingUpper', pnts)
CW7_c1, CW7_c2 = cPointP(C, 'CW7_c1', c1[0]), cPointP(C, 'CW7_c2', c2[0]) # b/w CW6 & CW7
CW8_c1, CW8_c2 = cPointP(C, 'CW8_c1', c1[1]), cPointP(C, 'CW8_c2', c2[1]) # b/w CW7 & CW8
CW9_c1, CW9_c2 = cPointP(C, 'CW9_c1', c1[2]), cPointP(C, 'CW9_c2', c2[2]) # b/w CW8 & CW9
CW10_c1, CW10_c2 = cPointP(C, 'CW10_c1', c1[3]), cPointP(C, 'CW10_c2', c2[3]) # b/w CW9 & CW10
# grainline points & label location
CG1 = rPoint(C, 'CG1', CW6.x, CW6.y + (abs(CW6.y - CW7.y)/2.0))
CG2 = rPointP(C, 'CG2', pntFromDistanceAndAngleP(CG1, 6.5*IN, angleOfDegree(45.0)))
C.label_x, C.label_y = CW6.x + 0.25*IN, CW6.y + 0.25*IN
# waistfacing grid
grid = path()
addToPath(grid, 'M', CW1, 'L', CW2, 'L', CW3, 'L', CW4, 'L', CW5, 'L', CW6, 'L', CW7, 'L', CW8, 'L', CW9, 'L', CW10, 'L', CW1)
seamLine = path()
cuttingLine = path()
for p in seamLine, cuttingLine:
addToPath(p, 'M', CW1, 'C', CW2_c1, CW2_c2, CW2, 'C', CW3_c1, CW3_c2, CW3, 'C', CW4_c1, CW4_c2, CW4, 'C', CW5_c1, CW5_c2, CW5) # lower waistband
addToPath(p, 'L', CW6, 'C', CW7_c1, CW7_c2, CW7, 'C', CW8_c1, CW8_c2, CW8, 'C', CW9_c1, CW9_c2, CW9, 'C', CW10_c1, CW10_c2, CW10, 'L', CW1) # upper waistband
# add grid, grainline, seamline & cuttingline paths to pattern
addGrainLine(C, CG1, CG2)
addGridLine(C, grid)
addSeamLine(C, seamLine)
addCuttingLine(C, cuttingLine)
# call draw once for the entire pattern
doc.draw()
return
# vi:set ts=4 sw=4 expandtab:
| gpl-3.0 |
twinaphex/pcsx2 | 3rdparty/portaudio/doc/utils/checkfiledocs.py | 70 | 2415 | import os
import os.path
import string
paRootDirectory = '../../'
paHtmlDocDirectory = os.path.join( paRootDirectory, "doc", "html" )
## Script to check documentation status
## this script assumes that html doxygen documentation has been generated
##
## it then walks the entire portaudio source tree and checks that
## - every source file (.c,.h,.cpp) has a doxygen comment block containing
## - a @file directive
## - a @brief directive
## - a @ingroup directive
## - it also checks that a corresponding html documentation file has been generated.
##
## This can be used as a first-level check to make sure the documentation is in order.
##
## The idea is to get a list of which files are missing doxygen documentation.
# recurse from top and return a list of all with the given
# extensions. ignore .svn directories. return absolute paths
def recursiveFindFiles( top, extensions, includePaths ):
result = []
for (dirpath, dirnames, filenames) in os.walk(top):
if not '.svn' in dirpath:
for f in filenames:
if os.path.splitext(f)[1] in extensions:
if includePaths:
result.append( os.path.abspath( os.path.join( dirpath, f ) ) )
else:
result.append( f )
return result
# generate the html file name that doxygen would use for
# a particular source file. this is a brittle conversion
# which i worked out by trial and error
def doxygenHtmlDocFileName( sourceFile ):
return sourceFile.replace( '_', '__' ).replace( '.', '_8' ) + '.html'
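# Illustrative example (added comment; the filename is hypothetical):
#   doxygenHtmlDocFileName('pa_front.c') -> 'pa__front_8c.html'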
sourceFiles = recursiveFindFiles( paRootDirectory, [ '.c', '.h', '.cpp' ], True );
docFiles = recursiveFindFiles( paHtmlDocDirectory, [ '.html' ], False );
currentFile = ""
def printError( f, message ):
global currentFile
if f != currentFile:
currentFile = f
print f, ":"
print "\t!", message
for f in sourceFiles:
if not doxygenHtmlDocFileName( os.path.basename(f) ) in docFiles:
printError( f, "no doxygen generated doc page" )
s = file( f, 'rt' ).read()
if not '/**' in s:
printError( f, "no doxygen /** block" )
if not '@file' in s:
printError( f, "no doxygen @file tag" )
if not '@brief' in s:
printError( f, "no doxygen @brief tag" )
if not '@ingroup' in s:
printError( f, "no doxygen @ingroup tag" )
| lgpl-3.0 |
Asana/boto | tests/unit/vpc/test_vpc.py | 100 | 13752 | # -*- coding: UTF-8 -*-
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, VPC
from boto.ec2.securitygroup import SecurityGroup
DESCRIBE_VPCS = b'''<?xml version="1.0" encoding="UTF-8"?>
<DescribeVpcsResponse xmlns="http://ec2.amazonaws.com/doc/2013-02-01/">
<requestId>623040d1-b51c-40bc-8080-93486f38d03d</requestId>
<vpcSet>
<item>
<vpcId>vpc-12345678</vpcId>
<state>available</state>
<cidrBlock>172.16.0.0/16</cidrBlock>
<dhcpOptionsId>dopt-12345678</dhcpOptionsId>
<instanceTenancy>default</instanceTenancy>
<isDefault>false</isDefault>
</item>
</vpcSet>
</DescribeVpcsResponse>'''
class TestDescribeVPCs(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return DESCRIBE_VPCS
def test_get_vpcs(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.get_all_vpcs()
self.assertEqual(len(api_response), 1)
vpc = api_response[0]
self.assertFalse(vpc.is_default)
self.assertEqual(vpc.instance_tenancy, 'default')
class TestCreateVpc(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<CreateVpcResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<vpc>
<vpcId>vpc-1a2b3c4d</vpcId>
<state>pending</state>
<cidrBlock>10.0.0.0/16</cidrBlock>
<dhcpOptionsId>dopt-1a2b3c4d2</dhcpOptionsId>
<instanceTenancy>default</instanceTenancy>
<tagSet/>
</vpc>
</CreateVpcResponse>
"""
def test_create_vpc(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.create_vpc('10.0.0.0/16', 'default')
self.assert_request_parameters({
'Action': 'CreateVpc',
'InstanceTenancy': 'default',
'CidrBlock': '10.0.0.0/16'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertIsInstance(api_response, VPC)
self.assertEquals(api_response.id, 'vpc-1a2b3c4d')
self.assertEquals(api_response.state, 'pending')
self.assertEquals(api_response.cidr_block, '10.0.0.0/16')
self.assertEquals(api_response.dhcp_options_id, 'dopt-1a2b3c4d2')
self.assertEquals(api_response.instance_tenancy, 'default')
class TestDeleteVpc(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DeleteVpcResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</DeleteVpcResponse>
"""
def test_delete_vpc(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.delete_vpc('vpc-1a2b3c4d')
self.assert_request_parameters({
'Action': 'DeleteVpc',
'VpcId': 'vpc-1a2b3c4d'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestModifyVpcAttribute(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<ModifyVpcAttributeResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
<requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
<return>true</return>
</ModifyVpcAttributeResponse>
"""
def test_modify_vpc_attribute_dns_support(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.modify_vpc_attribute(
'vpc-1a2b3c4d', enable_dns_support=True)
self.assert_request_parameters({
'Action': 'ModifyVpcAttribute',
'VpcId': 'vpc-1a2b3c4d',
'EnableDnsSupport.Value': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
def test_modify_vpc_attribute_dns_hostnames(self):
self.set_http_response(status_code=200)
api_response = self.service_connection.modify_vpc_attribute(
'vpc-1a2b3c4d', enable_dns_hostnames=True)
self.assert_request_parameters({
'Action': 'ModifyVpcAttribute',
'VpcId': 'vpc-1a2b3c4d',
'EnableDnsHostnames.Value': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp',
'Version'])
self.assertEquals(api_response, True)
class TestGetAllClassicLinkVpc(AWSMockServiceTestCase):
connection_class = VPCConnection
def default_body(self):
return b"""
<DescribeVpcClassicLinkResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
<requestId>2484655d-d669-4950-bf55-7ba559805d36</requestId>
<vpcSet>
<item>
<vpcId>vpc-6226ab07</vpcId>
<classicLinkEnabled>false</classicLinkEnabled>
<tagSet>
<item>
<key>Name</key>
                                <value>hello</value>
</item>
</tagSet>
</item>
<item>
<vpcId>vpc-9d24f8f8</vpcId>
<classicLinkEnabled>true</classicLinkEnabled>
<tagSet/>
</item>
</vpcSet>
</DescribeVpcClassicLinkResponse>
"""
def test_get_all_classic_link_vpcs(self):
self.set_http_response(status_code=200)
response = self.service_connection.get_all_classic_link_vpcs()
self.assertEqual(len(response), 2)
vpc = response[0]
self.assertEqual(vpc.id, 'vpc-6226ab07')
self.assertEqual(vpc.classic_link_enabled, 'false')
self.assertEqual(vpc.tags, {'Name': 'hello'})
def test_get_all_classic_link_vpcs_params(self):
self.set_http_response(status_code=200)
self.service_connection.get_all_classic_link_vpcs(
vpc_ids=['id1', 'id2'],
filters={'GroupId': 'sg-9b4343fe'},
dry_run=True,
)
self.assert_request_parameters({
'Action': 'DescribeVpcClassicLink',
'VpcId.1': 'id1',
'VpcId.2': 'id2',
'Filter.1.Name': 'GroupId',
'Filter.1.Value.1': 'sg-9b4343fe',
'DryRun': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
class TestVpcClassicLink(AWSMockServiceTestCase):
connection_class = VPCConnection
def setUp(self):
super(TestVpcClassicLink, self).setUp()
self.vpc = VPC(self.service_connection)
self.vpc_id = 'myid'
self.vpc.id = self.vpc_id
class TestAttachClassicLinkVpc(TestVpcClassicLink):
def default_body(self):
return b"""
<AttachClassicLinkVpcResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
<requestId>88673bdf-cd16-40bf-87a1-6132fec47257</requestId>
<return>true</return>
</AttachClassicLinkVpcResponse>
"""
def test_attach_classic_link_instance_string_groups(self):
groups = ['sg-foo', 'sg-bar']
self.set_http_response(status_code=200)
response = self.vpc.attach_classic_instance(
instance_id='my_instance_id',
groups=groups,
dry_run=True
)
self.assertTrue(response)
self.assert_request_parameters({
'Action': 'AttachClassicLinkVpc',
'VpcId': self.vpc_id,
'InstanceId': 'my_instance_id',
'SecurityGroupId.1': 'sg-foo',
'SecurityGroupId.2': 'sg-bar',
'DryRun': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
def test_attach_classic_link_instance_object_groups(self):
sec_group_1 = SecurityGroup()
sec_group_1.id = 'sg-foo'
sec_group_2 = SecurityGroup()
sec_group_2.id = 'sg-bar'
groups = [sec_group_1, sec_group_2]
self.set_http_response(status_code=200)
response = self.vpc.attach_classic_instance(
instance_id='my_instance_id',
groups=groups,
dry_run=True
)
self.assertTrue(response)
self.assert_request_parameters({
'Action': 'AttachClassicLinkVpc',
'VpcId': self.vpc_id,
'InstanceId': 'my_instance_id',
'SecurityGroupId.1': 'sg-foo',
'SecurityGroupId.2': 'sg-bar',
'DryRun': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
class TestDetachClassicLinkVpc(TestVpcClassicLink):
def default_body(self):
return b"""
<DetachClassicLinkVpcResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
<requestId>5565033d-1321-4eef-b121-6aa46f152ed7</requestId>
<return>true</return>
</DetachClassicLinkVpcResponse>
"""
def test_detach_classic_link_instance(self):
self.set_http_response(status_code=200)
response = self.vpc.detach_classic_instance(
instance_id='my_instance_id',
dry_run=True
)
self.assertTrue(response)
self.assert_request_parameters({
'Action': 'DetachClassicLinkVpc',
'VpcId': self.vpc_id,
'InstanceId': 'my_instance_id',
'DryRun': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
class TestEnableClassicLinkVpc(TestVpcClassicLink):
def default_body(self):
return b"""
<EnableVpcClassicLinkResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
<requestId>4ab2b2b3-a267-4366-a070-bab853b5927d</requestId>
<return>true</return>
</EnableVpcClassicLinkResponse>
"""
def test_enable_classic_link(self):
self.set_http_response(status_code=200)
response = self.vpc.enable_classic_link(
dry_run=True
)
self.assertTrue(response)
self.assert_request_parameters({
'Action': 'EnableVpcClassicLink',
'VpcId': self.vpc_id,
'DryRun': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
class TestDisableClassicLinkVpc(TestVpcClassicLink):
def default_body(self):
return b"""
<DisableVpcClassicLinkResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
<requestId>4ab2b2b3-a267-4366-a070-bab853b5927d</requestId>
<return>true</return>
</DisableVpcClassicLinkResponse>
"""
    def test_disable_classic_link(self):
self.set_http_response(status_code=200)
response = self.vpc.disable_classic_link(
dry_run=True
)
self.assertTrue(response)
self.assert_request_parameters({
'Action': 'DisableVpcClassicLink',
'VpcId': self.vpc_id,
'DryRun': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
class TestUpdateClassicLinkVpc(TestVpcClassicLink):
def default_body(self):
return b"""
<DescribeVpcClassicLinkResponse xmlns="http://ec2.amazonaws.com/doc/2014-09-01/">
<requestId>2484655d-d669-4950-bf55-7ba559805d36</requestId>
<vpcSet>
<item>
<vpcId>myid</vpcId>
<classicLinkEnabled>true</classicLinkEnabled>
<tagSet/>
</item>
</vpcSet>
</DescribeVpcClassicLinkResponse>
"""
def test_vpc_update_classic_link_enabled(self):
self.vpc.classic_link_enabled = False
self.set_http_response(status_code=200)
self.vpc.update_classic_link_enabled(
dry_run=True,
validate=True
)
self.assert_request_parameters({
'Action': 'DescribeVpcClassicLink',
'VpcId.1': self.vpc_id,
'DryRun': 'true'},
ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Timestamp', 'Version'])
self.assertEqual(self.vpc.classic_link_enabled, 'true')
if __name__ == '__main__':
unittest.main()
| mit |
Parrot-Developers/ardupilot | mk/PX4/Tools/gencpp/src/gencpp/__init__.py | 214 | 9473 | # Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import genmsg.msgs
try:
from cStringIO import StringIO #Python 2.x
except ImportError:
from io import StringIO #Python 3.x
MSG_TYPE_TO_CPP = {'byte': 'int8_t',
'char': 'uint8_t',
'bool': 'uint8_t',
'uint8': 'uint8_t',
'int8': 'int8_t',
'uint16': 'uint16_t',
'int16': 'int16_t',
'uint32': 'uint32_t',
'int32': 'int32_t',
'uint64': 'uint64_t',
'int64': 'int64_t',
'float32': 'float',
'float64': 'double',
'string': 'std::basic_string<char, std::char_traits<char>, typename ContainerAllocator::template rebind<char>::other > ',
'time': 'ros::Time',
'duration': 'ros::Duration'}
#used
def msg_type_to_cpp(type):
"""
Converts a message type (e.g. uint32, std_msgs/String, etc.) into the C++ declaration
for that type (e.g. uint32_t, std_msgs::String_<ContainerAllocator>)
@param type: The message type
@type type: str
@return: The C++ declaration
@rtype: str
"""
(base_type, is_array, array_len) = genmsg.msgs.parse_type(type)
cpp_type = None
if (genmsg.msgs.is_builtin(base_type)):
cpp_type = MSG_TYPE_TO_CPP[base_type]
elif (len(base_type.split('/')) == 1):
if (genmsg.msgs.is_header_type(base_type)):
cpp_type = ' ::std_msgs::Header_<ContainerAllocator> '
else:
cpp_type = '%s_<ContainerAllocator> '%(base_type)
else:
pkg = base_type.split('/')[0]
msg = base_type.split('/')[1]
cpp_type = ' ::%s::%s_<ContainerAllocator> '%(pkg, msg)
if (is_array):
if (array_len is None):
return 'std::vector<%s, typename ContainerAllocator::template rebind<%s>::other > '%(cpp_type, cpp_type)
else:
return 'boost::array<%s, %s> '%(cpp_type, array_len)
else:
return cpp_type
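# Illustrative examples of the conversion above (added comments, not part of the original module):
#   msg_type_to_cpp('uint32')          -> 'uint32_t'
#   msg_type_to_cpp('float32[4]')      -> 'boost::array<float, 4> '
#   msg_type_to_cpp('std_msgs/String') -> ' ::std_msgs::String_<ContainerAllocator> '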
def _escape_string(s):
s = s.replace('\\', '\\\\')
s = s.replace('"', '\\"')
return s
def escape_message_definition(definition):
lines = definition.splitlines()
if not lines:
lines.append('')
s = StringIO()
for line in lines:
line = _escape_string(line)
s.write('%s\\n\\\n'%(line))
val = s.getvalue()
s.close()
return val
#used2
def cpp_message_declarations(name_prefix, msg):
"""
Returns the different possible C++ declarations for a message given the message itself.
@param name_prefix: The C++ prefix to be prepended to the name, e.g. "std_msgs::"
@type name_prefix: str
@param msg: The message type
@type msg: str
    @return: A tuple of 3 different names.  cpp_message_declarations("std_msgs::", "String") returns the tuple
("std_msgs::String_", "std_msgs::String_<ContainerAllocator>", "std_msgs::String")
@rtype: str
"""
pkg, basetype = genmsg.names.package_resource_name(msg)
cpp_name = ' ::%s%s'%(name_prefix, msg)
if (pkg):
cpp_name = ' ::%s::%s'%(pkg, basetype)
return ('%s_'%(cpp_name), '%s_<ContainerAllocator> '%(cpp_name), '%s'%(cpp_name))
#todo
def is_fixed_length(spec, msg_context, includepath):
"""
Returns whether or not the message is fixed-length
@param spec: The message spec
@type spec: genmsg.msgs.MsgSpec
@param package: The package of the
@type package: str
"""
types = []
for field in spec.parsed_fields():
if (field.is_array and field.array_len is None):
return False
if (field.base_type == 'string'):
return False
if (not field.is_builtin):
types.append(field.base_type)
types = set(types)
for t in types:
t = genmsg.msgs.resolve_type(t, spec.package)
assert isinstance(includepath, dict)
new_spec = genmsg.msg_loader.load_msg_by_type(msg_context, t, includepath)
if (not is_fixed_length(new_spec, msg_context, includepath)):
return False
return True
#used2
def default_value(type):
"""
Returns the value to initialize a message member with. 0 for integer types, 0.0 for floating point, false for bool,
empty string for everything else
@param type: The type
@type type: str
"""
if type in ['byte', 'int8', 'int16', 'int32', 'int64',
'char', 'uint8', 'uint16', 'uint32', 'uint64']:
return '0'
elif type in ['float32', 'float64']:
return '0.0'
elif type == 'bool':
return 'false'
return ""
#used2
def takes_allocator(type):
"""
Returns whether or not a type can take an allocator in its constructor. False for all builtin types except string.
True for all others.
@param type: The type
@type: str
"""
return not type in ['byte', 'int8', 'int16', 'int32', 'int64',
'char', 'uint8', 'uint16', 'uint32', 'uint64',
'float32', 'float64', 'bool', 'time', 'duration']
def escape_string(str):
str = str.replace('\\', '\\\\')
str = str.replace('"', '\\"')
return str
#used
def generate_fixed_length_assigns(spec, container_gets_allocator, cpp_name_prefix):
"""
    Yield the assignment statements that initialize any fixed-length arrays
@param spec: The message spec
@type spec: genmsg.msgs.MsgSpec
@param container_gets_allocator: Whether or not a container type (whether it's another message, a vector, array or string)
should have the allocator passed to its constructor. Assumes the allocator is named _alloc.
@type container_gets_allocator: bool
@param cpp_name_prefix: The C++ prefix to use when referring to the message, e.g. "std_msgs::"
@type cpp_name_prefix: str
"""
# Assign all fixed-length arrays their default values
for field in spec.parsed_fields():
if (not field.is_array or field.array_len is None):
continue
val = default_value(field.base_type)
if (container_gets_allocator and takes_allocator(field.base_type)):
# String is a special case, as it is the only builtin type that takes an allocator
if (field.base_type == "string"):
string_cpp = msg_type_to_cpp("string")
yield ' %s.assign(%s(_alloc));\n'%(field.name, string_cpp)
else:
(cpp_msg_unqualified, cpp_msg_with_alloc, _) = cpp_message_declarations(cpp_name_prefix, field.base_type)
yield ' %s.assign(%s(_alloc));\n'%(field.name, cpp_msg_with_alloc)
elif (len(val) > 0):
yield ' %s.assign(%s);\n'%(field.name, val)
#used
def generate_initializer_list(spec, container_gets_allocator):
"""
    Yields the entries of the initializer list for a constructor.
@param spec: The message spec
@type spec: genmsg.msgs.MsgSpec
@param container_gets_allocator: Whether or not a container type (whether it's another message, a vector, array or string)
should have the allocator passed to its constructor. Assumes the allocator is named _alloc.
@type container_gets_allocator: bool
"""
op = ':'
for field in spec.parsed_fields():
val = default_value(field.base_type)
use_alloc = takes_allocator(field.base_type)
if (field.is_array):
if (field.array_len is None and container_gets_allocator):
yield ' %s %s(_alloc)'%(op, field.name)
else:
yield ' %s %s()'%(op, field.name)
else:
if (container_gets_allocator and use_alloc):
yield ' %s %s(_alloc)'%(op, field.name)
else:
yield ' %s %s(%s)'%(op, field.name, val)
op = ','
| gpl-3.0 |
analyseuc3m/ANALYSE-v1 | openedx/core/djangolib/markup.py | 5 | 1091 | """
Utilities for use in Mako markup.
"""
import markupsafe
# Text() can be used to declare a string as plain text, as HTML() is used
# for HTML. It simply wraps markupsafe's escape, which will HTML-escape if
# it isn't already escaped.
Text = markupsafe.escape # pylint: disable=invalid-name
def HTML(html): # pylint: disable=invalid-name
"""
Mark a string as already HTML, so that it won't be escaped before output.
Use this function when formatting HTML into other strings. It must be
used in conjunction with ``Text()``, and both ``HTML()`` and ``Text()``
must be closed before any calls to ``format()``::
<%page expression_filter="h"/>
<%!
from django.utils.translation import ugettext as _
from openedx.core.djangolib.markup import Text, HTML
%>
${Text(_("Write & send {start}email{end}")).format(
start=HTML("<a href='mailto:{}'>".format(user.email),
end=HTML("</a>"),
)}
"""
return markupsafe.Markup(html)
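# Hedged usage sketch (not part of the original module): a small helper showing
# how Text() and HTML() compose outside of a Mako template. The parameter names
# are hypothetical; Markup.format() escapes its arguments, so untrusted values
# are HTML-escaped while the surrounding markup is preserved.
def _example_mailto_link(email_address, link_text):
    # email_address and link_text are escaped on insertion; the <a> tag is not.
    return HTML("<a href='mailto:{}'>{}</a>").format(email_address, Text(link_text))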
| agpl-3.0 |
dagwieers/ansible | test/units/modules/network/aireos/test_aireos_command.py | 52 | 4292 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.aireos import aireos_command
from units.modules.utils import set_module_args
from .aireos_module import TestCiscoWlcModule, load_fixture
class TestCiscoWlcCommandModule(TestCiscoWlcModule):
module = aireos_command
def setUp(self):
super(TestCiscoWlcCommandModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.aireos.aireos_command.run_commands')
self.run_commands = self.mock_run_commands.start()
def tearDown(self):
super(TestCiscoWlcCommandModule, self).tearDown()
self.mock_run_commands.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for item in commands:
try:
obj = json.loads(item['command'])
command = obj['command']
except ValueError:
command = item['command']
filename = str(command).replace(' ', '_')
output.append(load_fixture(filename))
return output
self.run_commands.side_effect = load_from_file
def test_aireos_command_simple(self):
set_module_args(dict(commands=['show sysinfo']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 1)
self.assertTrue(result['stdout'][0].startswith('Manufacturer\'s Name'))
def test_aireos_command_multiple(self):
set_module_args(dict(commands=['show sysinfo', 'show sysinfo']))
result = self.execute_module()
self.assertEqual(len(result['stdout']), 2)
self.assertTrue(result['stdout'][0].startswith('Manufacturer\'s Name'))
def test_aireos_command_wait_for(self):
wait_for = 'result[0] contains "Cisco Systems Inc"'
set_module_args(dict(commands=['show sysinfo'], wait_for=wait_for))
self.execute_module()
def test_aireos_command_wait_for_fails(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show sysinfo'], wait_for=wait_for))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 10)
def test_aireos_command_retries(self):
wait_for = 'result[0] contains "test string"'
set_module_args(dict(commands=['show sysinfo'], wait_for=wait_for, retries=2))
self.execute_module(failed=True)
self.assertEqual(self.run_commands.call_count, 2)
def test_aireos_command_match_any(self):
wait_for = ['result[0] contains "Cisco Systems Inc"',
'result[0] contains "test string"']
set_module_args(dict(commands=['show sysinfo'], wait_for=wait_for, match='any'))
self.execute_module()
def test_aireos_command_match_all(self):
wait_for = ['result[0] contains "Cisco Systems Inc"',
'result[0] contains "Cisco Controller"']
set_module_args(dict(commands=['show sysinfo'], wait_for=wait_for, match='all'))
self.execute_module()
def test_aireos_command_match_all_failure(self):
wait_for = ['result[0] contains "Cisco Systems Inc"',
'result[0] contains "test string"']
commands = ['show sysinfo', 'show sysinfo']
set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
self.execute_module(failed=True)
| gpl-3.0 |
felliott/osf.io | osf_tests/test_registration_moderation_notifications.py | 4 | 24500 | import pytest
import mock
from mock import call
from django.utils import timezone
from osf.management.commands.add_notification_subscription import add_reviews_notification_setting
from osf.management.commands.populate_registration_provider_notification_subscriptions import populate_registration_provider_notification_subscriptions
from osf.migrations import update_provider_auth_groups
from osf.models import Brand, NotificationDigest
from osf.models.action import RegistrationAction
from osf.utils import machines
from osf.utils.notifications import (
notify_submit,
notify_accept_reject,
notify_moderator_registration_requests_withdrawal,
notify_reject_withdraw_request,
notify_withdraw_registration
)
from osf.utils.workflows import RegistrationModerationTriggers, RegistrationModerationStates
from osf_tests.factories import (
RegistrationFactory,
AuthUserFactory,
RetractionFactory
)
from website import mails, settings
from website.notifications import emails, tasks
from website.reviews import listeners
def get_moderator(provider):
user = AuthUserFactory()
provider.add_to_group(user, 'moderator')
return user
def get_daily_moderator(provider):
user = AuthUserFactory()
provider.add_to_group(user, 'moderator')
for subscription_type in provider.DEFAULT_SUBSCRIPTIONS:
subscription = provider.notification_subscriptions.get(event_name=subscription_type)
subscription.add_user_to_subscription(user, 'email_digest')
return user
# Set USE_EMAIL to true and mock out the default mailer for consistency with other mocked settings
@mock.patch('website.mails.settings.USE_EMAIL', True)
@mock.patch('website.mails.tasks.send_email', mock.MagicMock())
@pytest.mark.django_db
class TestRegistrationMachineNotification:
MOCK_NOW = timezone.now()
@pytest.yield_fixture(autouse=True)
def setup(self):
populate_registration_provider_notification_subscriptions()
with mock.patch('osf.utils.machines.timezone.now', return_value=self.MOCK_NOW):
yield
@pytest.fixture()
def contrib(self):
return AuthUserFactory()
@pytest.fixture()
def admin(self):
return AuthUserFactory()
@pytest.fixture()
def registration(self, admin, contrib):
registration = RegistrationFactory(creator=admin)
registration.add_contributor(admin, 'admin')
registration.add_contributor(contrib, 'write')
update_provider_auth_groups()
return registration
@pytest.fixture()
def registration_with_retraction(self, admin, contrib):
sanction = RetractionFactory(user=admin)
registration = sanction.target_registration
registration.update_moderation_state()
registration.add_contributor(admin, 'admin')
registration.add_contributor(contrib, 'write')
registration.save()
return registration
@pytest.fixture()
def provider(self, registration):
return registration.provider
@pytest.fixture()
def moderator(self, provider):
user = AuthUserFactory()
provider.add_to_group(user, 'moderator')
return user
@pytest.fixture()
def daily_moderator(self, provider):
user = AuthUserFactory()
provider.add_to_group(user, 'moderator')
for subscription_type in provider.DEFAULT_SUBSCRIPTIONS:
subscription = provider.notification_subscriptions.get(event_name=subscription_type)
subscription.add_user_to_subscription(user, 'email_digest')
return user
@pytest.fixture()
def accept_action(self, registration, admin):
registration_action = RegistrationAction.objects.create(
creator=admin,
target=registration,
trigger=RegistrationModerationTriggers.ACCEPT_SUBMISSION.db_name,
from_state=RegistrationModerationStates.INITIAL.db_name,
to_state=RegistrationModerationStates.ACCEPTED.db_name,
comment='yo'
)
return registration_action
@pytest.fixture()
def withdraw_request_action(self, registration, admin):
registration_action = RegistrationAction.objects.create(
creator=admin,
target=registration,
trigger=RegistrationModerationTriggers.REQUEST_WITHDRAWAL.db_name,
from_state=RegistrationModerationStates.ACCEPTED.db_name,
to_state=RegistrationModerationStates.PENDING_WITHDRAW.db_name,
comment='yo'
)
return registration_action
@pytest.fixture()
def withdraw_action(self, registration, admin):
registration_action = RegistrationAction.objects.create(
creator=admin,
target=registration,
trigger=RegistrationModerationTriggers.ACCEPT_WITHDRAWAL.db_name,
from_state=RegistrationModerationStates.PENDING_WITHDRAW.db_name,
to_state=RegistrationModerationStates.WITHDRAWN.db_name,
comment='yo'
)
return registration_action
def test_submit_notifications(self, registration, moderator, admin, contrib, provider):
"""
[REQS-96] "As moderator of branded registry, I receive email notification upon admin author(s) submission approval"
:param mock_email:
:param draft_registration:
:return:
"""
# Set up mock_send_mail as a pass-through to the original function.
# This lets us assert on the call/args and also implicitly ensures
        # that the email actually renders as normal in send_mail.
send_mail = mails.send_mail
with mock.patch.object(listeners.mails, 'send_mail', side_effect=send_mail) as mock_send_mail:
notify_submit(registration, admin)
assert len(mock_send_mail.call_args_list) == 2
admin_message, contrib_message = mock_send_mail.call_args_list
assert admin_message == call(
admin.email,
mails.REVIEWS_SUBMISSION_CONFIRMATION,
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration.draft_registration.get(),
is_creator=True,
logo='osf_registries',
no_future_emails=[],
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_name=provider.name,
provider_url='http://localhost:5000/',
referrer=admin,
reviewable=registration,
user=admin,
workflow=None
)
assert contrib_message == call(
contrib.email,
mails.REVIEWS_SUBMISSION_CONFIRMATION,
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration.draft_registration.get(),
is_creator=False,
logo='osf_registries',
no_future_emails=[],
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_name=provider.name,
provider_url='http://localhost:5000/',
referrer=admin,
reviewable=registration,
user=contrib,
workflow=None
)
assert NotificationDigest.objects.count() == 1
digest = NotificationDigest.objects.last()
assert digest.user == moderator
assert digest.send_type == 'email_transactional'
assert digest.event == 'new_pending_submissions'
def test_accept_notifications(self, registration, moderator, admin, contrib, accept_action):
"""
[REQS-98] "As registration authors, we receive email notification upon moderator acceptance"
:param draft_registration:
:return:
"""
add_reviews_notification_setting('global_reviews')
# Set up mock_email as a pass-through to the original function.
# This lets us assert on the call count/args and also implicitly
        # ensures that the email actually renders correctly.
store_emails = emails.store_emails
with mock.patch.object(emails, 'store_emails', side_effect=store_emails) as mock_email:
notify_accept_reject(registration, registration.creator, accept_action, RegistrationModerationStates)
assert len(mock_email.call_args_list) == 2
admin_message, contrib_message = mock_email.call_args_list
assert admin_message == call(
[admin._id],
'email_transactional',
'global_reviews',
admin,
registration,
self.MOCK_NOW,
comment='yo',
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration.draft_registration.get(),
has_psyarxiv_chronos_text=False,
is_creator=True,
is_rejected=False,
notify_comment='yo',
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
requester=admin,
reviewable=registration,
template='reviews_submission_status',
was_pending=False,
workflow=None
)
assert contrib_message == call(
[contrib._id],
'email_transactional',
'global_reviews',
admin,
registration,
self.MOCK_NOW,
comment='yo',
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration.draft_registration.get(),
has_psyarxiv_chronos_text=False,
is_creator=False,
is_rejected=False,
notify_comment='yo',
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
reviewable=registration,
requester=admin,
template='reviews_submission_status',
was_pending=False,
workflow=None
)
def test_reject_notifications(self, registration, moderator, admin, contrib, accept_action):
"""
[REQS-100] "As authors of rejected by moderator registration, we receive email notification of registration returned
to draft state"
:param draft_registration:
:return:
"""
add_reviews_notification_setting('global_reviews')
# Set up mock_email as a pass-through to the original function.
# This lets us assert on the call count/args and also implicitly
        # ensures that the email actually renders correctly
store_emails = emails.store_emails
with mock.patch.object(emails, 'store_emails', side_effect=store_emails) as mock_email:
notify_accept_reject(registration, registration.creator, accept_action, RegistrationModerationStates)
assert len(mock_email.call_args_list) == 2
admin_message, contrib_message = mock_email.call_args_list
assert admin_message == call(
[admin._id],
'email_transactional',
'global_reviews',
admin,
registration,
self.MOCK_NOW,
comment='yo',
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration.draft_registration.get(),
has_psyarxiv_chronos_text=False,
is_creator=True,
is_rejected=False,
notify_comment='yo',
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
reviewable=registration,
requester=admin,
template='reviews_submission_status',
was_pending=False,
workflow=None
)
assert contrib_message == call(
[contrib._id],
'email_transactional',
'global_reviews',
admin,
registration,
self.MOCK_NOW,
comment='yo',
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration.draft_registration.get(),
has_psyarxiv_chronos_text=False,
is_creator=False,
is_rejected=False,
notify_comment='yo',
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
reviewable=registration,
requester=admin,
template='reviews_submission_status',
was_pending=False,
workflow=None
)
def test_notify_moderator_registration_requests_withdrawal_notifications(self, moderator, daily_moderator, registration, admin, provider):
"""
[REQS-106] "As moderator, I receive registration withdrawal request notification email"
:param mock_email:
:param draft_registration:
:param contrib:
:return:
"""
assert NotificationDigest.objects.count() == 0
notify_moderator_registration_requests_withdrawal(registration, admin)
assert NotificationDigest.objects.count() == 2
daily_digest = NotificationDigest.objects.get(send_type='email_digest')
transactional_digest = NotificationDigest.objects.get(send_type='email_transactional')
assert daily_digest.user == daily_moderator
assert transactional_digest.user == moderator
for digest in (daily_digest, transactional_digest):
assert 'requested withdrawal' in digest.message
assert digest.event == 'new_pending_withdraw_requests'
assert digest.provider == provider
def test_withdrawal_registration_accepted_notifications(self, registration_with_retraction, contrib, admin, withdraw_action):
"""
[REQS-109] "As registration author(s) requesting registration withdrawal, we receive notification email of moderator
decision"
:param mock_email:
:param draft_registration:
:param contrib:
:return:
"""
# Set up mock_send_mail as a pass-through to the original function.
# This lets us assert on the call count/args and also implicitly
        # ensures that the email actually renders as normal in send_mail.
send_mail = mails.send_mail
with mock.patch.object(machines.mails, 'send_mail', side_effect=send_mail) as mock_email:
notify_withdraw_registration(registration_with_retraction, withdraw_action)
assert len(mock_email.call_args_list) == 2
admin_message, contrib_message = mock_email.call_args_list
assert admin_message == call(
admin.email,
mails.WITHDRAWAL_REQUEST_GRANTED,
comment='yo',
contributor=admin,
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration_with_retraction.draft_registration.get(),
is_requester=True,
force_withdrawal=False,
notify_comment='yo',
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
requester=admin,
reviewable=registration_with_retraction,
workflow=None
)
assert contrib_message == call(
contrib.email,
mails.WITHDRAWAL_REQUEST_GRANTED,
comment='yo',
contributor=contrib,
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration_with_retraction.draft_registration.get(),
is_requester=False,
force_withdrawal=False,
notify_comment='yo',
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
requester=admin,
reviewable=registration_with_retraction,
workflow=None
)
def test_withdrawal_registration_rejected_notifications(self, registration, contrib, admin, withdraw_request_action):
"""
[REQS-109] "As registration author(s) requesting registration withdrawal, we receive notification email of moderator
decision"
:param mock_email:
:param draft_registration:
:param contrib:
:return:
"""
# Set up mock_send_mail as a pass-through to the original function.
# This lets us assert on the call count/args and also implicitly
        # ensures that the email actually renders as normal in send_mail.
send_mail = mails.send_mail
with mock.patch.object(machines.mails, 'send_mail', side_effect=send_mail) as mock_email:
notify_reject_withdraw_request(registration, withdraw_request_action)
assert len(mock_email.call_args_list) == 2
admin_message, contrib_message = mock_email.call_args_list
assert admin_message == call(
admin.email,
mails.WITHDRAWAL_REQUEST_DECLINED,
contributor=admin,
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration.draft_registration.get(),
is_requester=True,
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
requester=admin,
reviewable=registration,
workflow=None
)
assert contrib_message == call(
contrib.email,
mails.WITHDRAWAL_REQUEST_DECLINED,
contributor=contrib,
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration.draft_registration.get(),
is_requester=False,
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
requester=admin,
reviewable=registration,
workflow=None
)
def test_withdrawal_registration_force_notifications(self, registration_with_retraction, contrib, admin, withdraw_action):
"""
[REQS-109] "As registration author(s) requesting registration withdrawal, we receive notification email of moderator
decision"
:param mock_email:
:param draft_registration:
:param contrib:
:return:
"""
# Set up mock_send_mail as a pass-through to the original function.
# This lets us assert on the call count/args and also implicitly
        # ensures that the email actually renders as normal in send_mail.
send_mail = mails.send_mail
with mock.patch.object(machines.mails, 'send_mail', side_effect=send_mail) as mock_email:
notify_withdraw_registration(registration_with_retraction, withdraw_action)
assert len(mock_email.call_args_list) == 2
admin_message, contrib_message = mock_email.call_args_list
assert admin_message == call(
admin.email,
mails.WITHDRAWAL_REQUEST_GRANTED,
comment='yo',
contributor=admin,
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration_with_retraction.draft_registration.get(),
is_requester=True,
force_withdrawal=False,
notify_comment='yo',
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
requester=admin,
reviewable=registration_with_retraction,
workflow=None
)
assert contrib_message == call(
contrib.email,
mails.WITHDRAWAL_REQUEST_GRANTED,
comment='yo',
contributor=contrib,
document_type='registration',
domain='http://localhost:5000/',
draft_registration=registration_with_retraction.draft_registration.get(),
is_requester=False,
force_withdrawal=False,
notify_comment='yo',
provider_contact_email=settings.OSF_CONTACT_EMAIL,
provider_support_email=settings.OSF_SUPPORT_EMAIL,
provider_url='http://localhost:5000/',
requester=admin,
reviewable=registration_with_retraction,
workflow=None
)
@pytest.mark.parametrize(
'digest_type, expected_recipient',
[('email_transactional', get_moderator), ('email_digest', get_daily_moderator)]
)
def test_submissions_and_withdrawals_both_appear_in_moderator_digest(self, digest_type, expected_recipient, registration, admin, provider):
        # Invoke the fixture function to get the recipient, since parametrize can't request fixtures directly
expected_recipient = expected_recipient(provider)
with mock.patch('website.reviews.listeners.mails.send_mail'):
notify_submit(registration, admin)
notify_moderator_registration_requests_withdrawal(registration, admin)
# One user, one provider => one email
grouped_notifications = list(tasks.get_moderators_emails(digest_type))
assert len(grouped_notifications) == 1
moderator_message = grouped_notifications[0]
assert moderator_message['user_id'] == expected_recipient._id
assert moderator_message['provider_id'] == provider.id
        # No fixed ordering of the entries, so just make sure that
# keywords for each action type are in some message
updates = moderator_message['info']
assert len(updates) == 2
assert any('submitted' in entry['message'] for entry in updates)
assert any('requested withdrawal' in entry['message'] for entry in updates)
@pytest.mark.parametrize('digest_type', ['email_transactional', 'email_digest'])
def test_submsissions_and_withdrawals_do_not_appear_in_node_digest(self, digest_type, registration, admin, moderator, daily_moderator):
notify_submit(registration, admin)
notify_moderator_registration_requests_withdrawal(registration, admin)
assert not list(tasks.get_users_emails(digest_type))
def test_moderator_digest_emails_render(self, registration, admin, moderator):
notify_moderator_registration_requests_withdrawal(registration, admin)
# Set up mock_send_mail as a pass-through to the original function.
# This lets us assert on the call count/args and also implicitly
        # ensures that the email actually renders as normal in send_mail.
send_mail = mails.send_mail
with mock.patch.object(tasks.mails, 'send_mail', side_effect=send_mail) as mock_send_mail:
tasks._send_reviews_moderator_emails('email_transactional')
mock_send_mail.assert_called()
def test_branded_provider_notification_renders(self, registration, admin, moderator):
# Set brand details to be checked in notify_base.mako
provider = registration.provider
provider.brand = Brand.objects.create(hero_logo_image='not-a-url', primary_color='#FFA500')
provider.name = 'Test Provider'
provider.save()
# Implicitly check that all of our uses of notify_base.mako render with branded details:
#
# notify_submit renders reviews_submission_confirmation using context from
# osf.utils.notifications and stores emails to be picked up in the moderator digest
#
        # _send_reviews_moderator_emails renders digest_reviews_moderators using context from
# website.notifications.tasks
notify_submit(registration, admin)
tasks._send_reviews_moderator_emails('email_transactional')
assert True # everything rendered!
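# Hedged note (not part of the original suite): the "pass-through mock" pattern
# repeated above -- mock.patch.object(<module>.mails, 'send_mail',
# side_effect=mails.send_mail) -- keeps the real rendering while still letting
# each test assert on call args. A hypothetical helper could centralize it:
import contextlib

@contextlib.contextmanager
def _passthrough_send_mail(mails_module):
    # `mails_module` is the `mails` attribute of the module under test,
    # e.g. machines.mails, listeners.mails or tasks.mails.
    with mock.patch.object(mails_module, 'send_mail', side_effect=mails.send_mail) as mocked:
        yield mocked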
| apache-2.0 |
SSSD/sssd | src/tests/multihost/adsites/test_adsites.py | 1 | 13157 | from __future__ import print_function
import time
import pytest
from sssd.testlib.common.utils import sssdTools
@pytest.mark.adsites
class Testadsites(object):
"""
@Title: IDM-SSSD-TC: ad_provider: adsites:
Improve AD site discovery process
Test cases for BZ: 1819012
@Steps:
1. Join client to AD
2. Start SSSD and enable debug
3. Create secondary site, move second domain controller to second site
"""
@pytest.mark.adsites
def test_001_ad_startup_discovery(self, multihost, adjoin):
"""
@Title: IDM-SSSD-TC: ad_startup_discovery
* grep sssd domain logs for cldap ping
* grep sssd logs for cldap ping parallel batch
* grep sssd logs for cldap ping domain discovery
"""
adjoin(membersw='adcli')
client = sssdTools(multihost.client[0], multihost.ad[0])
domain = client.get_domain_section_name()
domain_section = 'domain/{}'.format(domain)
sssd_params = {'debug_level': '0xFFF0'}
client.sssd_conf(domain_section, sssd_params)
ad1 = multihost.ad[0].hostname
ad2 = multihost.ad[1].hostname
multihost.client[0].service_sssd('start')
cmd_id = 'id Administrator@%s' % domain
multihost.client[0].run_command(cmd_id)
cmd_check_ping = 'grep -ire ad_cldap_ping_send ' \
'/var/log/sssd/sssd_%s.log | ' \
'grep -ire \"Found 2 domain controllers in domain ' \
'Default-First-Site-Name._sites.%s\"'\
% (domain, domain)
check_ping = multihost.client[0].run_command(cmd_check_ping,
raiseonerr=False)
assert check_ping.returncode == 0
cmd_check_batch1 = 'grep -ire ad_cldap_ping_parallel_batch' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \" %s\"' % (domain, ad1)
check_batch1 = multihost.client[0].run_command(cmd_check_batch1,
raiseonerr=False)
cmd_check_batch2 = 'grep -ire ad_cldap_ping_parallel_batch' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \" %s\"' % (domain, ad2)
check_batch2 = multihost.client[0].run_command(cmd_check_batch2,
raiseonerr=False)
if check_batch1.returncode == 0 or check_batch2.returncode == 0:
assert True
else:
assert False
cmd_check_discovery = 'grep -ire ad_cldap_ping_domain_discovery_done' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \"Found 2 domain controllers in' \
' domain Default-First-Site-Name._sites.%s\"'\
% (domain, domain)
check_discovery = multihost.client[0].run_command(cmd_check_discovery,
raiseonerr=False)
assert check_discovery.returncode == 0
@pytest.mark.adsites
def test_002_ad_startup_discovery_one_server_unreachable(self, multihost,
adjoin):
"""
@Title: IDM-SSSD-TC: ad_startup_discovery_one_server_unreachable
* grep sssd domain logs for cldap ping
* grep sssd logs for cldap ping parallel batch
* grep sssd logs for cldap ping domain discovery
"""
adjoin(membersw='adcli')
client = sssdTools(multihost.client[0], multihost.ad[0])
domain = client.get_domain_section_name()
domain_section = 'domain/{}'.format(domain)
sssd_params = {'debug_level': '0xFFF0'}
client.sssd_conf(domain_section, sssd_params)
ad1 = multihost.ad[0].hostname
ad2 = multihost.ad[1].hostname
ad2ip = multihost.ad[1].ip
cmd_dnf_firewalld = 'dnf install -y firewalld'
multihost.client[0].run_command(cmd_dnf_firewalld)
cmd_start_firewalld = 'systemctl start firewalld'
multihost.client[0].run_command(cmd_start_firewalld)
fw_add = 'firewall-cmd --permanent --direct --add-rule ipv4 ' \
'filter OUTPUT 0 -d %s -j DROP' % ad2ip
fw_reload = 'firewall-cmd --reload'
multihost.client[0].run_command(fw_add, raiseonerr=True)
multihost.client[0].run_command(fw_reload, raiseonerr=True)
multihost.client[0].service_sssd('start')
cmd_check_ping = 'grep -ire ad_cldap_ping_send ' \
'/var/log/sssd/sssd_%s.log | ' \
'grep -ire \"Found 2 domain controllers in domain ' \
'Default-First-Site-Name._sites.%s\"'\
% (domain, domain)
check_ping = multihost.client[0].run_command(cmd_check_ping,
raiseonerr=False)
assert check_ping.returncode == 0
cmd_check_batch1 = 'grep -ire ad_cldap_ping_parallel_batch' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \" %s\"' % (domain, ad1)
check_batch1 = multihost.client[0].run_command(cmd_check_batch1,
raiseonerr=False)
cmd_check_batch2 = 'grep -ire ad_cldap_ping_parallel_batch' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \" %s\"' % (domain, ad2)
check_batch2 = multihost.client[0].run_command(cmd_check_batch2,
raiseonerr=False)
if check_batch1.returncode == 1 and check_batch2.returncode == 0:
assert True
else:
assert False
cmd_check_discovery = 'grep -ire ad_cldap_ping_domain_discovery_done' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \"Found 2 domain' \
' controllers in domain ' \
'Default-First-Site-Name._sites.%s\"'\
% (domain, domain)
check_discovery = multihost.client[0].run_command(cmd_check_discovery,
raiseonerr=False)
assert check_discovery.returncode == 0
fw_stop = 'systemctl stop firewalld'
multihost.client[0].run_command(fw_stop, raiseonerr=True)
fw_remove = 'dnf remove -y firewalld'
multihost.client[0].run_command(fw_remove, raiseonerr=True)
@pytest.mark.adsites
def test_003_ad_startup_discovery_two_different_sites(self, multihost,
adjoin, create_site):
"""
@Title: IDM-SSSD-TC: ad_startup_discovery_two_different_sites
* grep sssd domain logs for cldap ping
* grep sssd logs for cldap ping parallel batch
* grep sssd logs for cldap ping domain discovery
"""
adjoin(membersw='adcli')
client = sssdTools(multihost.client[0], multihost.ad[0])
domain = client.get_domain_section_name()
domain_section = 'domain/{}'.format(domain)
sssd_params = {'debug_level': '0xFFF0'}
client.sssd_conf(domain_section, sssd_params)
ad1 = multihost.ad[0].hostname
ad2 = multihost.ad[1].hostname
multihost.client[0].service_sssd('start')
cmd_check_ping = 'grep -ire ad_cldap_ping_send' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \"Found 2 domain controllers in domain ' \
'Default-First-Site-Name._sites.%s\"'\
% (domain, domain)
check_ping = multihost.client[0].run_command(cmd_check_ping,
raiseonerr=False)
assert check_ping.returncode == 0
cmd_check_batch1 = 'grep -ire ad_cldap_ping_parallel_batch' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \" %s\"' % (domain, ad1)
check_batch1 = multihost.client[0].run_command(cmd_check_batch1,
raiseonerr=False)
cmd_check_batch2 = 'grep -ire ad_cldap_ping_parallel_batch' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \" %s\"' % (domain, ad2)
check_batch2 = multihost.client[0].run_command(cmd_check_batch2,
raiseonerr=False)
if check_batch1.returncode == 0 or check_batch2.returncode == 0:
assert True
else:
assert False
cmd_check_discovery = 'grep -ire ad_cldap_ping_domain_discovery_done' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \"Found 2 domain' \
' controllers in domain ' \
'Default-First-Site-Name._sites.%s\"'\
% (domain, domain)
check_discovery = multihost.client[0].run_command(cmd_check_discovery,
raiseonerr=False)
assert check_discovery.returncode == 0
@pytest.mark.adsites
def test_004_ad_startup_discovery_one_server_unreachable(self,
multihost,
adjoin,
create_site):
"""
@Title: IDM-SSSD-TC:
ad_startup_discovery_two_different_sites_one_server_unreachable
* grep sssd domain logs for cldap ping
* grep sssd logs for cldap ping parallel batch
* grep sssd logs for cldap ping domain discovery
"""
adjoin(membersw='adcli')
client = sssdTools(multihost.client[0], multihost.ad[0])
domain = client.get_domain_section_name()
domain_section = 'domain/{}'.format(domain)
sssd_params = {'debug_level': '0xFFF0'}
client.sssd_conf(domain_section, sssd_params)
ad1 = multihost.ad[0].hostname
ad2 = multihost.ad[1].hostname
ad2ip = multihost.ad[1].ip
cmd_dnf_firewalld = 'dnf install -y firewalld'
multihost.client[0].run_command(cmd_dnf_firewalld)
cmd_start_firewalld = 'systemctl start firewalld'
multihost.client[0].run_command(cmd_start_firewalld)
fw_add = 'firewall-cmd --permanent --direct --add-rule ipv4 ' \
'filter OUTPUT 0 -d %s -j DROP' % ad2ip
fw_reload = 'firewall-cmd --reload'
multihost.client[0].run_command(fw_add, raiseonerr=True)
multihost.client[0].run_command(fw_reload, raiseonerr=True)
multihost.client[0].service_sssd('start')
cmd_check_ping = 'grep -ire ad_cldap_ping_send' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \"Found 2 domain controllers in domain ' \
'Default-First-Site-Name._sites.%s\"'\
% (domain, domain)
check_ping = multihost.client[0].run_command(cmd_check_ping,
raiseonerr=False)
assert check_ping.returncode == 0
cmd_check_batch1 = 'grep -ire ad_cldap_ping_parallel_batch' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \" %s\"' % (domain, ad1)
check_batch1 = multihost.client[0].run_command(cmd_check_batch1,
raiseonerr=False)
cmd_check_batch2 = 'grep -ire ad_cldap_ping_parallel_batch' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \" %s\"' % (domain, ad2)
check_batch2 = multihost.client[0].run_command(cmd_check_batch2,
raiseonerr=False)
if check_batch1.returncode == 1 and check_batch2.returncode == 0:
assert True
else:
assert False
cmd_check_discovery = 'grep -ire ad_cldap_ping_domain_discovery_done' \
' /var/log/sssd/sssd_%s.log | ' \
'grep -ire \"Found 2 domain' \
' controllers in domain ' \
'Default-First-Site-Name._sites.%s\"'\
% (domain, domain)
check_discovery = multihost.client[0].run_command(cmd_check_discovery,
raiseonerr=False)
assert check_discovery.returncode == 0
fw_stop = 'systemctl stop firewalld'
multihost.client[0].run_command(fw_stop, raiseonerr=True)
fw_remove = 'dnf remove -y firewalld'
multihost.client[0].run_command(fw_remove, raiseonerr=True)
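# Hedged note (not part of the original suite): the four tests above repeat the
# same grep pipelines against /var/log/sssd/sssd_<domain>.log. A hypothetical
# helper along these lines could reduce the duplication:
#
#   def _log_contains(multihost, domain, pattern):
#       cmd = "grep -ire '%s' /var/log/sssd/sssd_%s.log" % (pattern, domain)
#       res = multihost.client[0].run_command(cmd, raiseonerr=False)
#       return res.returncode == 0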
| gpl-3.0 |
Solanar/CMPUT410-Project | DisSoNet/front/views/author.py | 1 | 4859 | from django.http import HttpResponse
import json
import socket
from data.models import Comment
def processRequestFromOtherServer(obj, dict_type):
json_dict = {}
json_dict_list = []
if dict_type is "author":
json_dict_list.append(getAuthorDict(obj))
elif dict_type is "posts":
for posts in obj:
json_dict_list.append(getPostDict(posts))
elif dict_type is "comments":
json_dict_list = getCommentDictList(obj)
else:
print ("Unknown type")
json_dict[dict_type] = json_dict_list
json_data = json.dumps(json_dict)
return HttpResponse(json_data, content_type="application/json")
def getAuthorDict(user_obj, include_url=False):
""" Take a list of author objects, returns it's dict representations.
"author":
{
"id":"sha1",
"host":"host",
"displayname":"name",
"url":"url_to_author"
},
:returns: dict representation of an author object
"""
author_dict = {}
author_dict["id"] = user_obj.guid
author_dict["displayname"] = user_obj.get_full_name()
host = socket.gethostname() # only works if website running on port 80
ip = "http://10.4.10.2" # dat hard coding of values
port = ":8080/"
author_dict["host"] = ip + port # host
# why is this here?
# if include_url:
# author_dict["url"] = author_object.url
author_dict["url"] = ip + port + "author/" + user_obj.guid + "/"
return author_dict
def getPostDict(post_object):
""" From all post URLS should return a list of posts like the following.
Of the form:
{ "posts":[{"title":"string",
"source":"url",
"origin":"url",
"description":"string",
"content-type":"text/*",
"content":"string",
"author":{"id":"sha1",
"host":"host",
"displayname":"name",
"url":"url_to_author"},
"categories":["cat1", "cat2"],
"comments":[{"author":{"id":"sha1",
"host":"url",
"displayname":"name"},
"comment":"string",
"pubDate":"date",
"guid":"sha1"}]
"pubdate":"date",
"guid":"sha1",
"visibility":"PUBLIC"}]}
This function will return the representation of a post to go into this list
"""
post_dict = {}
post_dict["title"] = post_object.title
post_dict["source"] = post_object.source
post_dict["origin"] = post_object.origin
post_dict["description"] = post_object.description
post_dict["content-type"] = post_object.content_type
post_dict["content"] = post_object.content
# TODO python datetime is not JSON serializable
formatter = "%a %b %d %H:%M:%S mst %Y"
timestring = post_object.published_date.strftime(formatter)
post_dict["pubDate"] = timestring
# post_dict["pubdate"] = post_object.published_date
post_dict["guid"] = post_object.guid
post_dict["visibility"] = post_object.visibility
# get the post author, convert to dict and add to post_dict
author_dict = getAuthorDict(post_object.author, include_url=True)
post_dict["author"] = author_dict
post_dict["categories"] = getCategoryList(post_object.categories)
# get all comments on this post of return them
comment_list = Comment.objects.filter(post=post_object)
comment_dict_list = getCommentDictList(comment_list)
post_dict["comments"] = comment_dict_list
return post_dict
def getCommentDictList(comment_list):
""" Take a list of comment objects, returns list of dict representations.
Of the form:
"comments":[{"author":{"id":"sha1",
"host":"url",
"displayname":"name"},
"comment":"string",
"pubDate":"date",
"guid":"sha1"}]
:returns: A list of dicts
"""
comment_dict_list = []
for comment in comment_list:
comment_dict = {}
author_dict = getAuthorDict(comment.user)
comment_dict["author"] = author_dict
comment_dict["comment"] = comment.content
# TODO python datetime is not JSON serializable
formatter = "%a %b %d %H:%M:%S mst %Y"
timestring = comment.published_date.strftime(formatter)
comment_dict["pubDate"] = timestring
comment_dict["guid"] = comment.guid
comment_dict_list.append(comment_dict)
return comment_dict_list
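# Hedged note: an alternative to the hand-rolled strftime format used above is
# published_date.isoformat(), which also serializes cleanly to JSON as a string.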
def getCategoryList(categories):
category_list = []
for category in categories.all():
category_list.append(category.category_name)
return category_list
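# Hedged note: getCategoryList() could equivalently be written as a single list
# comprehension, e.g. [c.category_name for c in categories.all()].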
| apache-2.0 |
apache/incubator-mxnet | tests/python/unittest/test_gluon_control_flow.py | 2 | 25390 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
import numpy as np
import mxnet as mx
from mxnet import gluon
from mxnet.test_utils import *
from mxnet.base import _as_list
from collections import defaultdict
from mxnet.attribute import AttrScope
@mx.util.use_np
def test_while_loop_simple_forward():
class _TestBlock(gluon.HybridBlock):
def __init__(self, cond, func, max_iterations):
super(_TestBlock, self).__init__()
self.cond = cond
self.func = func
self.max_iterations = max_iterations
def forward(self, *loop_vars):
return mx.npx.while_loop(
cond=self.cond,
func=self.func,
loop_vars=loop_vars,
max_iterations=self.max_iterations
)
for hybridize in [False, True]:
        # Case 1.1: result should be sum([1, 2, 3, 4, 5])
model = _TestBlock(
cond=lambda i, s: i <= 5,
func=lambda i, s: (None, (i + 1, s + i)),
max_iterations=10,
)
if hybridize:
model.hybridize()
_, result = model(
mx.np.array([1], dtype="int64"), # i
mx.np.array([0], dtype="int64"), # s
)
assert result[0].item() == 6
assert result[1].item() == 15
# Case 1.2: result should be sum([1, 2, 3 ... 1000])
model = _TestBlock(
cond=lambda i, s, true: true,
func=lambda i, s, true: (None, (i + 1, s + i, true)),
max_iterations=1000,
)
if hybridize:
model.hybridize()
_, result = model(
mx.np.array([1], dtype="int64"), # i
mx.np.array([0], dtype="int64"), # s
mx.np.array([1], dtype="int64"), # true
)
assert result[0].item() == 1001
assert result[1].item() == 500500
assert result[2].item() == 1
# Case 1.3: result should be sum([])
model = _TestBlock(
cond=lambda i, s, false: false,
func=lambda i, s, false: (None, (i + 1, s + i, false)),
max_iterations=1000,
)
if hybridize:
model.hybridize()
_, result = model(
mx.np.array([1], dtype="int64"), # i
mx.np.array([0], dtype="int64"), # s
mx.np.array([0], dtype="int64"), # false
)
assert result[0].item() == 1
assert result[1].item() == 0
assert result[2].item() == 0
# Case 2.1: result should be sum([1, 2, 3 ... 100])
model = _TestBlock(
cond=lambda i, s: i <= 100,
func=lambda i, s: (i, (i + 1, s + i)),
max_iterations=1000,
)
if hybridize:
model.hybridize()
outputs, (result_i, result_s) = model(
mx.np.array([1], dtype="int64"), # i
mx.np.array([0], dtype="int64"), # s
)
assert all(outputs.asnumpy()[ : 100] == np.arange(1, 101).reshape(100, 1))
assert result_i.item() == 101
assert result_s.item() == 5050
# Case 2.2: result should be sum([1, 2, 3 ... 1000])
model = _TestBlock(
cond=lambda i, s, true: true,
func=lambda i, s, true: (i, (i + 1, s + i, true)),
max_iterations=1000,
)
if hybridize:
model.hybridize()
outputs, (result_i, result_s, _) = model(
mx.np.array([1], dtype="int64"), # i
mx.np.array([0], dtype="int64"), # s
mx.np.array([1], dtype="int64"), # true
)
assert all(outputs.asnumpy() == np.arange(1, 1001).reshape(1000, 1))
assert result_i.item() == 1001
assert result_s.item() == 500500
# Case 2.3: a corner case, in which loop body is never executed
model = _TestBlock(
cond=lambda i, s, false: false,
func=lambda i, s, false: (i, (i + 1, s + i, false)),
max_iterations=1000,
)
if hybridize:
model.hybridize()
_, (result_i, result_s, _) = model(
mx.np.array([1], dtype="int64"), # i
mx.np.array([0], dtype="int64"), # s
mx.np.array([0], dtype="int64"), # false
)
assert result_i.item() == 1
assert result_s.item() == 0
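# Hedged minimal sketch (not one of the original tests; pytest will not collect
# it because the name does not start with "test_"): imperative use of
# mx.npx.while_loop mirroring the calls exercised above. Summing 1..5 should
# give 15.
@mx.util.use_np
def _while_loop_usage_sketch():
    i = mx.np.array([1], dtype="int64")
    s = mx.np.array([0], dtype="int64")
    _, (i, s) = mx.npx.while_loop(
        cond=lambda i, s: i <= 5,
        func=lambda i, s: (None, (i + 1, s + i)),
        loop_vars=(i, s),
        max_iterations=10,
    )
    return s.item()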
def test_cut_subgraph_foreach():
class TestLayer(gluon.HybridBlock):
def __init__(self):
super(TestLayer, self).__init__()
def forward(self, inputs, states):
def step1(data, states):
return data + 1, states
out1, states1 = mx.npx.foreach(step1, inputs, states)
out2, states2 = mx.npx.foreach(step1, out1, states)
def step2(data, states):
return data + states[0], states
out, states = mx.npx.foreach(step2, out2, states1)
return out
data = mx.np.random.normal(loc=0, scale=1, size=(5, 10))
states = mx.np.random.normal(loc=0, scale=1, size=(10))
layer = TestLayer()
layer.initialize(ctx=default_context())
res1 = layer(data, [states])
with mx.autograd.record():
res1 = layer(data, [states])
layer = TestLayer()
layer.initialize(ctx=default_context())
layer.hybridize()
res2 = layer(data, [states])
with mx.autograd.record():
res2 = layer(data, [states])
assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=1e-3, atol=1e-3)
@mx.util.use_np
def test_uniq_name():
class ForeachLayer1(gluon.HybridBlock):
def __init__(self):
super(ForeachLayer1, self).__init__()
def forward(self, inputs, states):
def step1(data, states):
return data + 1, states
out1, states1 = mx.npx.foreach(step1, inputs, states)
# The input variables have the same symbol name.
out, states = mx.npx.foreach(step1, out1, states1)
return out
class ForeachLayer2(gluon.HybridBlock):
def __init__(self):
super(ForeachLayer2, self).__init__()
def forward(self, inputs, states):
def step1(data, states):
return data + 1, states
out1, states1 = mx.npx.foreach(step1, inputs, states)
def step2(data, states):
return data, [states[0] + states[0] + mx.np.squeeze(mx.npx.slice(data, begin=0, end=1))]
# The input variables have the same symbol names.
# The free variables have the same symbol names as the input variables.
out, states = mx.npx.foreach(step2, out1, states1)
return out
class WhileLayer1(gluon.HybridBlock):
def __init__(self):
super(WhileLayer1, self).__init__()
def forward(self, inputs, states):
def cond(state1, state2):
s = mx.np.squeeze(mx.npx.slice(state1, begin=0, end=1))
return s == s
def step(state1, state2):
return state1 + 1, [state1 + 1, state2 + 1]
states = [states[0], states[0] + 1]
out1, states1 = mx.npx.while_loop(cond, step, states, max_iterations=5)
# The input variables have the same symbol name.
out, states = mx.npx.while_loop(cond, step, states1, max_iterations=5)
return out
class WhileLayer2(gluon.HybridBlock):
def __init__(self):
super(WhileLayer2, self).__init__()
def forward(self, inputs, states):
def cond(state1, state2):
s = mx.np.squeeze(mx.npx.slice(state1, begin=0, end=1))
return s == s
def step1(state1, state2):
return state1 + 1, [state1, state2]
states = [states[0], states[0] + 1]
out1, states1 = mx.npx.while_loop(cond, step1, states, max_iterations=5)
def step2(state1, state2):
return state1 + 1, [state1 + state1[0], state2 + state1[1]]
# The input variables have the same symbol name.
out, states = mx.npx.while_loop(cond, step2, states1, max_iterations=5)
return out
TestLayers = [ForeachLayer1, ForeachLayer2,
WhileLayer1, WhileLayer2]
# TestLayers = [WhileLayer1]
data = mx.np.random.normal(loc=0, scale=1, size=(2, 5))
states = mx.np.random.normal(loc=0, scale=1, size=(5))
for TestLayer in TestLayers:
layer = TestLayer()
layer.initialize(ctx=default_context())
res1 = layer(data, [states])
with mx.autograd.record():
res1 = layer(data, [states])
layer = TestLayer()
layer.initialize(ctx=default_context())
layer.hybridize()
res2 = layer(data, [states])
with mx.autograd.record():
res2 = layer(data, [states])
assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=0.001, atol=0.0001)
@mx.util.use_np
def test_cut_subgraph_while_loop():
class TestLayer(gluon.HybridBlock):
def __init__(self):
super(TestLayer, self).__init__()
def forward(self, data):
out1, data1 = mx.npx.while_loop(
cond=lambda i: i <= 5,
func=lambda i: (None, (i + 1, )),
loop_vars=(data, ),
max_iterations=10,
)
out2, data2 = mx.npx.while_loop(
cond=lambda i: i,
func=lambda i: (None, (i + 1, )),
loop_vars=data1[0],
max_iterations=10,
)
return data2[0]
data = mx.np.random.normal(loc=0, scale=1, size=(1, ))
layer = TestLayer()
layer.initialize(ctx=default_context())
res1 = layer(data)
with mx.autograd.record():
res1 = layer(data)
layer = TestLayer()
layer.initialize(ctx=default_context())
layer.hybridize()
res2 = layer(data)
with mx.autograd.record():
res2 = layer(data)
assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=1e-3, atol=1e-3)
@mx.util.use_np
def test_cut_subgraph_cond():
class TestLayer(gluon.HybridBlock):
def __init__(self):
super(TestLayer, self).__init__()
def forward(self, data):
data1 = mx.npx.cond(
pred=lambda data: data > 0.5,
then_func=lambda data: data * 2,
else_func=lambda data: data * 3,
inputs=data,
)
data2 = mx.npx.cond(
pred=lambda data: data > 0.5,
then_func=lambda data: data * 2,
else_func=lambda data: data * 3,
inputs=data1,
)
return data2
data = mx.np.random.normal(loc=0, scale=1, size=(1, ))
layer = TestLayer()
layer.initialize(ctx=default_context())
res1 = layer(data)
with mx.autograd.record():
res1 = layer(data)
layer = TestLayer()
layer.initialize(ctx=default_context())
layer.hybridize()
res2 = layer(data)
with mx.autograd.record():
res2 = layer(data)
assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=1e-3, atol=1e-3)
@mx.util.use_np
def test_output_format_foreach():
class TestLayer1(gluon.HybridBlock):
def __init__(self, step):
super(TestLayer1, self).__init__()
self.step = step
def forward(self, ins, states):
out, states = mx.npx.foreach(self.step, ins, states)
return out, states
def step1(data, state):
return data, state
def step2(data, state):
return [data], state
def step3(data, state):
if isinstance(state, list):
return [], [state[0] + data]
else:
return [], state + data
def step4(data, state):
if isinstance(state, list):
return [data, state[0]], state
else:
return [data, state], state
steps = [step1, step2, step3, step4]
data = mx.np.random.normal(loc=0, scale=1, size=(10, 2))
state = mx.np.random.normal(loc=0, scale=1, size=(2))
for step in steps:
layer1 = TestLayer1(step)
layer1.initialize(ctx=default_context())
layer2 = TestLayer1(step)
layer2.initialize(ctx=default_context())
layer2.hybridize()
out1, state1 = layer1(data, [state])
out2, state2 = layer2(data, [state])
step_out, step_state = step(data, [state])
assert type(out1) == type(step_out)
assert type(out2) == type(step_out)
assert type(state1) == type(step_state)
assert type(state2) == type(step_state)
out1 = _as_list(out1)
out2 = _as_list(out2)
state1 = _as_list(state1)
state2 = _as_list(state2)
for i in range(len(out1)):
assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001)
for i in range(len(state1)):
assert_almost_equal(state1[i].asnumpy(), state2[i].asnumpy(), rtol=0.001, atol=0.0001)
layer1 = TestLayer1(step)
layer1.initialize(ctx=default_context())
layer2 = TestLayer1(step)
layer2.initialize(ctx=default_context())
layer2.hybridize()
out1, state1 = layer1(data, state)
out2, state2 = layer2(data, state)
step_out, step_state = step(data, state)
assert type(out1) == type(step_out)
assert type(out2) == type(step_out)
assert type(state1) == type(step_state)
assert type(state2) == type(step_state)
out1 = _as_list(out1)
out2 = _as_list(out2)
state1 = _as_list(state1)
state2 = _as_list(state2)
for i in range(len(out1)):
assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001)
for i in range(len(state1)):
assert_almost_equal(state1[i].asnumpy(), state2[i].asnumpy(), rtol=0.001, atol=0.0001)
if step == step3:
continue
layer1 = TestLayer1(step)
layer1.initialize(ctx=default_context())
layer2 = TestLayer1(step)
layer2.initialize(ctx=default_context())
layer2.hybridize()
out1, state1 = layer1(data, [state, [state + 1]])
out2, state2 = layer2(data, [state, [state + 1]])
step_out, step_state = step(data, [state, [state + 1]])
assert type(out1) == type(step_out)
assert type(out2) == type(step_out)
assert type(state1) == type(step_state)
assert type(state2) == type(step_state)
out1 = _as_list(out1)
out2 = _as_list(out2)
state1 = _as_list(state1)
state2 = _as_list(state2)
for i in range(len(out1)):
assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001)
for i in range(len(state1)):
if isinstance(state1[i], list):
assert_almost_equal(state1[i][0].asnumpy(), state2[i][0].asnumpy(),
rtol=0.001, atol=0.0001)
else:
assert_almost_equal(state1[i].asnumpy(), state2[i].asnumpy(),
rtol=0.001, atol=0.0001)
@mx.util.use_np
def test_output_format_while():
class TestLayer1(gluon.HybridBlock):
def __init__(self, step, use_list, nested_list=False):
super(TestLayer1, self).__init__()
self.step = step
self.use_list = use_list
self.nested_list = nested_list
def forward(self, states):
def cond(state1):
scalar = mx.npx.slice(state1, begin=0, end=1)
return scalar == scalar
cond_func = cond
if self.use_list:
states = [states]
elif self.nested_list:
def cond2(state1, state2):
scalar = mx.npx.slice(state1, begin=0, end=1)
return scalar == scalar
cond_func = cond2
states = [states, [states + 1]]
out, states = mx.npx.while_loop(cond_func, self.step, states, max_iterations=5)
return out, states
def step1(state):
return state, state
def step2(state):
if isinstance(state, list):
return state, state
else:
return [state], state
def step3(state):
return [], state
steps = [step1, step2, step3]
state = mx.np.random.normal(loc=0, scale=1, size=(2))
for step in steps:
layer1 = TestLayer1(step, False)
layer1.initialize(ctx=default_context())
layer2 = TestLayer1(step, False)
layer2.initialize(ctx=default_context())
layer2.hybridize()
out1, state1 = layer1(state)
out2, state2 = layer2(state)
assert type(out1) == type(out2)
assert type(state1) == type(state1)
out1 = _as_list(out1)
out2 = _as_list(out2)
state1 = _as_list(state1)
state2 = _as_list(state2)
for i in range(len(out1)):
assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001)
for i in range(len(state1)):
assert_almost_equal(state1[i].asnumpy(), state2[i].asnumpy(), rtol=0.001, atol=0.0001)
layer1 = TestLayer1(step, True)
layer1.initialize(ctx=default_context())
layer2 = TestLayer1(step, True)
layer2.initialize(ctx=default_context())
layer2.hybridize()
out1, state1 = layer1(state)
out2, state2 = layer2(state)
assert type(out1) == type(out2)
assert type(state1) == type(state2)
out1 = _as_list(out1)
out2 = _as_list(out2)
state1 = _as_list(state1)
state2 = _as_list(state2)
for i in range(len(out1)):
assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001)
for i in range(len(state1)):
assert_almost_equal(state1[i].asnumpy(), state2[i].asnumpy(), rtol=0.001, atol=0.0001)
def step4(state, state2):
states = _as_list(state)
states.append(state2)
return state, states
def step5(state, state2):
states = _as_list(state)
states.append(state2)
if isinstance(state, list):
return state, states
else:
return [state], states
def step6(state, state2):
states = _as_list(state)
states.append(state2)
return [], states
steps = [step4, step5, step6]
for step in steps:
layer1 = TestLayer1(step, False, True)
layer1.initialize(ctx=default_context())
layer2 = TestLayer1(step, False, True)
layer2.initialize(ctx=default_context())
layer2.hybridize()
out1, state1 = layer1(state)
out2, state2 = layer2(state)
assert type(out1) == type(out2)
assert type(state1) == type(state2)
out1 = _as_list(out1)
out2 = _as_list(out2)
state1 = _as_list(state1)
state2 = _as_list(state2)
for i in range(len(out1)):
assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001)
for i in range(len(state1)):
if not isinstance(state1[i], list):
assert_almost_equal(state1[i].asnumpy(), state2[i].asnumpy(),
rtol=0.001, atol=0.0001)
@mx.util.use_np
def test_output_format_cond():
class TestLayer1(gluon.HybridBlock):
def __init__(self, func):
super(TestLayer1, self).__init__()
self.func = func
def forward(self, data):
def then_func(data):
return self.func(data)
def else_func(data):
return self.func(data)
return mx.npx.cond(lambda data: mx.npx.slice(data, begin=0, end=1),
then_func, else_func, data)
def func1(data):
return data
def func2(data):
return [data]
def func3(data):
return [data, data]
funcs = [func1, func2, func3]
data = mx.np.random.normal(loc=0, scale=1, size=(2))
for func in funcs:
layer1 = TestLayer1(func)
layer1.initialize(ctx=default_context())
layer2 = TestLayer1(func)
layer2.initialize(ctx=default_context())
layer2.hybridize()
out1 = layer1(data)
out2 = layer2(data)
func_out = func(data)
assert type(out1) == type(func_out)
assert type(out2) == type(func_out)
out1 = _as_list(out1)
out2 = _as_list(out2)
for i in range(len(out1)):
assert_almost_equal(out1[i].asnumpy(), out2[i].asnumpy(), rtol=0.001, atol=0.0001)
@mx.util.use_np
def test_scope():
class TestBlock1(gluon.HybridBlock):
def __init__(self):
super(TestBlock1, self).__init__()
def forward(self, data):
(new_data, ) = mx.npx.cond(
pred=lambda data: data > 0.5,
then_func=lambda data: data * 2,
else_func=lambda data: data * 3,
inputs=data,
name="my_cond",
)
return new_data
class TestBlock2(gluon.HybridBlock):
def __init__(self):
super(TestBlock2, self).__init__()
def forward(self, data):
(new_data, ) = mx.npx.cond(
pred=lambda data: data > 0.5,
then_func=lambda data: data * 2,
else_func=lambda data: data * 3,
inputs=data,
name="my_cond",
)
return new_data
AttrScope._subgraph_names = defaultdict(int)
data = mx.np.random.normal(loc=0, scale=1, size=(1, ))
with AttrScope(__subgraph_name__="my_cond"):
block1 = TestBlock1()
block1.initialize(ctx=default_context())
block1.hybridize()
_ = block1(data)
block2 = TestBlock2()
block2.initialize(ctx=default_context())
block2.hybridize()
_ = block2(data)
assert len(AttrScope._subgraph_names) == 3
assert AttrScope._subgraph_names['my_cond$my_cond_else'] == 2
assert AttrScope._subgraph_names['my_cond$my_cond_pred'] == 2
assert AttrScope._subgraph_names['my_cond$my_cond_then'] == 2
class RNNLayer(gluon.HybridBlock):
def __init__(self, cell_type, hidden_size):
super(RNNLayer, self).__init__()
self.cell = cell_type(hidden_size)
def forward(self, inputs, states):
out, states = mx.npx.foreach(self.cell, inputs, states)
return out
def infer_shape(self, input, *args):
self.cell.infer_shape(0, input, False)
@mx.util.use_np
def check_rnn(cell_type, num_states):
batch_size = 10
hidden_size = 100
rnn_data = mx.np.random.normal(loc=0, scale=1, size=(5, batch_size, 50))
state_shape = (batch_size, hidden_size)
states = [mx.np.random.normal(loc=0, scale=1, size=state_shape) for i in range(num_states)]
layer = RNNLayer(cell_type, hidden_size)
layer.infer_shape(rnn_data)
layer.initialize(ctx=default_context())
res1 = layer(rnn_data, states)
params1 = layer.collect_params()
orig_params1 = copy.deepcopy(params1)
trainer = gluon.Trainer(params1, 'sgd', {'learning_rate' : 0.03})
with mx.autograd.record():
res1 = layer(rnn_data, states)
res1.backward()
trainer.step(batch_size)
configs = [
{},
{'inline_limit': 0},
{'static_alloc': True},
{'static_alloc': True, 'static_shape': True} ]
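    # Each config above exercises a different hybridization mode: the default
    # cached graph, operator inlining disabled (inline_limit=0), static memory
    # allocation, and static allocation with static shapes. The loop below
    # re-runs the layer under each mode and checks it matches the imperative run.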
for config in configs:
layer = RNNLayer(cell_type, hidden_size)
layer.infer_shape(rnn_data)
layer.initialize(ctx=default_context())
layer.hybridize(**config)
res2 = layer(rnn_data, states)
params2 = layer.collect_params()
for key, val in orig_params1.items():
params2[key].set_data(copy.deepcopy(val.data()))
trainer = gluon.Trainer(params2, 'sgd', {'learning_rate' : 0.03})
with mx.autograd.record():
res2 = layer(rnn_data, states)
assert_almost_equal(res1.asnumpy(), res2.asnumpy(), rtol=1e-3, atol=1e-3)
res2.backward()
trainer.step(batch_size)
for key, val in params1.items():
weight1 = val.data()
weight2 = params2[key].data()
assert_almost_equal(weight1.asnumpy(), weight2.asnumpy(),
rtol=1e-3, atol=1e-3)
def test_rnn():
cell_types = [(gluon.rnn.RNNCell, 1), (gluon.rnn.LSTMCell, 2),
(gluon.rnn.GRUCell, 1)]
for cell_type, num_states in cell_types:
check_rnn(cell_type, num_states)
| apache-2.0 |
KaelChen/numpy | numpy/ma/tests/test_mrecords.py | 64 | 20867 | # pylint: disable-msg=W0611, W0612, W0511,R0201
"""Tests suite for mrecords.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
"""
from __future__ import division, absolute_import, print_function
import warnings
import pickle
import numpy as np
import numpy.ma as ma
from numpy import recarray
from numpy.compat import asbytes, asbytes_nested
from numpy.ma import masked, nomask
from numpy.testing import TestCase, run_module_suite
from numpy.core.records import (
fromrecords as recfromrecords, fromarrays as recfromarrays
)
from numpy.ma.mrecords import (
MaskedRecords, mrecarray, fromarrays, fromtextfile, fromrecords,
addfield
)
from numpy.ma.testutils import (
assert_, assert_equal,
assert_equal_records,
)
class TestMRecords(TestCase):
# Base test class for MaskedArrays.
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
self.setup()
def setup(self):
# Generic setup
ilist = [1, 2, 3, 4, 5]
flist = [1.1, 2.2, 3.3, 4.4, 5.5]
slist = asbytes_nested(['one', 'two', 'three', 'four', 'five'])
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mask = [0, 1, 0, 0, 1]
self.base = ma.array(list(zip(ilist, flist, slist)),
mask=mask, dtype=ddtype)
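        # The resulting self.base is a 5-record masked array with fields
        # 'a' (int), 'b' (float) and 'c' ('|S8'); records 1 and 4 are masked,
        # which the tests below rely on when checking recordmask behaviour.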
def test_byview(self):
# Test creation by view
base = self.base
mbase = base.view(mrecarray)
assert_equal(mbase.recordmask, base.recordmask)
assert_equal_records(mbase._mask, base._mask)
assert_(isinstance(mbase._data, recarray))
assert_equal_records(mbase._data, base._data.view(recarray))
for field in ('a', 'b', 'c'):
assert_equal(base[field], mbase[field])
assert_equal_records(mbase.view(mrecarray), mbase)
def test_get(self):
# Tests fields retrieval
base = self.base.copy()
mbase = base.view(mrecarray)
# As fields..........
for field in ('a', 'b', 'c'):
assert_equal(getattr(mbase, field), mbase[field])
assert_equal(base[field], mbase[field])
# as elements .......
mbase_first = mbase[0]
assert_(isinstance(mbase_first, mrecarray))
assert_equal(mbase_first.dtype, mbase.dtype)
assert_equal(mbase_first.tolist(), (1, 1.1, asbytes('one')))
# Used to be mask, now it's recordmask
assert_equal(mbase_first.recordmask, nomask)
assert_equal(mbase_first._mask.item(), (False, False, False))
assert_equal(mbase_first['a'], mbase['a'][0])
mbase_last = mbase[-1]
assert_(isinstance(mbase_last, mrecarray))
assert_equal(mbase_last.dtype, mbase.dtype)
assert_equal(mbase_last.tolist(), (None, None, None))
# Used to be mask, now it's recordmask
assert_equal(mbase_last.recordmask, True)
assert_equal(mbase_last._mask.item(), (True, True, True))
assert_equal(mbase_last['a'], mbase['a'][-1])
assert_((mbase_last['a'] is masked))
# as slice ..........
mbase_sl = mbase[:2]
assert_(isinstance(mbase_sl, mrecarray))
assert_equal(mbase_sl.dtype, mbase.dtype)
# Used to be mask, now it's recordmask
assert_equal(mbase_sl.recordmask, [0, 1])
assert_equal_records(mbase_sl.mask,
np.array([(False, False, False),
(True, True, True)],
dtype=mbase._mask.dtype))
assert_equal_records(mbase_sl, base[:2].view(mrecarray))
for field in ('a', 'b', 'c'):
assert_equal(getattr(mbase_sl, field), base[:2][field])
def test_set_fields(self):
# Tests setting fields.
base = self.base.copy()
mbase = base.view(mrecarray)
mbase = mbase.copy()
mbase.fill_value = (999999, 1e20, 'N/A')
# Change the data, the mask should be conserved
mbase.a._data[:] = 5
assert_equal(mbase['a']._data, [5, 5, 5, 5, 5])
assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1])
# Change the elements, and the mask will follow
mbase.a = 1
assert_equal(mbase['a']._data, [1]*5)
assert_equal(ma.getmaskarray(mbase['a']), [0]*5)
        # Used to be _mask, now it's recordmask
assert_equal(mbase.recordmask, [False]*5)
assert_equal(mbase._mask.tolist(),
np.array([(0, 0, 0),
(0, 1, 1),
(0, 0, 0),
(0, 0, 0),
(0, 1, 1)],
dtype=bool))
# Set a field to mask ........................
mbase.c = masked
        # Used to be mask, and now it's still mask!
assert_equal(mbase.c.mask, [1]*5)
assert_equal(mbase.c.recordmask, [1]*5)
assert_equal(ma.getmaskarray(mbase['c']), [1]*5)
assert_equal(ma.getdata(mbase['c']), [asbytes('N/A')]*5)
assert_equal(mbase._mask.tolist(),
np.array([(0, 0, 1),
(0, 1, 1),
(0, 0, 1),
(0, 0, 1),
(0, 1, 1)],
dtype=bool))
# Set fields by slices .......................
mbase = base.view(mrecarray).copy()
mbase.a[3:] = 5
assert_equal(mbase.a, [1, 2, 3, 5, 5])
assert_equal(mbase.a._mask, [0, 1, 0, 0, 0])
mbase.b[3:] = masked
assert_equal(mbase.b, base['b'])
assert_equal(mbase.b._mask, [0, 1, 0, 1, 1])
# Set fields globally..........................
ndtype = [('alpha', '|S1'), ('num', int)]
data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype)
rdata = data.view(MaskedRecords)
val = ma.array([10, 20, 30], mask=[1, 0, 0])
with warnings.catch_warnings():
warnings.simplefilter("ignore")
rdata['num'] = val
assert_equal(rdata.num, val)
assert_equal(rdata.num.mask, [1, 0, 0])
def test_set_fields_mask(self):
# Tests setting the mask of a field.
base = self.base.copy()
        # This one already has a mask....
mbase = base.view(mrecarray)
mbase['a'][-2] = masked
assert_equal(mbase.a, [1, 2, 3, 4, 5])
assert_equal(mbase.a._mask, [0, 1, 0, 1, 1])
        # This one does not have a mask yet
mbase = fromarrays([np.arange(5), np.random.rand(5)],
dtype=[('a', int), ('b', float)])
mbase['a'][-2] = masked
assert_equal(mbase.a, [0, 1, 2, 3, 4])
assert_equal(mbase.a._mask, [0, 0, 0, 1, 0])
def test_set_mask(self):
base = self.base.copy()
mbase = base.view(mrecarray)
# Set the mask to True .......................
mbase.mask = masked
assert_equal(ma.getmaskarray(mbase['b']), [1]*5)
assert_equal(mbase['a']._mask, mbase['b']._mask)
assert_equal(mbase['a']._mask, mbase['c']._mask)
assert_equal(mbase._mask.tolist(),
np.array([(1, 1, 1)]*5, dtype=bool))
# Delete the mask ............................
mbase.mask = nomask
assert_equal(ma.getmaskarray(mbase['c']), [0]*5)
assert_equal(mbase._mask.tolist(),
np.array([(0, 0, 0)]*5, dtype=bool))
def test_set_mask_fromarray(self):
base = self.base.copy()
mbase = base.view(mrecarray)
# Sets the mask w/ an array
mbase.mask = [1, 0, 0, 0, 1]
assert_equal(mbase.a.mask, [1, 0, 0, 0, 1])
assert_equal(mbase.b.mask, [1, 0, 0, 0, 1])
assert_equal(mbase.c.mask, [1, 0, 0, 0, 1])
# Yay, once more !
mbase.mask = [0, 0, 0, 0, 1]
assert_equal(mbase.a.mask, [0, 0, 0, 0, 1])
assert_equal(mbase.b.mask, [0, 0, 0, 0, 1])
assert_equal(mbase.c.mask, [0, 0, 0, 0, 1])
def test_set_mask_fromfields(self):
mbase = self.base.copy().view(mrecarray)
nmask = np.array(
[(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)],
dtype=[('a', bool), ('b', bool), ('c', bool)])
mbase.mask = nmask
assert_equal(mbase.a.mask, [0, 0, 1, 1, 0])
assert_equal(mbase.b.mask, [1, 1, 0, 0, 0])
assert_equal(mbase.c.mask, [0, 0, 1, 1, 0])
        # Reinitialize and redo
mbase.mask = False
mbase.fieldmask = nmask
assert_equal(mbase.a.mask, [0, 0, 1, 1, 0])
assert_equal(mbase.b.mask, [1, 1, 0, 0, 0])
assert_equal(mbase.c.mask, [0, 0, 1, 1, 0])
def test_set_elements(self):
base = self.base.copy()
# Set an element to mask .....................
mbase = base.view(mrecarray).copy()
mbase[-2] = masked
assert_equal(
mbase._mask.tolist(),
np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)],
dtype=bool))
# Used to be mask, now it's recordmask!
assert_equal(mbase.recordmask, [0, 1, 0, 1, 1])
# Set slices .................................
mbase = base.view(mrecarray).copy()
mbase[:2] = (5, 5, 5)
assert_equal(mbase.a._data, [5, 5, 3, 4, 5])
assert_equal(mbase.a._mask, [0, 0, 0, 0, 1])
assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5])
assert_equal(mbase.b._mask, [0, 0, 0, 0, 1])
assert_equal(mbase.c._data,
asbytes_nested(['5', '5', 'three', 'four', 'five']))
assert_equal(mbase.b._mask, [0, 0, 0, 0, 1])
mbase = base.view(mrecarray).copy()
mbase[:2] = masked
assert_equal(mbase.a._data, [1, 2, 3, 4, 5])
assert_equal(mbase.a._mask, [1, 1, 0, 0, 1])
assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5])
assert_equal(mbase.b._mask, [1, 1, 0, 0, 1])
assert_equal(mbase.c._data,
asbytes_nested(['one', 'two', 'three', 'four', 'five']))
assert_equal(mbase.b._mask, [1, 1, 0, 0, 1])
def test_setslices_hardmask(self):
# Tests setting slices w/ hardmask.
base = self.base.copy()
mbase = base.view(mrecarray)
mbase.harden_mask()
try:
mbase[-2:] = (5, 5, 5)
assert_equal(mbase.a._data, [1, 2, 3, 5, 5])
assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5])
assert_equal(mbase.c._data,
asbytes_nested(['one', 'two', 'three', '5', 'five']))
assert_equal(mbase.a._mask, [0, 1, 0, 0, 1])
assert_equal(mbase.b._mask, mbase.a._mask)
assert_equal(mbase.b._mask, mbase.c._mask)
except NotImplementedError:
# OK, not implemented yet...
pass
except AssertionError:
raise
else:
raise Exception("Flexible hard masks should be supported !")
# Not using a tuple should crash
try:
mbase[-2:] = 3
except (NotImplementedError, TypeError):
pass
else:
raise TypeError("Should have expected a readable buffer object!")
def test_hardmask(self):
# Test hardmask
base = self.base.copy()
mbase = base.view(mrecarray)
mbase.harden_mask()
self.assertTrue(mbase._hardmask)
mbase.mask = nomask
assert_equal_records(mbase._mask, base._mask)
mbase.soften_mask()
self.assertTrue(not mbase._hardmask)
mbase.mask = nomask
# So, the mask of a field is no longer set to nomask...
assert_equal_records(mbase._mask,
ma.make_mask_none(base.shape, base.dtype))
self.assertTrue(ma.make_mask(mbase['b']._mask) is nomask)
assert_equal(mbase['a']._mask, mbase['b']._mask)
def test_pickling(self):
# Test pickling
base = self.base.copy()
mrec = base.view(mrecarray)
_ = pickle.dumps(mrec)
mrec_ = pickle.loads(_)
assert_equal(mrec_.dtype, mrec.dtype)
assert_equal_records(mrec_._data, mrec._data)
assert_equal(mrec_._mask, mrec._mask)
assert_equal_records(mrec_._mask, mrec._mask)
def test_filled(self):
# Test filling the array
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8')
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mrec = fromarrays([_a, _b, _c], dtype=ddtype,
fill_value=(99999, 99999., 'N/A'))
mrecfilled = mrec.filled()
assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int))
assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.),
dtype=float))
assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'),
dtype='|S8'))
def test_tolist(self):
# Test tolist.
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8')
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mrec = fromarrays([_a, _b, _c], dtype=ddtype,
fill_value=(99999, 99999., 'N/A'))
assert_equal(mrec.tolist(),
[(1, 1.1, None), (2, 2.2, asbytes('two')),
(None, None, asbytes('three'))])
def test_withnames(self):
# Test the creation w/ format and names
x = mrecarray(1, formats=float, names='base')
x[0]['base'] = 10
assert_equal(x['base'][0], 10)
def test_exotic_formats(self):
# Test that 'exotic' formats are processed properly
easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)])
easy[0] = masked
assert_equal(easy.filled(1).item(), (1, asbytes('1'), 1.))
solo = mrecarray(1, dtype=[('f0', '<f8', (2, 2))])
solo[0] = masked
assert_equal(solo.filled(1).item(),
np.array((1,), dtype=solo.dtype).item())
mult = mrecarray(2, dtype="i4, (2,3)float, float")
mult[0] = masked
mult[1] = (1, 1, 1)
mult.filled(0)
assert_equal_records(mult.filled(0),
np.array([(0, 0, 0), (1, 1, 1)],
dtype=mult.dtype))
class TestView(TestCase):
def setUp(self):
(a, b) = (np.arange(10), np.random.rand(10))
ndtype = [('a', np.float), ('b', np.float)]
arr = np.array(list(zip(a, b)), dtype=ndtype)
mrec = fromarrays([a, b], dtype=ndtype, fill_value=(-9., -99.))
mrec.mask[3] = (False, True)
self.data = (mrec, a, b, arr)
def test_view_by_itself(self):
(mrec, a, b, arr) = self.data
test = mrec.view()
self.assertTrue(isinstance(test, MaskedRecords))
assert_equal_records(test, mrec)
assert_equal_records(test._mask, mrec._mask)
def test_view_simple_dtype(self):
(mrec, a, b, arr) = self.data
ntype = (np.float, 2)
test = mrec.view(ntype)
self.assertTrue(isinstance(test, ma.MaskedArray))
assert_equal(test, np.array(list(zip(a, b)), dtype=np.float))
self.assertTrue(test[3, 1] is ma.masked)
def test_view_flexible_type(self):
(mrec, a, b, arr) = self.data
alttype = [('A', np.float), ('B', np.float)]
test = mrec.view(alttype)
self.assertTrue(isinstance(test, MaskedRecords))
assert_equal_records(test, arr.view(alttype))
self.assertTrue(test['B'][3] is masked)
assert_equal(test.dtype, np.dtype(alttype))
self.assertTrue(test._fill_value is None)
##############################################################################
class TestMRecordsImport(TestCase):
# Base test class for MaskedArrays.
def __init__(self, *args, **kwds):
TestCase.__init__(self, *args, **kwds)
self.setup()
def setup(self):
# Generic setup
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array(list(map(asbytes, ['one', 'two', 'three'])),
mask=[0, 0, 1], dtype='|S8')
ddtype = [('a', int), ('b', float), ('c', '|S8')]
mrec = fromarrays([_a, _b, _c], dtype=ddtype,
fill_value=(asbytes('99999'), asbytes('99999.'),
asbytes('N/A')))
nrec = recfromarrays((_a._data, _b._data, _c._data), dtype=ddtype)
self.data = (mrec, nrec, ddtype)
def test_fromarrays(self):
_a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
_b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
_c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8')
(mrec, nrec, _) = self.data
for (f, l) in zip(('a', 'b', 'c'), (_a, _b, _c)):
assert_equal(getattr(mrec, f)._mask, l._mask)
# One record only
_x = ma.array([1, 1.1, 'one'], mask=[1, 0, 0],)
assert_equal_records(fromarrays(_x, dtype=mrec.dtype), mrec[0])
def test_fromrecords(self):
# Test construction from records.
(mrec, nrec, ddtype) = self.data
#......
palist = [(1, 'abc', 3.7000002861022949, 0),
(2, 'xy', 6.6999998092651367, 1),
(0, ' ', 0.40000000596046448, 0)]
pa = recfromrecords(palist, names='c1, c2, c3, c4')
mpa = fromrecords(palist, names='c1, c2, c3, c4')
assert_equal_records(pa, mpa)
#.....
_mrec = fromrecords(nrec)
assert_equal(_mrec.dtype, mrec.dtype)
for field in _mrec.dtype.names:
assert_equal(getattr(_mrec, field), getattr(mrec._data, field))
_mrec = fromrecords(nrec.tolist(), names='c1,c2,c3')
assert_equal(_mrec.dtype, [('c1', int), ('c2', float), ('c3', '|S5')])
for (f, n) in zip(('c1', 'c2', 'c3'), ('a', 'b', 'c')):
assert_equal(getattr(_mrec, f), getattr(mrec._data, n))
_mrec = fromrecords(mrec)
assert_equal(_mrec.dtype, mrec.dtype)
assert_equal_records(_mrec._data, mrec.filled())
assert_equal_records(_mrec._mask, mrec._mask)
def test_fromrecords_wmask(self):
# Tests construction from records w/ mask.
(mrec, nrec, ddtype) = self.data
_mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=[0, 1, 0,])
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), [(0, 0, 0), (1, 1, 1), (0, 0, 0)])
_mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=True)
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), [(1, 1, 1), (1, 1, 1), (1, 1, 1)])
_mrec = fromrecords(nrec.tolist(), dtype=ddtype, mask=mrec._mask)
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), mrec._mask.tolist())
_mrec = fromrecords(nrec.tolist(), dtype=ddtype,
mask=mrec._mask.tolist())
assert_equal_records(_mrec._data, mrec._data)
assert_equal(_mrec._mask.tolist(), mrec._mask.tolist())
def test_fromtextfile(self):
# Tests reading from a text file.
fcontent = asbytes(
"""#
'One (S)','Two (I)','Three (F)','Four (M)','Five (-)','Six (C)'
'strings',1,1.0,'mixed column',,1
'with embedded "double quotes"',2,2.0,1.0,,1
'strings',3,3.0E5,3,,1
'strings',4,-1e-10,,,1
""")
import os
import tempfile
(tmp_fd, tmp_fl) = tempfile.mkstemp()
os.write(tmp_fd, fcontent)
os.close(tmp_fd)
mrectxt = fromtextfile(tmp_fl, delimitor=',', varnames='ABCDEFG')
os.remove(tmp_fl)
self.assertTrue(isinstance(mrectxt, MaskedRecords))
assert_equal(mrectxt.F, [1, 1, 1, 1])
assert_equal(mrectxt.E._mask, [1, 1, 1, 1])
assert_equal(mrectxt.C, [1, 2, 3.e+5, -1e-10])
def test_addfield(self):
# Tests addfield
(mrec, nrec, ddtype) = self.data
(d, m) = ([100, 200, 300], [1, 0, 0])
mrec = addfield(mrec, ma.array(d, mask=m))
assert_equal(mrec.f3, d)
assert_equal(mrec.f3._mask, m)
def test_record_array_with_object_field():
# Trac #1839
y = ma.masked_array(
[(1, '2'), (3, '4')],
mask=[(0, 0), (0, 1)],
dtype=[('a', int), ('b', np.object)])
# getting an item used to fail
y[1]
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
geopython/pywps-demo | docs/conf.py | 3 | 10208 | # -*- coding: utf-8 -*-
#
# PyWPS-Flask documentation build configuration file, created by
# sphinx-quickstart on Thu Aug 11 21:27:33 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.linkcode'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'PyWPS-Flask'
copyright = u'2016, PyWPS Development Team'
author = u'PyWPS Development Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'4.2'
# The full version, including alpha/beta/rc tags.
release = u'4.2.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
#html_logo = '_static/pywps.png'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# alabaster settings
html_theme_options = {
'show_related': True,
'travis_button': True,
'github_banner': True,
'github_user': 'geopython',
'github_repo': 'pywps-flask',
'github_button': True,
'logo': 'pywps.png',
'logo_name': False
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# alabaster settings
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'searchbox.html',
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyWPS-Flaskdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyWPS-Flask.tex', u'PyWPS-Flask Documentation',
u'PyWPS Development Team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pywps-flask', u'PyWPS-Flask Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyWPS-Flask', u'PyWPS-Flask Documentation',
author, 'PyWPS-Flask', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/': None}
def linkcode_resolve(domain, info):
if domain != 'py':
return None
if not info['module']:
return None
filename = info['module'].replace('.', '/')
return "http://github.com/geopython/pywps-flask/blob/master/%s.py" % filename
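# Illustrative sketch (not part of the generated config): for a docstring
# pulled from a hypothetical module named 'processes.ultimate_question',
# linkcode_resolve('py', {'module': 'processes.ultimate_question'}) would
# return
# "http://github.com/geopython/pywps-flask/blob/master/processes/ultimate_question.py".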
| mit |
sam-m888/gprime | gprime/db/dbconst.py | 1 | 4144 | #
# gPrime - A web-based genealogy program
#
# Copyright (C) 2004-2007 Donald N. Allingham
# Copyright (C) 2011 Tim G L Lyons
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Declare constants used by database modules
"""
#-------------------------------------------------------------------------
#
# constants
#
#-------------------------------------------------------------------------
__all__ = ( 'DBPAGE', 'DBMODE', 'DBCACHE', 'DBLOCKS', 'DBOBJECTS', 'DBUNDO',
'DBEXT', 'DBMODE_R', 'DBMODE_W', 'DBUNDOFN', 'DBLOCKFN',
'DBRECOVFN','BDBVERSFN', 'DBLOGNAME', 'SCHVERSFN',
'DBBACKEND',
'PERSON_KEY', 'FAMILY_KEY', 'SOURCE_KEY', 'CITATION_KEY',
'EVENT_KEY', 'MEDIA_KEY', 'PLACE_KEY', 'REPOSITORY_KEY',
'NOTE_KEY', 'REFERENCE_KEY', 'TAG_KEY',
'TXNADD', 'TXNUPD', 'TXNDEL',
"CLASS_TO_KEY_MAP", "KEY_TO_CLASS_MAP", "KEY_TO_NAME_MAP"
)
DBEXT = ".db" # File extension to be used for database files
DBUNDOFN = "undo.db" # File name of 'undo' database
DBLOCKFN = "lock" # File name of lock file
DBRECOVFN = "need_recover" # File name of recovery file
BDBVERSFN = "bdbversion.txt" # File name of Berkeley DB version file
DBBACKEND = "database.txt" # File name of Database backend file
SCHVERSFN = "schemaversion.txt" # File name of schema version file
DBLOGNAME = ".Db" # Name of logger
DBMODE_R = "r" # Read-only access
DBMODE_W = "w" # Full Read/Write access
DBPAGE = 16384 # Size of the pages used to hold items in the database
DBMODE = 0o666 # Unix mode for database creation
DBCACHE = 0x4000000 # Size of the shared memory buffer pool
DBLOCKS = 100000 # Maximum number of locks supported
DBOBJECTS = 100000 # Maximum number of simultaneously locked objects
DBUNDO = 1000 # Maximum size of undo buffer
PERSON_KEY = 0
FAMILY_KEY = 1
SOURCE_KEY = 2
EVENT_KEY = 3
MEDIA_KEY = 4
PLACE_KEY = 5
REPOSITORY_KEY = 6
REFERENCE_KEY = 7
NOTE_KEY = 8
TAG_KEY = 9
CITATION_KEY = 10
TXNADD, TXNUPD, TXNDEL = 0, 1, 2
CLASS_TO_KEY_MAP = {"Person": PERSON_KEY,
"Family": FAMILY_KEY,
"Source": SOURCE_KEY,
"Citation": CITATION_KEY,
"Event": EVENT_KEY,
"Media": MEDIA_KEY,
"Place": PLACE_KEY,
"Repository": REPOSITORY_KEY,
"Note" : NOTE_KEY,
"Tag": TAG_KEY}
KEY_TO_CLASS_MAP = {PERSON_KEY: "Person",
FAMILY_KEY: "Family",
SOURCE_KEY: "Source",
CITATION_KEY: "Citation",
EVENT_KEY: "Event",
MEDIA_KEY: "Media",
PLACE_KEY: "Place",
REPOSITORY_KEY: "Repository",
NOTE_KEY: "Note",
TAG_KEY: "Tag"}
KEY_TO_NAME_MAP = {PERSON_KEY: 'person',
FAMILY_KEY: 'family',
EVENT_KEY: 'event',
SOURCE_KEY: 'source',
CITATION_KEY: 'citation',
PLACE_KEY: 'place',
MEDIA_KEY: 'media',
REPOSITORY_KEY: 'repository',
#REFERENCE_KEY: 'reference',
NOTE_KEY: 'note',
TAG_KEY: 'tag'}
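# Illustrative sketch, not part of the original module: the three maps above
# are meant to stay mutually consistent, e.g.
#
#     KEY_TO_CLASS_MAP[CLASS_TO_KEY_MAP["Person"]]   # -> "Person"
#     KEY_TO_NAME_MAP[CLASS_TO_KEY_MAP["Person"]]    # -> "person"
#
# REFERENCE_KEY is the exception: it has no class mapping and its
# KEY_TO_NAME_MAP entry is commented out above.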
| gpl-2.0 |
mmnelemane/neutron | neutron/plugins/embrane/plugins/embrane_fake_plugin.py | 59 | 1274 | # Copyright 2013 Embrane, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db import extraroute_db
from neutron.plugins.embrane import base_plugin as base
from neutron.plugins.embrane.l2base.fake import fake_l2_plugin as l2
from neutron.plugins.embrane.l2base.fake import fakeplugin_support as sup
class EmbraneFakePlugin(base.EmbranePlugin, extraroute_db.ExtraRoute_db_mixin,
l2.FakeL2Plugin):
_plugin_support = sup.FakePluginSupport()
def __init__(self):
'''First run plugin specific initialization, then Embrane's.'''
self.supported_extension_aliases += ["extraroute", "router"]
l2.FakeL2Plugin.__init__(self)
self._run_embrane_config()
| apache-2.0 |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/tseries/plotting.py | 7 | 9969 | """
Period formatters and locators adapted from scikits.timeseries by
Pierre GF Gerard-Marchant & Matt Knox
"""
# TODO: Use the fact that axis can have units to simplify the process
import numpy as np
from matplotlib import pylab
from pandas.tseries.period import Period
from pandas.tseries.offsets import DateOffset
import pandas.tseries.frequencies as frequencies
from pandas.tseries.index import DatetimeIndex
from pandas.formats.printing import pprint_thing
import pandas.compat as compat
from pandas.tseries.converter import (TimeSeries_DateLocator,
TimeSeries_DateFormatter)
# ---------------------------------------------------------------------
# Plotting functions and monkey patches
def tsplot(series, plotf, ax=None, **kwargs):
"""
Plots a Series on the given Matplotlib axes or the current axes
Parameters
----------
    series : Series
    plotf : callable
        Function that actually draws onto the axes.
    ax : Axes, optional
    Notes
    -----
Supports same kwargs as Axes.plot
"""
    # Use inferred freq if possible; need a test case for inferred
if ax is None:
import matplotlib.pyplot as plt
ax = plt.gca()
freq, series = _maybe_resample(series, ax, kwargs)
# Set ax with freq info
_decorate_axes(ax, freq, kwargs)
ax._plot_data.append((series, plotf, kwargs))
lines = plotf(ax, series.index._mpl_repr(), series.values, **kwargs)
# set date formatter, locators and rescale limits
format_dateaxis(ax, ax.freq)
return lines
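# Illustrative usage sketch (assumed, not part of the original module):
#
#     import matplotlib.pyplot as plt
#     from pandas import Series, period_range
#     ts = Series(range(12), index=period_range('2000-01', periods=12, freq='M'))
#     fig, ax = plt.subplots()
#     plotf = lambda ax_, x, y, **kwds: ax_.plot(x, y, **kwds)
#     tsplot(ts, plotf, ax=ax)   # plotf receives (axes, x, y, **kwargs)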
def _maybe_resample(series, ax, kwargs):
# resample against axes freq if necessary
freq, ax_freq = _get_freq(ax, series)
if freq is None: # pragma: no cover
raise ValueError('Cannot use dynamic axis without frequency info')
# Convert DatetimeIndex to PeriodIndex
if isinstance(series.index, DatetimeIndex):
series = series.to_period(freq=freq)
if ax_freq is not None and freq != ax_freq:
if frequencies.is_superperiod(freq, ax_freq): # upsample input
series = series.copy()
series.index = series.index.asfreq(ax_freq, how='s')
freq = ax_freq
elif _is_sup(freq, ax_freq): # one is weekly
how = kwargs.pop('how', 'last')
series = getattr(series.resample('D'), how)().dropna()
series = getattr(series.resample(ax_freq), how)().dropna()
freq = ax_freq
elif frequencies.is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq):
_upsample_others(ax, freq, kwargs)
ax_freq = freq
else: # pragma: no cover
raise ValueError('Incompatible frequency conversion')
return freq, series
def _is_sub(f1, f2):
return ((f1.startswith('W') and frequencies.is_subperiod('D', f2)) or
(f2.startswith('W') and frequencies.is_subperiod(f1, 'D')))
def _is_sup(f1, f2):
return ((f1.startswith('W') and frequencies.is_superperiod('D', f2)) or
(f2.startswith('W') and frequencies.is_superperiod(f1, 'D')))
def _upsample_others(ax, freq, kwargs):
legend = ax.get_legend()
lines, labels = _replot_ax(ax, freq, kwargs)
_replot_ax(ax, freq, kwargs)
other_ax = None
if hasattr(ax, 'left_ax'):
other_ax = ax.left_ax
if hasattr(ax, 'right_ax'):
other_ax = ax.right_ax
if other_ax is not None:
rlines, rlabels = _replot_ax(other_ax, freq, kwargs)
lines.extend(rlines)
labels.extend(rlabels)
if (legend is not None and kwargs.get('legend', True) and
len(lines) > 0):
title = legend.get_title().get_text()
if title == 'None':
title = None
ax.legend(lines, labels, loc='best', title=title)
def _replot_ax(ax, freq, kwargs):
data = getattr(ax, '_plot_data', None)
# clear current axes and data
ax._plot_data = []
ax.clear()
_decorate_axes(ax, freq, kwargs)
lines = []
labels = []
if data is not None:
for series, plotf, kwds in data:
series = series.copy()
idx = series.index.asfreq(freq, how='S')
series.index = idx
ax._plot_data.append((series, plotf, kwds))
# for tsplot
if isinstance(plotf, compat.string_types):
from pandas.tools.plotting import _plot_klass
plotf = _plot_klass[plotf]._plot
lines.append(plotf(ax, series.index._mpl_repr(),
series.values, **kwds)[0])
labels.append(pprint_thing(series.name))
return lines, labels
def _decorate_axes(ax, freq, kwargs):
"""Initialize axes for time-series plotting"""
if not hasattr(ax, '_plot_data'):
ax._plot_data = []
ax.freq = freq
xaxis = ax.get_xaxis()
xaxis.freq = freq
if not hasattr(ax, 'legendlabels'):
ax.legendlabels = [kwargs.get('label', None)]
else:
ax.legendlabels.append(kwargs.get('label', None))
ax.view_interval = None
ax.date_axis_info = None
def _get_ax_freq(ax):
"""
Get the freq attribute of the ax object if set.
Also checks shared axes (eg when using secondary yaxis, sharex=True
or twinx)
"""
ax_freq = getattr(ax, 'freq', None)
if ax_freq is None:
# check for left/right ax in case of secondary yaxis
if hasattr(ax, 'left_ax'):
ax_freq = getattr(ax.left_ax, 'freq', None)
elif hasattr(ax, 'right_ax'):
ax_freq = getattr(ax.right_ax, 'freq', None)
if ax_freq is None:
# check if a shared ax (sharex/twinx) has already freq set
shared_axes = ax.get_shared_x_axes().get_siblings(ax)
if len(shared_axes) > 1:
for shared_ax in shared_axes:
ax_freq = getattr(shared_ax, 'freq', None)
if ax_freq is not None:
break
return ax_freq
def _get_freq(ax, series):
# get frequency from data
freq = getattr(series.index, 'freq', None)
if freq is None:
freq = getattr(series.index, 'inferred_freq', None)
ax_freq = _get_ax_freq(ax)
# use axes freq if no data freq
if freq is None:
freq = ax_freq
# get the period frequency
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
return freq, ax_freq
def _use_dynamic_x(ax, data):
freq = _get_index_freq(data)
ax_freq = _get_ax_freq(ax)
if freq is None: # convert irregular if axes has freq info
freq = ax_freq
else: # do not use tsplot if irregular was plotted first
if (ax_freq is None) and (len(ax.get_lines()) > 0):
return False
if freq is None:
return False
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
if freq is None:
return False
# hack this for 0.10.1, creating more technical debt...sigh
if isinstance(data.index, DatetimeIndex):
base = frequencies.get_freq(freq)
x = data.index
if (base <= frequencies.FreqGroup.FR_DAY):
return x[:1].is_normalized
return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0]
return True
def _get_index_freq(data):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if freq == 'B':
weekdays = np.unique(data.index.dayofweek)
if (5 in weekdays) or (6 in weekdays):
freq = None
return freq
def _maybe_convert_index(ax, data):
# tsplot converts automatically, but don't want to convert index
# over and over for DataFrames
if isinstance(data.index, DatetimeIndex):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if isinstance(freq, DateOffset):
freq = freq.rule_code
if freq is None:
freq = _get_ax_freq(ax)
if freq is None:
raise ValueError('Could not get frequency alias for plotting')
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
data = data.to_period(freq=freq)
return data
# Patch methods for subplot. Only format_dateaxis is currently used.
# Do we need the rest for convenience?
def format_dateaxis(subplot, freq):
"""
Pretty-formats the date axis (x-axis).
Major and minor ticks are automatically set for the frequency of the
current underlying series. As the dynamic mode is activated by
default, changing the limits of the x axis will intelligently change
the positions of the ticks.
"""
majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_locator(majlocator)
subplot.xaxis.set_minor_locator(minlocator)
majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_formatter(majformatter)
subplot.xaxis.set_minor_formatter(minformatter)
# x and y coord info
subplot.format_coord = lambda t, y: (
"t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y))
pylab.draw_if_interactive()
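# Illustrative sketch (assumed, not part of the original module): when plotting
# period-indexed data onto an Axes by hand, the dynamic tick machinery can be
# attached afterwards, e.g.
#
#     ax.plot(ts.index._mpl_repr(), ts.values)
#     format_dateaxis(ax, ts.index.freqstr)   # e.g. 'M' for monthly data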
| apache-2.0 |
brunojppb/porque-rails | node_modules/node-gyp/gyp/pylib/gyp/MSVSVersion.py | 1509 | 17165 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Handle version information related to Visual Studio."""
import errno
import os
import re
import subprocess
import sys
import gyp
import glob
class VisualStudioVersion(object):
"""Information regarding a version of Visual Studio."""
def __init__(self, short_name, description,
solution_version, project_version, flat_sln, uses_vcxproj,
path, sdk_based, default_toolset=None):
self.short_name = short_name
self.description = description
self.solution_version = solution_version
self.project_version = project_version
self.flat_sln = flat_sln
self.uses_vcxproj = uses_vcxproj
self.path = path
self.sdk_based = sdk_based
self.default_toolset = default_toolset
def ShortName(self):
return self.short_name
def Description(self):
"""Get the full description of the version."""
return self.description
def SolutionVersion(self):
"""Get the version number of the sln files."""
return self.solution_version
def ProjectVersion(self):
"""Get the version number of the vcproj or vcxproj files."""
return self.project_version
def FlatSolution(self):
return self.flat_sln
def UsesVcxproj(self):
"""Returns true if this version uses a vcxproj file."""
return self.uses_vcxproj
def ProjectExtension(self):
"""Returns the file extension for the project."""
return self.uses_vcxproj and '.vcxproj' or '.vcproj'
def Path(self):
"""Returns the path to Visual Studio installation."""
return self.path
def ToolPath(self, tool):
"""Returns the path to a given compiler tool. """
return os.path.normpath(os.path.join(self.path, "VC/bin", tool))
def DefaultToolset(self):
"""Returns the msbuild toolset version that will be used in the absence
of a user override."""
return self.default_toolset
def SetupScript(self, target_arch):
"""Returns a command (with arguments) to be used to set up the
environment."""
# Check if we are running in the SDK command line environment and use
# the setup script from the SDK if so. |target_arch| should be either
# 'x86' or 'x64'.
assert target_arch in ('x86', 'x64')
sdk_dir = os.environ.get('WindowsSDKDir')
if self.sdk_based and sdk_dir:
return [os.path.normpath(os.path.join(sdk_dir, 'Bin/SetEnv.Cmd')),
'/' + target_arch]
else:
# We don't use VC/vcvarsall.bat for x86 because vcvarsall calls
# vcvars32, which it can only find if VS??COMNTOOLS is set, which it
# isn't always.
if target_arch == 'x86':
if self.short_name >= '2013' and self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
# VS2013 and later, non-Express have a x64-x86 cross that we want
# to prefer.
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), 'amd64_x86']
# Otherwise, the standard x86 compiler.
return [os.path.normpath(
os.path.join(self.path, 'Common7/Tools/vsvars32.bat'))]
else:
assert target_arch == 'x64'
arg = 'x86_amd64'
# Use the 64-on-64 compiler if we're not using an express
# edition and we're running on a 64bit OS.
if self.short_name[-1] != 'e' and (
os.environ.get('PROCESSOR_ARCHITECTURE') == 'AMD64' or
os.environ.get('PROCESSOR_ARCHITEW6432') == 'AMD64'):
arg = 'amd64'
return [os.path.normpath(
os.path.join(self.path, 'VC/vcvarsall.bat')), arg]
def _RegistryQueryBase(sysdir, key, value):
"""Use reg.exe to read a particular key.
While ideally we might use the win32 module, we would like gyp to be
python neutral, so for instance cygwin python lacks this module.
Arguments:
sysdir: The system subdirectory to attempt to launch reg.exe from.
key: The registry key to read from.
value: The particular value to read.
Return:
stdout from reg.exe, or None for failure.
"""
# Skip if not on Windows or Python Win32 setup issue
if sys.platform not in ('win32', 'cygwin'):
return None
# Setup params to pass to and attempt to launch reg.exe
cmd = [os.path.join(os.environ.get('WINDIR', ''), sysdir, 'reg.exe'),
'query', key]
if value:
cmd.extend(['/v', value])
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Obtain the stdout from reg.exe, reading to the end so p.returncode is valid
# Note that the error text may be in [1] in some cases
text = p.communicate()[0]
# Check return code from reg.exe; officially 0==success and 1==error
if p.returncode:
return None
return text
def _RegistryQuery(key, value=None):
r"""Use reg.exe to read a particular key through _RegistryQueryBase.
First tries to launch from %WinDir%\Sysnative to avoid WoW64 redirection. If
that fails, it falls back to System32. Sysnative is available on Vista and
up and available on Windows Server 2003 and XP through KB patch 942589. Note
that Sysnative will always fail if using 64-bit python due to it being a
virtual directory and System32 will work correctly in the first place.
KB 942589 - http://support.microsoft.com/kb/942589/en-us.
Arguments:
key: The registry key.
value: The particular registry value to read (optional).
Return:
stdout from reg.exe, or None for failure.
"""
text = None
try:
text = _RegistryQueryBase('Sysnative', key, value)
except OSError, e:
if e.errno == errno.ENOENT:
text = _RegistryQueryBase('System32', key, value)
else:
raise
return text
def _RegistryGetValueUsingWinReg(key, value):
"""Use the _winreg module to obtain the value of a registry key.
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure. Throws
ImportError if _winreg is unavailable.
"""
import _winreg
try:
root, subkey = key.split('\\', 1)
assert root == 'HKLM' # Only need HKLM for now.
with _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey) as hkey:
return _winreg.QueryValueEx(hkey, value)[0]
except WindowsError:
return None
def _RegistryGetValue(key, value):
"""Use _winreg or reg.exe to obtain the value of a registry key.
Using _winreg is preferable because it solves an issue on some corporate
environments where access to reg.exe is locked down. However, we still need
to fallback to reg.exe for the case where the _winreg module is not available
(for example in cygwin python).
Args:
key: The registry key.
value: The particular registry value to read.
Return:
contents of the registry key's value, or None on failure.
"""
try:
return _RegistryGetValueUsingWinReg(key, value)
except ImportError:
pass
# Fallback to reg.exe if we fail to import _winreg.
text = _RegistryQuery(key, value)
if not text:
return None
# Extract value.
match = re.search(r'REG_\w+\s+([^\r]+)\r\n', text)
if not match:
return None
return match.group(1)
def _CreateVersion(name, path, sdk_based=False):
"""Sets up MSVS project generation.
Setup is based off the GYP_MSVS_VERSION environment variable or whatever is
autodetected if GYP_MSVS_VERSION is not explicitly specified. If a version is
  passed in that doesn't match a value in versions, Python will throw an error.
"""
if path:
path = os.path.normpath(path)
versions = {
'2015': VisualStudioVersion('2015',
'Visual Studio 2015',
solution_version='12.00',
project_version='14.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v140'),
'2013': VisualStudioVersion('2013',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2013e': VisualStudioVersion('2013e',
'Visual Studio 2013',
solution_version='13.00',
project_version='12.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v120'),
'2012': VisualStudioVersion('2012',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2012e': VisualStudioVersion('2012e',
'Visual Studio 2012',
solution_version='12.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based,
default_toolset='v110'),
'2010': VisualStudioVersion('2010',
'Visual Studio 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=False,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2010e': VisualStudioVersion('2010e',
'Visual C++ Express 2010',
solution_version='11.00',
project_version='4.0',
flat_sln=True,
uses_vcxproj=True,
path=path,
sdk_based=sdk_based),
'2008': VisualStudioVersion('2008',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2008e': VisualStudioVersion('2008e',
'Visual Studio 2008',
solution_version='10.00',
project_version='9.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005': VisualStudioVersion('2005',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=False,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
'2005e': VisualStudioVersion('2005e',
'Visual Studio 2005',
solution_version='9.00',
project_version='8.00',
flat_sln=True,
uses_vcxproj=False,
path=path,
sdk_based=sdk_based),
}
return versions[str(name)]
def _ConvertToCygpath(path):
"""Convert to cygwin path if we are using cygwin."""
if sys.platform == 'cygwin':
p = subprocess.Popen(['cygpath', path], stdout=subprocess.PIPE)
path = p.communicate()[0].strip()
return path
def _DetectVisualStudioVersions(versions_to_check, force_express):
"""Collect the list of installed visual studio versions.
Returns:
A list of visual studio versions installed in descending order of
usage preference.
Base this on the registry and a quick check if devenv.exe exists.
  Only versions 8-14 are considered.
Possibilities are:
2005(e) - Visual Studio 2005 (8)
2008(e) - Visual Studio 2008 (9)
2010(e) - Visual Studio 2010 (10)
2012(e) - Visual Studio 2012 (11)
2013(e) - Visual Studio 2013 (12)
2015 - Visual Studio 2015 (14)
Where (e) is e for express editions of MSVS and blank otherwise.
"""
version_to_year = {
'8.0': '2005',
'9.0': '2008',
'10.0': '2010',
'11.0': '2012',
'12.0': '2013',
'14.0': '2015',
}
versions = []
for version in versions_to_check:
# Old method of searching for which VS version is installed
# We don't use the 2010-encouraged-way because we also want to get the
# path to the binaries, which it doesn't offer.
keys = [r'HKLM\Software\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\%s' % version,
r'HKLM\Software\Microsoft\VCExpress\%s' % version,
r'HKLM\Software\Wow6432Node\Microsoft\VCExpress\%s' % version]
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], 'InstallDir')
if not path:
continue
path = _ConvertToCygpath(path)
# Check for full.
full_path = os.path.join(path, 'devenv.exe')
express_path = os.path.join(path, '*express.exe')
if not force_express and os.path.exists(full_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version],
os.path.join(path, '..', '..')))
# Check for express.
elif glob.glob(express_path):
# Add this one.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..', '..')))
# The old method above does not work when only SDK is installed.
keys = [r'HKLM\Software\Microsoft\VisualStudio\SxS\VC7',
r'HKLM\Software\Wow6432Node\Microsoft\VisualStudio\SxS\VC7']
for index in range(len(keys)):
path = _RegistryGetValue(keys[index], version)
if not path:
continue
path = _ConvertToCygpath(path)
if version != '14.0': # There is no Express edition for 2015.
versions.append(_CreateVersion(version_to_year[version] + 'e',
os.path.join(path, '..'), sdk_based=True))
return versions
def SelectVisualStudioVersion(version='auto', allow_fallback=True):
"""Select which version of Visual Studio projects to generate.
Arguments:
version: Hook to allow caller to force a particular version (vs auto).
Returns:
An object representing a visual studio project format version.
"""
# In auto mode, check environment variable for override.
if version == 'auto':
version = os.environ.get('GYP_MSVS_VERSION', 'auto')
version_map = {
'auto': ('14.0', '12.0', '10.0', '9.0', '8.0', '11.0'),
'2005': ('8.0',),
'2005e': ('8.0',),
'2008': ('9.0',),
'2008e': ('9.0',),
'2010': ('10.0',),
'2010e': ('10.0',),
'2012': ('11.0',),
'2012e': ('11.0',),
'2013': ('12.0',),
'2013e': ('12.0',),
'2015': ('14.0',),
}
override_path = os.environ.get('GYP_MSVS_OVERRIDE_PATH')
if override_path:
msvs_version = os.environ.get('GYP_MSVS_VERSION')
if not msvs_version:
raise ValueError('GYP_MSVS_OVERRIDE_PATH requires GYP_MSVS_VERSION to be '
'set to a particular version (e.g. 2010e).')
return _CreateVersion(msvs_version, override_path, sdk_based=True)
version = str(version)
versions = _DetectVisualStudioVersions(version_map[version], 'e' in version)
if not versions:
if not allow_fallback:
raise ValueError('Could not locate Visual Studio installation.')
if version == 'auto':
# Default to 2005 if we couldn't find anything
return _CreateVersion('2005', None)
else:
return _CreateVersion(version, None)
return versions[0]
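# Illustrative usage sketch (assumed, not part of the original module):
#
#     version = SelectVisualStudioVersion('auto')
#     print version.Description()        # e.g. 'Visual Studio 2013'
#     print version.ProjectExtension()   # '.vcxproj' for 2010 and later
#     print version.SetupScript('x64')   # command that sets up the build env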
| mit |
tjsavage/full_nonrel_starter | django/core/exceptions.py | 292 | 2767 | """
Global Django exception and warning classes.
"""
class DjangoRuntimeWarning(RuntimeWarning):
pass
class ObjectDoesNotExist(Exception):
"The requested object does not exist"
silent_variable_failure = True
class MultipleObjectsReturned(Exception):
"The query returned multiple objects when only one was expected."
pass
class SuspiciousOperation(Exception):
"The user did something suspicious"
pass
class PermissionDenied(Exception):
"The user did not have permission to do that"
pass
class ViewDoesNotExist(Exception):
"The requested view does not exist"
pass
class MiddlewareNotUsed(Exception):
"This middleware is not used in this server configuration"
pass
class ImproperlyConfigured(Exception):
"Django is somehow improperly configured"
pass
class FieldError(Exception):
"""Some kind of problem with a model field."""
pass
NON_FIELD_ERRORS = '__all__'
class ValidationError(Exception):
"""An error while validating data."""
def __init__(self, message, code=None, params=None):
import operator
from django.utils.encoding import force_unicode
"""
ValidationError can be passed any object that can be printed (usually
a string), a list of objects or a dictionary.
"""
if isinstance(message, dict):
self.message_dict = message
# Reduce each list of messages into a single list.
message = reduce(operator.add, message.values())
if isinstance(message, list):
self.messages = [force_unicode(msg) for msg in message]
else:
self.code = code
self.params = params
message = force_unicode(message)
self.messages = [message]
def __str__(self):
# This is needed because, without a __str__(), printing an exception
# instance would result in this:
# AttributeError: ValidationError instance has no attribute 'args'
# See http://www.python.org/doc/current/tut/node10.html#handling
if hasattr(self, 'message_dict'):
return repr(self.message_dict)
return repr(self.messages)
def __repr__(self):
if hasattr(self, 'message_dict'):
return 'ValidationError(%s)' % repr(self.message_dict)
return 'ValidationError(%s)' % repr(self.messages)
def update_error_dict(self, error_dict):
if hasattr(self, 'message_dict'):
if error_dict:
for k, v in self.message_dict.items():
error_dict.setdefault(k, []).extend(v)
else:
error_dict = self.message_dict
else:
error_dict[NON_FIELD_ERRORS] = self.messages
return error_dict
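# Illustrative sketch, not part of Django itself: how a ValidationError built
# from a dict of messages merges into an existing error dict via
# update_error_dict(). The helper name is hypothetical.
def _example_update_error_dict():
    existing = {'name': [u'Too short.']}
    exc = ValidationError({'name': [u'Required.'], 'age': [u'Not a number.']})
    # Messages for 'name' are appended; 'age' is added as a new key.
    return exc.update_error_dict(existing)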
| bsd-3-clause |
RUedx/configuration | tests/test_mongodb_replica_set.py | 25 | 5499 | # Tests for mongodb_replica_set ansible module
#
# How to run these tests:
# 1. move this file to playbooks/library
# 2. rename mongodb_replica_set to mongodb_replica_set.py
# 3. python test_mongodb_replica_set.py
import mongodb_replica_set as mrs
import unittest, mock
from urllib import quote_plus
from copy import deepcopy
class TestNoPatchingMongodbReplicaSet(unittest.TestCase):
def test_host_port_transformation(self):
unfixed = {
'members': [
{'host': 'foo.bar'},
{'host': 'bar.baz', 'port': 1234},
{'host': 'baz.bing:54321'}
]}
fixed = {
'members': [
{'host': 'foo.bar:27017'},
{'host': 'bar.baz:1234'},
{'host': 'baz.bing:54321'}
]}
mrs.fix_host_port(unfixed)
self.assertEqual(fixed, unfixed)
fixed_2 = deepcopy(fixed)
mrs.fix_host_port(fixed_2)
self.assertEqual(fixed, fixed_2)
def test_member_id_managed(self):
new = [
{'host': 'foo.bar', '_id': 1},
{'host': 'bar.baz'},
{'host': 'baz.bing'}
]
old = [
{'host': 'baz.bing', '_id': 0}
]
fixed = deepcopy(new)
mrs.set_member_ids(fixed, old)
#test that each id is unique
unique_ids = {m['_id'] for m in fixed}
self.assertEqual(len(unique_ids), len(new))
#test that it "prefers" the "matching" one in old_members
self.assertEqual(fixed[0]['_id'], new[0]['_id'])
self.assertEqual(fixed[2]['_id'], old[0]['_id'])
self.assertIn('_id', fixed[1])
def test_mongo_uri_escaped(self):
host = username = password = auth_database = ':!@#$%/'
port = 1234
uri = mrs.get_mongo_uri(host=host, port=port, username=username, password=password, auth_database=auth_database)
self.assertEqual(uri, "mongodb://{un}:{pw}@{host}:{port}/{db}".format(
un=quote_plus(username), pw=quote_plus(password),
host=quote_plus(host), port=port, db=quote_plus(auth_database),
))
rs_id = 'a replset id'
members = [
{'host': 'foo.bar:1234'},
{'host': 'bar.baz:4321'},
]
old_rs_config = {
'version': 1,
'_id': rs_id,
'members': [
{'_id': 0, 'host': 'foo.bar:1234',},
{'_id': 1, 'host': 'bar.baz:4321',},
]
}
new_rs_config = {
'version': 2,
'_id': rs_id,
'members': [
{'_id': 0, 'host': 'foo.bar:1234',},
{'_id': 1, 'host': 'bar.baz:4321',},
{'_id': 2, 'host': 'baz.bing:27017',},
]
}
rs_config = {
'members': [
{'host': 'foo.bar', 'port': 1234,},
{'host': 'bar.baz', 'port': 4321,},
{'host': 'baz.bing', 'port': 27017,},
]
}
def init_replset_mock(f):
get_replset_initialize_mock = mock.patch.object(mrs, 'get_replset',
side_effect=(None, deepcopy(new_rs_config)))
initialize_replset_mock = mock.patch.object(mrs, 'initialize_replset')
return get_replset_initialize_mock(initialize_replset_mock(f))
def update_replset_mock(f):
get_replset_update_mock = mock.patch.object(mrs, 'get_replset',
side_effect=(deepcopy(old_rs_config), deepcopy(new_rs_config)))
reconfig_replset_mock = mock.patch.object(mrs, 'reconfig_replset')
return get_replset_update_mock(reconfig_replset_mock(f))
@mock.patch.object(mrs, 'get_rs_config_id', return_value=rs_id)
@mock.patch.object(mrs, 'client', create=True)
@mock.patch.object(mrs, 'module', create=True)
class TestPatchingMongodbReplicaSet(unittest.TestCase):
@update_replset_mock
def test_version_managed(self, _1, _2, module, *args):
# Version set automatically on initialize
mrs.update_replset(deepcopy(rs_config))
new_version = module.exit_json.call_args[1]['config']['version']
self.assertEqual(old_rs_config['version'], new_version - 1)
@init_replset_mock
def test_doc_id_managed_on_initialize(self, _1, _2, module, *args):
#old_rs_config provided by init_replset_mock via mrs.get_replset().
#That returns None on the first call, so it falls through to get_rs_config_id(),
#which is also mocked.
mrs.update_replset(deepcopy(rs_config))
new_id = module.exit_json.call_args[1]['config']['_id']
self.assertEqual(rs_id, new_id)
@update_replset_mock
def test_doc_id_managed_on_update(self, _1, _2, module, *args):
#old_rs_config provided by update_replset_mock via mrs.get_replset()
mrs.update_replset(deepcopy(rs_config))
new_id = module.exit_json.call_args[1]['config']['_id']
self.assertEqual(rs_id, new_id)
@init_replset_mock
def test_initialize_if_necessary(self, initialize_replset, _2, module, *args):
mrs.update_replset(deepcopy(rs_config))
self.assertTrue(initialize_replset.called)
#self.assertFalse(reconfig_replset.called)
@update_replset_mock
def test_reconfig_if_necessary(self, reconfig_replset, _2, module, *args):
mrs.update_replset(deepcopy(rs_config))
self.assertTrue(reconfig_replset.called)
#self.assertFalse(initialize_replset.called)
@update_replset_mock
def test_not_changed_when_docs_match(self, _1, _2, module, *args):
rs_config = {'members': members} #This way the docs "match", but aren't identical
mrs.update_replset(deepcopy(rs_config))
changed = module.exit_json.call_args[1]['changed']
self.assertFalse(changed)
@update_replset_mock
def test_ignores_magic_given_full_doc(self, _1, _2, module, _3, get_rs_config_id, *args):
mrs.update_replset(deepcopy(new_rs_config))
new_doc = module.exit_json.call_args[1]['config']
self.assertEqual(new_doc, new_rs_config)
self.assertFalse(get_rs_config_id.called)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
sudeepdutt/mic | tools/perf/scripts/python/syscall-counts-by-pid.py | 1996 | 2105 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
huguesv/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/_bootlocale.py | 40 | 1801 | """A minimal subset of the locale module used at interpreter startup
(imported by the _io module), in order to reduce startup time.
Don't import directly from third-party code; use the `locale` module instead!
"""
import sys
import _locale
if sys.platform.startswith("win"):
def getpreferredencoding(do_setlocale=True):
if sys.flags.utf8_mode:
return 'UTF-8'
return _locale._getdefaultlocale()[1]
else:
try:
_locale.CODESET
except AttributeError:
if hasattr(sys, 'getandroidapilevel'):
# On Android langinfo.h and CODESET are missing, and UTF-8 is
# always used in mbstowcs() and wcstombs().
def getpreferredencoding(do_setlocale=True):
return 'UTF-8'
else:
def getpreferredencoding(do_setlocale=True):
if sys.flags.utf8_mode:
return 'UTF-8'
# This path for legacy systems needs the more complex
# getdefaultlocale() function, import the full locale module.
import locale
return locale.getpreferredencoding(do_setlocale)
else:
def getpreferredencoding(do_setlocale=True):
assert not do_setlocale
if sys.flags.utf8_mode:
return 'UTF-8'
result = _locale.nl_langinfo(_locale.CODESET)
if not result and sys.platform == 'darwin':
# nl_langinfo can return an empty string
# when the setting has an invalid value.
# Default to UTF-8 in that case because
# UTF-8 is the default charset on OSX and
# returning nothing will crash the
# interpreter.
result = 'UTF-8'
return result
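# Illustrative note, not part of the stdlib module: whichever branch above
# defined getpreferredencoding(), early-startup callers such as the _io module
# invoke it with do_setlocale=False. The helper name is hypothetical.
def _example_startup_encoding():
    return getpreferredencoding(False)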
| apache-2.0 |
mixja/eap-sim-lab | lib/pyscard-1.6.16/build/lib.macosx-10.10-x86_64-2.7/smartcard/CardConnection.py | 2 | 7754 | """The CardConnection abstract class manages connections with a card and
apdu transmission.
__author__ = "http://www.gemalto.com"
Copyright 2001-2012 gemalto
Author: Jean-Daniel Aussel, mailto:[email protected]
This file is part of pyscard.
pyscard is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
pyscard is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with pyscard; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from smartcard.CardConnectionEvent import CardConnectionEvent
from smartcard.Exceptions import SmartcardException
from smartcard.Observer import Observer
from smartcard.Observer import Observable
class CardConnection(Observable):
"""Card connection abstract class.
Known subclasses: smartcard.pcsc.PCSCCardConnection
"""
T0_protocol = 0x00000001
T1_protocol = 0x00000002
RAW_protocol = 0x00010000
T15_protocol = 0x00000008
def __init__(self, reader):
"""Construct a new card connection.
reader: the reader in which the smartcard to connect
to is located.
"""
Observable.__init__(self)
self.reader = reader
self.errorcheckingchain = None
self.defaultprotocol = CardConnection.T0_protocol |\
CardConnection.T1_protocol
def __del__(self):
"""Connect to card."""
pass
def addSWExceptionToFilter(self, exClass):
"""Add a status word exception class to be filtered.
exClass: the class to filter, e.g.
smartcard.sw.SWException.WarningProcessingException
Filtered exceptions will not be raised when encountered in the
error checking chain."""
if None != self.errorcheckingchain:
self.errorcheckingchain[0].addFilterException(exClass)
def addObserver(self, observer):
"""Add a CardConnection observer."""
Observable.addObserver(self, observer)
def deleteObserver(self, observer):
"""Remove a CardConnection observer."""
Observable.deleteObserver(self, observer)
def connect(self, protocol=None, mode=None, disposition=None):
"""Connect to card.
protocol: a bit mask of the protocols to use, from
CardConnection.T0_protocol, CardConnection.T1_protocol,
CardConnection.RAW_protocol, CardConnection.T15_protocol
mode: passed as-is to the PC/SC layer
"""
Observable.setChanged(self)
Observable.notifyObservers(self, CardConnectionEvent('connect'))
def disconnect(self):
"""Disconnect from card."""
Observable.setChanged(self)
Observable.notifyObservers(self, CardConnectionEvent('disconnect'))
def getATR(self):
"""Return card ATR"""
pass
def getProtocol(self):
"""Return bit mask for the protocol of connection, or None if no
protocol set. The return value is a bit mask of
CardConnection.T0_protocol, CardConnection.T1_protocol,
CardConnection.RAW_protocol, CardConnection.T15_protocol
"""
return self.defaultprotocol
def getReader(self):
"""Return card connection reader"""
return self.reader
def setErrorCheckingChain(self, errorcheckingchain):
"""Add an error checking chain.
errorcheckingchain: a smartcard.sw.ErrorCheckingChain object The
error checking strategies in errorchecking chain will be tested
with each received response APDU, and a
smartcard.sw.SWException.SWException will be raised upon
error."""
self.errorcheckingchain = errorcheckingchain
def setProtocol(self, protocol):
"""Set protocol for card connection.
protocol: a bit mask of CardConnection.T0_protocol,
CardConnection.T1_protocol, CardConnection.RAW_protocol,
CardConnection.T15_protocol e.g.
setProtocol(CardConnection.T1_protocol |
CardConnection.T0_protocol) """
self.defaultprotocol = protocol
def transmit(self, bytes, protocol=None):
"""Transmit an apdu. Internally calls doTransmit() class method
and notify observers upon command/response APDU events.
Subclasses must override the doTransmit() class method.
bytes: list of bytes to transmit
protocol: the transmission protocol, from
CardConnection.T0_protocol,
CardConnection.T1_protocol, or
CardConnection.RAW_protocol
"""
Observable.setChanged(self)
Observable.notifyObservers(self,
CardConnectionEvent(
'command',
[bytes, protocol]))
data, sw1, sw2 = self.doTransmit(bytes, protocol)
Observable.setChanged(self)
Observable.notifyObservers(self,
CardConnectionEvent(
'response',
[data, sw1, sw2]))
if None != self.errorcheckingchain:
self.errorcheckingchain[0](data, sw1, sw2)
return data, sw1, sw2
def doTransmit(self, bytes, protocol):
"""Performs the command APDU transmission.
Subclasses must override this method for implementing apdu
transmission."""
pass
def control(self, controlCode, bytes=[]):
"""Send a control command and buffer. Internally calls doControl()
class method and notify observers upon command/response events.
Subclasses must override the doControl() class method.
controlCode: command code
bytes: list of bytes to transmit
"""
Observable.setChanged(self)
Observable.notifyObservers(self,
CardConnectionEvent(
'command',
[controlCode, bytes]))
data = self.doControl(controlCode, bytes)
Observable.setChanged(self)
Observable.notifyObservers(self,
CardConnectionEvent(
'response',
data))
if None != self.errorcheckingchain:
self.errorcheckingchain[0](data)
return data
def doControl(self, controlCode, bytes):
"""Performs the command control.
Subclasses must override this method for implementing control."""
pass
def getAttrib(self, attribId):
"""return the requested attribute
attribId: attribute id like SCARD_ATTR_VENDOR_NAME
"""
Observable.setChanged(self)
Observable.notifyObservers(self,
CardConnectionEvent(
'attrib',
[attribId]))
data = self.doGetAttrib(attribId)
if None != self.errorcheckingchain:
self.errorcheckingchain[0](data)
return data
def doGetAttrib(self, attribId):
"""Performs the command get attrib.
Subclasses must override this method for implementing get attrib."""
pass
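# Illustrative sketch, not part of pyscard: a minimal observer that traces the
# command/response APDU events emitted by transmit(). The class name is
# hypothetical; event.type and event.args are assumed to be the fields carried
# by CardConnectionEvent.
class _TracingCardConnectionObserver(Observer):
    """Print every CardConnectionEvent pushed by a CardConnection."""
    def update(self, cardconnection, event):
        print("%s %r" % (event.type, event.args))
# Typical wiring: connection.addObserver(_TracingCardConnectionObserver())
# before calling connection.transmit([0x00, 0xA4, 0x04, 0x00]).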
| mit |
mcepl/youtube-dl | youtube_dl/extractor/charlierose.py | 12 | 1709 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import remove_end
class CharlieRoseIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?charlierose\.com/video(?:s|/player)/(?P<id>\d+)'
_TESTS = [{
'url': 'https://charlierose.com/videos/27996',
'md5': 'fda41d49e67d4ce7c2411fd2c4702e09',
'info_dict': {
'id': '27996',
'ext': 'mp4',
'title': 'Remembering Zaha Hadid',
'thumbnail': 're:^https?://.*\.jpg\?\d+',
'description': 'We revisit past conversations with Zaha Hadid, in memory of the world renowned Iraqi architect.',
'subtitles': {
'en': [{
'ext': 'vtt',
}],
},
},
}, {
'url': 'https://charlierose.com/videos/27996',
'only_matching': True,
}]
_PLAYER_BASE = 'https://charlierose.com/video/player/%s'
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(self._PLAYER_BASE % video_id, video_id)
title = remove_end(self._og_search_title(webpage), ' - Charlie Rose')
info_dict = self._parse_html5_media_entries(
self._PLAYER_BASE % video_id, webpage, video_id,
m3u8_entry_protocol='m3u8_native')[0]
self._sort_formats(info_dict['formats'])
self._remove_duplicate_formats(info_dict['formats'])
info_dict.update({
'id': video_id,
'title': title,
'thumbnail': self._og_search_thumbnail(webpage),
'description': self._og_search_description(webpage),
})
return info_dict
| unlicense |
ttfseiko/openerp-trunk | openerp/addons/resource/faces/observer.py | 433 | 2328 | #@+leo-ver=4
#@+node:@file observer.py
#@@language python
#@<< Copyright >>
#@+node:<< Copyright >>
############################################################################
# Copyright (C) 2005, 2006, 2007, 2008 by Reithinger GmbH
# [email protected]
#
# This file is part of faces.
#
# faces is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# faces is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the
# Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
############################################################################
#@-node:<< Copyright >>
#@nl
"""
This module contains the base class for all observer objects
"""
#@<< Imports >>
#@+node:<< Imports >>
#@-node:<< Imports >>
#@nl
_is_source_ = True
#@+others
#@+node:class Observer
class Observer(object):
"""
Base Class for all charts and reports.
@var visible: Specifies if the observer is visible
at the navigation bar inside the gui.
@var link_view: syncronizes the marked objects in all views.
"""
#@ << declarations >>
#@+node:<< declarations >>
__type_name__ = None
__type_image__ = None
visible = True
link_view = True
__attrib_completions__ = { "visible" : 'visible = False',
"link_view" : "link_view = False" }
#@-node:<< declarations >>
#@nl
#@ @+others
#@+node:register_editors
def register_editors(cls, registry):
pass
register_editors = classmethod(register_editors)
#@-node:register_editors
#@-others
#@-node:class Observer
#@-others
factories = { }
clear_cache_funcs = {}
#@-node:@file observer.py
#@-leo
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ax003d/openerp | openerp/addons/plugin_thunderbird/plugin_thunderbird.py | 92 | 2075 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class plugin_thunderbird_installer(osv.osv_memory):
_name = 'plugin_thunderbird.installer'
_inherit = 'res.config.installer'
_columns = {
'thunderbird': fields.boolean('Thunderbird Plug-in', help="Allows you to select an object that you would like to add to your email and its attachments."),
'plugin_name': fields.char('File name', size=64),
'plugin_file': fields.char('Thunderbird Plug-in', size=256, readonly=True, help="Thunderbird plug-in file. Save this file and install it in Thunderbird."),
}
_defaults = {
'thunderbird': True,
'plugin_name': 'openerp_plugin.xpi',
}
def default_get(self, cr, uid, fields, context=None):
res = super(plugin_thunderbird_installer, self).default_get(cr, uid, fields, context)
base_url = self.pool.get('ir.config_parameter').get_param(cr, uid, 'web.base.url')
res['plugin_file'] = base_url + '/plugin_thunderbird/static/openerp_plugin.xpi'
return res
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
qmagico/sampleappqm | src/django/db/backends/postgresql_psycopg2/introspection.py | 82 | 4121 | from django.db.backends import BaseDatabaseIntrospection
class DatabaseIntrospection(BaseDatabaseIntrospection):
# Maps type codes to Django Field types.
data_types_reverse = {
16: 'BooleanField',
20: 'BigIntegerField',
21: 'SmallIntegerField',
23: 'IntegerField',
25: 'TextField',
700: 'FloatField',
701: 'FloatField',
869: 'GenericIPAddressField',
1042: 'CharField', # blank-padded
1043: 'CharField',
1082: 'DateField',
1083: 'TimeField',
1114: 'DateTimeField',
1184: 'DateTimeField',
1266: 'TimeField',
1700: 'DecimalField',
}
def get_table_list(self, cursor):
"Returns a list of table names in the current database."
cursor.execute("""
SELECT c.relname
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'v', '')
AND n.nspname NOT IN ('pg_catalog', 'pg_toast')
AND pg_catalog.pg_table_is_visible(c.oid)""")
return [row[0] for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
# As cursor.description does not return reliably the nullable property,
# we have to query the information_schema (#7783)
cursor.execute("""
SELECT column_name, is_nullable
FROM information_schema.columns
WHERE table_name = %s""", [table_name])
null_map = dict(cursor.fetchall())
cursor.execute("SELECT * FROM %s LIMIT 1" % self.connection.ops.quote_name(table_name))
return [tuple([item for item in line[:6]] + [null_map[line[0]]==u'YES'])
for line in cursor.description]
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_index: (field_index_other_table, other_table)}
representing all relationships to the given table. Indexes are 0-based.
"""
cursor.execute("""
SELECT con.conkey, con.confkey, c2.relname
FROM pg_constraint con, pg_class c1, pg_class c2
WHERE c1.oid = con.conrelid
AND c2.oid = con.confrelid
AND c1.relname = %s
AND con.contype = 'f'""", [table_name])
relations = {}
for row in cursor.fetchall():
# row[0] and row[1] are single-item lists, so grab the single item.
relations[row[0][0] - 1] = (row[1][0] - 1, row[2])
return relations
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of fieldname -> infodict for the given table,
where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
"""
# This query retrieves each index on the given table, including the
# first associated field name
cursor.execute("""
SELECT attr.attname, idx.indkey, idx.indisunique, idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx, pg_catalog.pg_attribute attr
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND attr.attrelid = c.oid
AND attr.attnum = idx.indkey[0]
AND c.relname = %s""", [table_name])
indexes = {}
for row in cursor.fetchall():
# row[1] (idx.indkey) is stored in the DB as an array. It comes out as
# a string of space-separated integers. This designates the field
# indexes (1-based) of the fields that have indexes on the table.
# Here, we skip any indexes across multiple fields.
if ' ' in row[1]:
continue
indexes[row[0]] = {'primary_key': row[3], 'unique': row[2]}
return indexes
| mit |
MattDevo/edk2 | AppPkg/Applications/Python/Python-2.7.2/Lib/multifile.py | 67 | 4982 | """A readline()-style interface to the parts of a multipart message.
The MultiFile class makes each part of a multipart message "feel" like
an ordinary file, as long as you use fp.readline(). Allows recursive
use, for nested multipart messages. Probably best used together
with module mimetools.
Suggested use:
real_fp = open(...)
fp = MultiFile(real_fp)
"read some lines from fp"
fp.push(separator)
while 1:
"read lines from fp until it returns an empty string" (A)
if not fp.next(): break
fp.pop()
"read remaining lines from fp until it returns an empty string"
The latter sequence may be used recursively at (A).
It is also allowed to use multiple push()...pop() sequences.
If seekable is given as 0, the class code will not do the bookkeeping
it normally attempts in order to make seeks relative to the beginning of the
current file part. This may be useful when using MultiFile with a non-
seekable stream object.
"""
from warnings import warn
warn("the multifile module has been deprecated since Python 2.5",
DeprecationWarning, stacklevel=2)
del warn
__all__ = ["MultiFile","Error"]
class Error(Exception):
pass
class MultiFile:
seekable = 0
def __init__(self, fp, seekable=1):
self.fp = fp
self.stack = []
self.level = 0
self.last = 0
if seekable:
self.seekable = 1
self.start = self.fp.tell()
self.posstack = []
def tell(self):
if self.level > 0:
return self.lastpos
return self.fp.tell() - self.start
def seek(self, pos, whence=0):
here = self.tell()
if whence:
if whence == 1:
pos = pos + here
elif whence == 2:
if self.level > 0:
pos = pos + self.lastpos
else:
raise Error, "can't use whence=2 yet"
if not 0 <= pos <= here or \
self.level > 0 and pos > self.lastpos:
raise Error, 'bad MultiFile.seek() call'
self.fp.seek(pos + self.start)
self.level = 0
self.last = 0
def readline(self):
if self.level > 0:
return ''
line = self.fp.readline()
# Real EOF?
if not line:
self.level = len(self.stack)
self.last = (self.level > 0)
if self.last:
raise Error, 'sudden EOF in MultiFile.readline()'
return ''
assert self.level == 0
# Fast check to see if this is just data
if self.is_data(line):
return line
else:
# Ignore trailing whitespace on marker lines
marker = line.rstrip()
# No? OK, try to match a boundary.
# Return the line (unstripped) if we don't.
for i, sep in enumerate(reversed(self.stack)):
if marker == self.section_divider(sep):
self.last = 0
break
elif marker == self.end_marker(sep):
self.last = 1
break
else:
return line
# We only get here if we see a section divider or EOM line
if self.seekable:
self.lastpos = self.tell() - len(line)
self.level = i+1
if self.level > 1:
raise Error,'Missing endmarker in MultiFile.readline()'
return ''
def readlines(self):
list = []
while 1:
line = self.readline()
if not line: break
list.append(line)
return list
def read(self): # Note: no size argument -- read until EOF only!
return ''.join(self.readlines())
def next(self):
while self.readline(): pass
if self.level > 1 or self.last:
return 0
self.level = 0
self.last = 0
if self.seekable:
self.start = self.fp.tell()
return 1
def push(self, sep):
if self.level > 0:
raise Error, 'bad MultiFile.push() call'
self.stack.append(sep)
if self.seekable:
self.posstack.append(self.start)
self.start = self.fp.tell()
def pop(self):
if self.stack == []:
raise Error, 'bad MultiFile.pop() call'
if self.level <= 1:
self.last = 0
else:
abslastpos = self.lastpos + self.start
self.level = max(0, self.level - 1)
self.stack.pop()
if self.seekable:
self.start = self.posstack.pop()
if self.level > 0:
self.lastpos = abslastpos - self.start
def is_data(self, line):
return line[:2] != '--'
def section_divider(self, str):
return "--" + str
def end_marker(self, str):
return "--" + str + "--"
| bsd-2-clause |
kbdick/RecycleTracker | recyclecollector/scrap/gdata-2.0.18/samples/apps/marketplace_sample/gdata/tlslite/BaseDB.py | 238 | 3508 | """Base class for SharedKeyDB and VerifierDB."""
import anydbm
import thread
class BaseDB:
def __init__(self, filename, type):
self.type = type
self.filename = filename
if self.filename:
self.db = None
else:
self.db = {}
self.lock = thread.allocate_lock()
def create(self):
"""Create a new on-disk database.
@raise anydbm.error: If there's a problem creating the database.
"""
if self.filename:
self.db = anydbm.open(self.filename, "n") #raises anydbm.error
self.db["--Reserved--type"] = self.type
self.db.sync()
else:
self.db = {}
def open(self):
"""Open a pre-existing on-disk database.
@raise anydbm.error: If there's a problem opening the database.
@raise ValueError: If the database is not of the right type.
"""
if not self.filename:
raise ValueError("Can only open on-disk databases")
self.db = anydbm.open(self.filename, "w") #raises anydbm.error
try:
if self.db["--Reserved--type"] != self.type:
raise ValueError("Not a %s database" % self.type)
except KeyError:
raise ValueError("Not a recognized database")
def __getitem__(self, username):
if self.db == None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
valueStr = self.db[username]
finally:
self.lock.release()
return self._getItem(username, valueStr)
def __setitem__(self, username, value):
if self.db == None:
raise AssertionError("DB not open")
valueStr = self._setItem(username, value)
self.lock.acquire()
try:
self.db[username] = valueStr
if self.filename:
self.db.sync()
finally:
self.lock.release()
def __delitem__(self, username):
if self.db == None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
del(self.db[username])
if self.filename:
self.db.sync()
finally:
self.lock.release()
def __contains__(self, username):
"""Check if the database contains the specified username.
@type username: str
@param username: The username to check for.
@rtype: bool
@return: True if the database contains the username, False
otherwise.
"""
if self.db == None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
return self.db.has_key(username)
finally:
self.lock.release()
def check(self, username, param):
value = self.__getitem__(username)
return self._checkItem(value, username, param)
def keys(self):
"""Return a list of usernames in the database.
@rtype: list
@return: The usernames in the database.
"""
if self.db == None:
raise AssertionError("DB not open")
self.lock.acquire()
try:
usernames = self.db.keys()
finally:
self.lock.release()
usernames = [u for u in usernames if not u.startswith("--Reserved--")]
return usernames | gpl-3.0 |
dunkhong/grr | grr/server/grr_response_server/gui/api_plugins/artifact_test.py | 2 | 4766 | #!/usr/bin/env python
"""This modules contains tests for artifact API handler."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import io
import os
from absl import app
from grr_response_core import config
from grr_response_core.lib.rdfvalues import artifacts as rdf_artifacts
from grr_response_server import artifact
from grr_response_server.gui import api_test_lib
from grr_response_server.gui.api_plugins import artifact as artifact_plugin
from grr.test_lib import artifact_test_lib
from grr.test_lib import db_test_lib
from grr.test_lib import flow_test_lib
from grr.test_lib import test_lib
@db_test_lib.TestDatabases()
class ApiListArtifactsHandlerTest(flow_test_lib.FlowTestsBaseclass):
"""Test for ApiListArtifactsHandler."""
def setUp(self):
super(ApiListArtifactsHandlerTest, self).setUp()
self.handler = artifact_plugin.ApiListArtifactsHandler()
@artifact_test_lib.PatchCleanArtifactRegistry
def testNoArtifacts(self, _):
result = self.handler.Handle(self.handler.args_type(), token=self.token)
self.assertEqual(result.total_count, 0)
self.assertEqual(result.items, [])
@artifact_test_lib.PatchDefaultArtifactRegistry
def testPrepackagedArtifacts(self, registry):
test_artifacts_file = os.path.join(config.CONFIG["Test.data_dir"],
"artifacts", "test_artifacts.json")
registry.AddFileSource(test_artifacts_file)
result = self.handler.Handle(self.handler.args_type(), token=self.token)
# Some artifacts are guaranteed to be returned, as they're defined in
# the test_data/artifacts/test_artifacts.json.
self.assertTrue(result.total_count)
# Check that FakeArtifact artifact exists. It's guaranteed to exist, since
# it's defined in test_data/artifacts/test_artifacts.json.
for item in result.items:
if item.artifact.name == "FakeArtifact":
fake_artifact = item
self.assertTrue(fake_artifact)
self.assertTrue(fake_artifact.HasField("is_custom"))
self.assertFalse(fake_artifact.is_custom)
self.assertTrue(fake_artifact.artifact.doc)
self.assertTrue(fake_artifact.artifact.labels)
self.assertTrue(fake_artifact.artifact.supported_os)
@db_test_lib.TestDatabases()
class ApiUploadArtifactHandlerTest(api_test_lib.ApiCallHandlerTest):
def setUp(self):
super(ApiUploadArtifactHandlerTest, self).setUp()
self.handler = artifact_plugin.ApiUploadArtifactHandler()
@artifact_test_lib.PatchCleanArtifactRegistry
def testUpload(self, registry):
test_artifacts_file = os.path.join(config.CONFIG["Test.data_dir"],
"artifacts", "test_artifact.json")
with open(test_artifacts_file, "rb") as fd:
args = self.handler.args_type(artifact=fd.read())
with self.assertRaises(rdf_artifacts.ArtifactNotRegisteredError):
registry.GetArtifact("TestDrivers")
self.handler.Handle(args, token=self.token)
registry.GetArtifact("TestDrivers")
@db_test_lib.TestDatabases()
@artifact_test_lib.PatchDefaultArtifactRegistry
class ApiDeleteArtifactsHandlerTest(api_test_lib.ApiCallHandlerTest):
def setUp(self):
super(ApiDeleteArtifactsHandlerTest, self).setUp()
self.handler = artifact_plugin.ApiDeleteArtifactsHandler()
def UploadTestArtifacts(self):
test_artifacts_file = os.path.join(config.CONFIG["Test.data_dir"],
"artifacts", "test_artifacts.json")
with io.open(test_artifacts_file, mode="r", encoding="utf-8") as fd:
artifact.UploadArtifactYamlFile(fd.read())
def testDeletesArtifactsWithSpecifiedNames(self, registry):
self.UploadTestArtifacts()
count = len(registry.GetArtifacts(reload_datastore_artifacts=True))
args = self.handler.args_type(
names=["TestFilesArtifact", "WMIActiveScriptEventConsumer"])
self.handler.Handle(args, token=self.token)
new_count = len(registry.GetArtifacts())
# Check that we deleted exactly 2 artifacts.
self.assertEqual(new_count, count - 2)
def testDeleteDependency(self, registry):
self.UploadTestArtifacts()
args = self.handler.args_type(names=["TestAggregationArtifact"])
with self.assertRaises(ValueError):
self.handler.Handle(args, token=self.token)
def testDeleteNonExistentArtifact(self, registry):
self.UploadTestArtifacts()
args = self.handler.args_type(names=["NonExistentArtifact"])
e = self.assertRaises(ValueError)
with e:
self.handler.Handle(args, token=self.token)
self.assertEqual(
str(e.exception),
"Artifact(s) to delete (NonExistentArtifact) not found.")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| apache-2.0 |
ngageoint/voxel-globe | voxel_globe/tiepoint_registration/views.py | 2 | 2848 | from django.shortcuts import render
from django.http import HttpResponse
from django.template import RequestContext, loader
def tiepoint_registration_1(request):
from voxel_globe.meta import models
image_set_list = models.ImageSet.objects.all()
return render(request, 'tiepoint_registration/html/tiepoint_registration_1.html',
{'image_set_list':image_set_list})
def tiepoint_registration_2(request, image_set_id):
from voxel_globe.meta import models
camera_set_list = models.ImageSet.objects.get(id=image_set_id).cameras.all()
return render(request, 'tiepoint_registration/html/tiepoint_registration_2.html',
{'camera_set_list':camera_set_list,
'image_set_id':image_set_id})
def tiepoint_registration_3(request, image_set_id, camera_set_id):
from voxel_globe.tiepoint_registration import tasks
image_set_id = int(image_set_id)
t = tasks.tiepoint_registration.apply_async(args=(image_set_id,camera_set_id), user=request.user)
return render(request, 'tiepoint_registration/html/tiepoint_registration_3.html',
{'task_id': t.task_id})
def tiepoint_error_1(request):
from voxel_globe.meta import models
image_set_list = models.ImageSet.objects.all()
return render(request, 'tiepoint_registration/html/tiepoint_error_1.html',
{'image_set_list':image_set_list})
def tiepoint_error_2(request, image_set_id):
from voxel_globe.meta import models
camera_set_list = models.ImageSet.objects.get(id=image_set_id).cameras.all()
return render(request, 'tiepoint_registration/html/tiepoint_error_2.html',
{'camera_set_list':camera_set_list,
'image_set_id':image_set_id})
def tiepoint_error_3(request, image_set_id, camera_set_id):
from voxel_globe.meta import models
scene_list = models.Scene.objects.all()
return render(request, 'tiepoint_registration/html/tiepoint_error_3.html',
{'scene_list':scene_list,
'camera_set_id':camera_set_id,
'image_set_id':image_set_id})
def tiepoint_error_4(request, image_set_id, camera_set_id, scene_id):
from voxel_globe.tiepoint_registration import tasks
image_set_id = int(image_set_id)
t = tasks.tiepoint_error_calculation.apply_async(args=(image_set_id,
camera_set_id,
scene_id),
user=request.user)
return render(request, 'tiepoint_registration/html/tiepoint_error_4.html',
{'task_id': t.task_id})
def order_status(request, task_id):
from celery.result import AsyncResult
task = AsyncResult(task_id)
return render(request, 'task/html/task_3d_error_results.html',
{'task': task}) | mit |
freeflightsim/fg-flying-club | google_appengine/lib/webob/tests/test_response.py | 32 | 1407 | from webob import *
def simple_app(environ, start_response):
start_response('200 OK', [
('Content-Type', 'text/html; charset=utf8'),
])
return ['OK']
def test_response():
req = Request.blank('/')
res = req.get_response(simple_app)
assert res.status == '200 OK'
assert res.status_int == 200
assert res.body == "OK"
assert res.charset == 'utf8'
assert res.content_type == 'text/html'
res.status = 404
assert res.status == '404 Not Found'
assert res.status_int == 404
res.body = 'Not OK'
assert ''.join(res.app_iter) == 'Not OK'
res.charset = 'iso8859-1'
assert res.headers['content-type'] == 'text/html; charset=iso8859-1'
res.content_type = 'text/xml'
assert res.headers['content-type'] == 'text/xml; charset=iso8859-1'
res.headers = {'content-type': 'text/html'}
assert res.headers['content-type'] == 'text/html'
assert res.headerlist == [('content-type', 'text/html')]
res.set_cookie('x', 'y')
assert res.headers['set-cookie'].strip(';') == 'x=y; Path=/'
res = Response('a body', '200 OK', content_type='text/html')
res.encode_content()
assert res.content_encoding == 'gzip'
assert res.body == '\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xffKTH\xcaO\xa9\x04\x00\xf6\x86GI\x06\x00\x00\x00'
res.decode_content()
assert res.content_encoding is None
assert res.body == 'a body'
| gpl-2.0 |
wreckJ/intellij-community | python/helpers/pydev/pydevd_console.py | 42 | 7593 | '''A helper file for the pydev debugger (REPL) console
'''
from code import InteractiveConsole
import sys
import traceback
import _pydev_completer
from pydevd_tracing import GetExceptionTracebackStr
from pydevd_vars import makeValidXmlValue
from pydev_imports import Exec
from pydevd_io import IOBuf
from pydev_console_utils import BaseInterpreterInterface, BaseStdIn
from pydev_override import overrides
import pydevd_save_locals
CONSOLE_OUTPUT = "output"
CONSOLE_ERROR = "error"
#=======================================================================================================================
# ConsoleMessage
#=======================================================================================================================
class ConsoleMessage:
"""Console Messages
"""
def __init__(self):
self.more = False
# List of tuple [('error', 'error_message'), ('message_list', 'output_message')]
self.console_messages = []
def add_console_message(self, message_type, message):
"""add messages in the console_messages list
"""
for m in message.split("\n"):
if m.strip():
self.console_messages.append((message_type, m))
def update_more(self, more):
"""more is set to true if further input is required from the user
else more is set to false
"""
self.more = more
def toXML(self):
"""Create an XML for console message_list, error and more (true/false)
<xml>
<message_list>console message_list</message_list>
<error>console error</error>
<more>true/false</more>
</xml>
"""
makeValid = makeValidXmlValue
xml = '<xml><more>%s</more>' % (self.more)
for message_type, message in self.console_messages:
xml += '<%s message="%s"></%s>' % (message_type, makeValid(message), message_type)
xml += '</xml>'
return xml
#=======================================================================================================================
# DebugConsoleStdIn
#=======================================================================================================================
class DebugConsoleStdIn(BaseStdIn):
overrides(BaseStdIn.readline)
def readline(self, *args, **kwargs):
sys.stderr.write('Warning: Reading from stdin is still not supported in this console.\n')
return '\n'
#=======================================================================================================================
# DebugConsole
#=======================================================================================================================
class DebugConsole(InteractiveConsole, BaseInterpreterInterface):
"""Wrapper around code.InteractiveConsole, in order to send
errors and outputs to the debug console
"""
overrides(BaseInterpreterInterface.createStdIn)
def createStdIn(self):
return DebugConsoleStdIn() #For now, raw_input is not supported in this console.
overrides(InteractiveConsole.push)
def push(self, line, frame):
"""Change built-in stdout and stderr methods by the
new custom StdMessage.
execute the InteractiveConsole.push.
Change the stdout and stderr back be the original built-ins
Return boolean (True if more input is required else False),
output_messages and input_messages
"""
more = False
original_stdout = sys.stdout
original_stderr = sys.stderr
try:
try:
self.frame = frame
out = sys.stdout = IOBuf()
err = sys.stderr = IOBuf()
more = self.addExec(line)
except Exception:
exc = GetExceptionTracebackStr()
err.buflist.append("Internal Error: %s" % (exc,))
finally:
#Remove frame references.
self.frame = None
frame = None
sys.stdout = original_stdout
sys.stderr = original_stderr
return more, out.buflist, err.buflist
overrides(BaseInterpreterInterface.doAddExec)
def doAddExec(self, line):
return InteractiveConsole.push(self, line)
overrides(InteractiveConsole.runcode)
def runcode(self, code):
"""Execute a code object.
When an exception occurs, self.showtraceback() is called to
display a traceback. All exceptions are caught except
SystemExit, which is reraised.
A note about KeyboardInterrupt: this exception may occur
elsewhere in this code, and may not always be caught. The
caller should be prepared to deal with it.
"""
try:
Exec(code, self.frame.f_globals, self.frame.f_locals)
pydevd_save_locals.save_locals(self.frame)
except SystemExit:
raise
except:
self.showtraceback()
#=======================================================================================================================
# InteractiveConsoleCache
#=======================================================================================================================
class InteractiveConsoleCache:
thread_id = None
frame_id = None
interactive_console_instance = None
#Note: On Jython 2.1 we can't use classmethod or staticmethod, so, just make the functions below free-functions.
def get_interactive_console(thread_id, frame_id, frame, console_message):
"""returns the global interactive console.
interactive console should have been initialized by this time
"""
if InteractiveConsoleCache.thread_id == thread_id and InteractiveConsoleCache.frame_id == frame_id:
return InteractiveConsoleCache.interactive_console_instance
InteractiveConsoleCache.interactive_console_instance = DebugConsole()
InteractiveConsoleCache.thread_id = thread_id
InteractiveConsoleCache.frame_id = frame_id
console_stacktrace = traceback.extract_stack(frame, limit=1)
if console_stacktrace:
current_context = console_stacktrace[0] # top entry from stacktrace
context_message = 'File "%s", line %s, in %s' % (current_context[0], current_context[1], current_context[2])
console_message.add_console_message(CONSOLE_OUTPUT, "[Current context]: %s" % (context_message,))
return InteractiveConsoleCache.interactive_console_instance
def clear_interactive_console():
InteractiveConsoleCache.thread_id = None
InteractiveConsoleCache.frame_id = None
InteractiveConsoleCache.interactive_console_instance = None
def execute_console_command(frame, thread_id, frame_id, line):
"""fetch an interactive console instance from the cache and
push the received command to the console.
create and return an instance of console_message
"""
console_message = ConsoleMessage()
interpreter = get_interactive_console(thread_id, frame_id, frame, console_message)
more, output_messages, error_messages = interpreter.push(line, frame)
console_message.update_more(more)
for message in output_messages:
console_message.add_console_message(CONSOLE_OUTPUT, message)
for message in error_messages:
console_message.add_console_message(CONSOLE_ERROR, message)
return console_message
def get_completions(frame, act_tok):
""" fetch all completions, create xml for the same
return the completions xml
"""
return _pydev_completer.GenerateCompletionsAsXML(frame, act_tok)
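# Illustrative sketch, not part of pydevd: how a debugger front-end is expected
# to drive the helpers above for a paused frame. The function name is
# hypothetical; frame, thread_id and frame_id come from the suspended thread.
def _example_console_roundtrip(frame, thread_id, frame_id):
    console_message = execute_console_command(frame, thread_id, frame_id, 'x = 21 * 2')
    more = console_message.more    # True if further input is required
    xml = console_message.toXML()  # payload sent back to the IDE
    clear_interactive_console()    # drop the cached console when done
    return more, xml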
| apache-2.0 |
aringh/odl | odl/contrib/solvers/spdhg/misc.py | 1 | 22813 | # Copyright 2014-2018 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Functions for folders and files."""
from __future__ import print_function
from builtins import super
import numpy as np
import odl
import scipy.signal
import matplotlib
import matplotlib.pyplot as plt
from skimage.io import imsave
__all__ = ('total_variation', 'TotalVariationNonNegative', 'bregman',
'save_image', 'save_signal', 'divide_1Darray_equally', 'Blur2D',
'KullbackLeiblerSmooth')
def save_image(image, name, folder, fignum, cmap='gray', clim=None):
matplotlib.rc('text', usetex=False)
fig = plt.figure(fignum)
plt.clf()
image.show(name, cmap=cmap, fig=fig)
fig.savefig('{}/{}_fig.png'.format(folder, name), bbox_inches='tight')
if clim is None:
x = image - np.min(image)
if np.max(x) > 1e-4:
x /= np.max(x)
else:
x = (image - clim[0]) / (clim[1] - clim[0])
x = np.minimum(np.maximum(x, 0), 1)
imsave('{}/{}.png'.format(folder, name), np.rot90(x, 1))
def save_signal(signal, name, folder, fignum):
matplotlib.rc('text', usetex=False)
fig = plt.figure(fignum)
plt.clf()
signal.show(name, fig=fig)
fig.savefig('{}/{}_fig.png'.format(folder, name), bbox_inches='tight')
def bregman(f, v, subgrad):
return (odl.solvers.FunctionalQuadraticPerturb(f, linear_term=-subgrad) -
f(v) + subgrad.inner(v))
def partition_1d(arr, slices):
return tuple(arr[slc] for slc in slices)
def partition_equally_1d(arr, nparts, order='interlaced'):
if order == 'block':
stride = int(np.ceil(arr.size / nparts))
slc_list = [slice(i * stride, (i + 1) * stride) for i in range(nparts)]
elif order == 'interlaced':
slc_list = [slice(i, len(arr), nparts) for i in range(nparts)]
else:
raise ValueError
return partition_1d(arr, slc_list)
def divide_1Darray_equally(ind, nsub):
"""Divide an array into equal chunks to be used for instance in OSEM.
Parameters
----------
ind : ndarray
input array
nsub : int
    number of subsets to divide the array into
Returns
-------
sub2ind : list
list of indices for each subset
ind2sub : list
list of subsets for each index
"""
n_ind = len(ind)
sub2ind = partition_equally_1d(ind, nsub, order='interlaced')
ind2sub = []
for i in range(n_ind):
ind2sub.append([])
for i in range(nsub):
for j in sub2ind[i]:
ind2sub[j].append(i)
return (sub2ind, ind2sub)
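# Minimal sketch, not part of the original module: splitting 6 indices into 3
# interlaced subsets, as used for ordered-subset (OSEM-type) iterations. The
# helper name is hypothetical; numpy is already imported above as np.
def _example_divide_equally():
    sub2ind, ind2sub = divide_1Darray_equally(np.arange(6), 3)
    # sub2ind -> (array([0, 3]), array([1, 4]), array([2, 5]))
    # ind2sub -> [[0], [1], [2], [0], [1], [2]]
    return sub2ind, ind2sub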
def total_variation(domain, grad=None):
"""Total variation functional.
Parameters
----------
domain : odlspace
domain of TV functional
grad : gradient operator, optional
Gradient operator of the total variation functional. This may be any
linear operator and thereby generalizing TV. default=forward
differences with Neumann boundary conditions
Examples
--------
Check that the total variation of a constant is zero
>>> import odl.contrib.spdhg as spdhg, odl
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tv = spdhg.total_variation(space)
>>> x = space.one()
>>> tv(x) < 1e-10
"""
if grad is None:
grad = odl.Gradient(domain, method='forward', pad_mode='symmetric')
grad.norm = 2 * np.sqrt(sum(1 / grad.domain.cell_sides**2))
else:
grad = grad
f = odl.solvers.GroupL1Norm(grad.range, exponent=2)
return f * grad
class TotalVariationNonNegative(odl.solvers.Functional):
"""Total variation function with nonnegativity constraint and strongly
convex relaxation.
In formulas, this functional may represent
alpha * |grad x|_1 + char_fun(x) + beta/2 |x|^2_2
with regularization parameter alpha and strong convexity beta. In addition,
the nonnegativity constraint is achieved with the characteristic function
char_fun(x) = 0 if x >= 0 and infty else.
Parameters
----------
domain : odlspace
domain of TV functional
alpha : scalar, optional
Regularization parameter, positive
prox_options : dict, optional
name: string, optional
name of the method to perform the prox operator, default=FGP
warmstart: boolean, optional
Do you want a warm start, i.e. start with the dual variable
from the last call? default=True
niter: int, optional
number of iterations per call, default=5
p: array, optional
initial dual variable, default=zeros
grad : gradient operator, optional
Gradient operator to be used within the total variation functional.
default=see TV
"""
def __init__(self, domain, alpha=1, prox_options={}, grad=None,
strong_convexity=0):
"""
"""
self.strong_convexity = strong_convexity
if 'name' not in prox_options:
prox_options['name'] = 'FGP'
if 'warmstart' not in prox_options:
prox_options['warmstart'] = True
if 'niter' not in prox_options:
prox_options['niter'] = 5
if 'p' not in prox_options:
prox_options['p'] = None
if 'tol' not in prox_options:
prox_options['tol'] = None
self.prox_options = prox_options
self.alpha = alpha
self.tv = total_variation(domain, grad=grad)
self.grad = self.tv.right
self.nn = odl.solvers.IndicatorBox(domain, 0, np.inf)
self.l2 = 0.5 * odl.solvers.L2NormSquared(domain)
self.proj_P = self.tv.left.convex_conj.proximal(0)
self.proj_C = self.nn.proximal(1)
super().__init__(space=domain, linear=False, grad_lipschitz=0)
def __call__(self, x):
"""Evaluate functional.
Examples
--------
Check that the total variation of a constant is zero
>>> import odl.contrib.spdhg as spdhg, odl
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = space.one()
>>> tvnn(x) < 1e-10
Check that negative functions are mapped to infty
>>> import odl.contrib.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> np.isinf(tvnn(x))
"""
nn = self.nn(x)
if nn is np.inf:
return nn
else:
out = self.alpha * self.tv(x) + nn
if self.strong_convexity > 0:
out += self.strong_convexity * self.l2(x)
return out
def proximal(self, sigma):
"""Prox operator of TV. It allows the proximal step length to be a
vector of positive elements.
Examples
--------
Check that the proximal operator is the identity for sigma=0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0)(x)
>>> (y-x).norm() < 1e-10
Check that negative functions are mapped to 0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0.1)(x)
>>> y.norm() < 1e-10
"""
if sigma == 0:
return odl.IdentityOperator(self.domain)
else:
def tv_prox(z, out=None):
if out is None:
out = z.space.zero()
opts = self.prox_options
sigma_ = np.copy(sigma)
z_ = z.copy()
if self.strong_convexity > 0:
sigma_ /= (1 + sigma * self.strong_convexity)
z_ /= (1 + sigma * self.strong_convexity)
if opts['name'] == 'FGP':
if opts['warmstart']:
if opts['p'] is None:
opts['p'] = self.grad.range.zero()
p = opts['p']
else:
p = self.grad.range.zero()
sigma_sqrt = np.sqrt(sigma_)
z_ /= sigma_sqrt
grad = sigma_sqrt * self.grad
grad.norm = sigma_sqrt * self.grad.norm
niter = opts['niter']
alpha = self.alpha
out[:] = fgp_dual(p, z_, alpha, niter, grad, self.proj_C,
self.proj_P, tol=opts['tol'])
out *= sigma_sqrt
return out
else:
raise NotImplementedError('Not yet implemented')
return tv_prox
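# Illustrative sketch, not part of the original module: applying the
# nonnegativity-constrained TV prox to an image with negative values, in the
# same way as the doctests above. The helper name is hypothetical.
def _example_tvnn_prox():
    space = odl.uniform_discr([0, 0], [1, 1], [32, 32])
    tvnn = TotalVariationNonNegative(space, alpha=0.5,
                                     prox_options={'niter': 10})
    x = odl.phantom.shepp_logan(space, modified=True) - 0.1
    # Negative parts are mapped towards zero by the nonnegativity projection.
    return tvnn.proximal(1.0)(x)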
def fgp_dual(p, data, alpha, niter, grad, proj_C, proj_P, tol=None, **kwargs):
"""Computes a solution to the ROF problem with the fast gradient
projection algorithm.
Parameters
----------
p : np.array
dual initial variable
data : np.array
noisy data / proximal point
alpha : float
regularization parameter
niter : int
number of iterations
grad : instance of gradient class
class that supports grad(x), grad.adjoint(x), grad.norm
proj_C : function
projection onto the constraint set of the primal variable,
e.g. non-negativity
proj_P : function
projection onto the constraint set of the dual variable,
e.g. norm <= 1
tol : float (optional)
nonnegative parameter that gives the tolerance for convergence. If set
None, then the algorithm will run for a fixed number of iterations
Other Parameters
----------------
callback : callable, optional
Function called with the current iterate after each iteration.
"""
# Callback object
callback = kwargs.pop('callback', None)
if callback is not None and not callable(callback):
raise TypeError('`callback` {} is not callable'.format(callback))
factr = 1 / (grad.norm**2 * alpha)
q = p.copy()
x = data.space.zero()
t = 1.
if tol is None:
def convergence_eval(p1, p2):
return False
else:
def convergence_eval(p1, p2):
return (p1 - p2).norm() / p1.norm() < tol
pnew = p.copy()
if callback is not None:
callback(p)
for k in range(niter):
t0 = t
grad.adjoint(q, out=x)
proj_C(data - alpha * x, out=x)
grad(x, out=pnew)
pnew *= factr
pnew += q
proj_P(pnew, out=pnew)
converged = convergence_eval(p, pnew)
if not converged:
# update step size
t = (1 + np.sqrt(1 + 4 * t0 ** 2)) / 2.
# calculate next iterate
q[:] = pnew + (t0 - 1) / t * (pnew - p)
p[:] = pnew
if converged:
t = None
break
if callback is not None:
callback(p)
# get current image estimate
x = proj_C(data - alpha * grad.adjoint(p))
return x
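# Minimal usage sketch for fgp_dual, not part of the original module: denoising
# by solving the ROF problem, mirroring how tv_prox calls it above. The helper
# name is hypothetical and odl.phantom is assumed to be available.
def _example_fgp_dual_denoise():
    space = odl.uniform_discr([0, 0], [1, 1], [64, 64])
    grad = odl.Gradient(space, method='forward', pad_mode='symmetric')
    grad.norm = 2 * np.sqrt(sum(1 / grad.domain.cell_sides ** 2))
    data = odl.phantom.shepp_logan(space, modified=True)
    data += 0.1 * odl.phantom.white_noise(space)
    nonneg = odl.solvers.IndicatorBox(space, 0, np.inf)
    l1 = odl.solvers.GroupL1Norm(grad.range, exponent=2)
    p = grad.range.zero()
    return fgp_dual(p, data, 0.1, 20, grad,
                    nonneg.proximal(1), l1.convex_conj.proximal(0))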
class Blur2D(odl.Operator):
"""Blur operator"""
def __init__(self, domain, kernel, boundary_condition='wrap'):
"""Initialize a new instance.
"""
super().__init__(domain=domain, range=domain, linear=True)
self.__kernel = kernel
self.__boundary_condition = boundary_condition
@property
def kernel(self):
return self.__kernel
@property
def boundary_condition(self):
return self.__boundary_condition
def _call(self, x, out):
out[:] = scipy.signal.convolve2d(x, self.kernel, mode='same',
boundary='wrap')
@property
def gradient(self):
raise NotImplementedError('Not yet implemented')
@property
def adjoint(self):
adjoint_kernel = self.kernel.copy().conj()
adjoint_kernel = np.fliplr(np.flipud(adjoint_kernel))
return Blur2D(self.domain, adjoint_kernel, self.boundary_condition)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r}, {!r})'.format(
self.__class__.__name__, self.domain, self.kernel,
self.boundary_condition)
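# Illustrative sketch, not part of the original module: a 3x3 box blur and its
# adjoint on a small image space. The helper name is hypothetical.
def _example_blur2d():
    space = odl.uniform_discr([0, 0], [1, 1], [16, 16])
    kernel = np.ones((3, 3)) / 9.0
    blur = Blur2D(space, kernel)
    x = odl.phantom.shepp_logan(space, modified=True)
    # The adjoint uses the flipped, conjugated kernel defined above.
    return blur(x), blur.adjoint(x)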
class KullbackLeiblerSmooth(odl.solvers.Functional):
"""The smooth Kullback-Leibler divergence functional.
Notes
-----
If the functional is defined on an :math:`\mathbb{R}^n`-like space, the
smooth Kullback-Leibler functional :math:`\\phi` is defined as
.. math::
\\phi(x) = \\sum_{i=1}^n \\begin{cases}
x + r - y + y * \\log(y / (x + r))
& \\text{if $x \geq 0$} \\
(y / (2 * r^2)) * x^2 + (1 - y / r) * x + r - b +
b * \\log(b / r) & \\text{else}
\\end{cases}
where all variables on the right hand side of the equation have a subscript
i which is omitted for readability.
References
----------
[CERS2017] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb,
*Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling
and Imaging Applications*. ArXiv: http://arxiv.org/abs/1706.04957 (2017).
"""
def __init__(self, space, data, background):
"""Initialize a new instance.
Parameters
----------
space : `DiscreteLp` or `TensorSpace`
Domain of the functional.
data : ``space`` `element-like`
Data vector which has to be non-negative.
background : ``space`` `element-like`
Background vector which has to be non-negative.
"""
self.strong_convexity = 0
if background.ufuncs.less_equal(0).ufuncs.sum() > 0:
raise NotImplementedError('Background must be positive')
super().__init__(space=space, linear=False,
grad_lipschitz=np.max(data / background ** 2))
if data not in self.domain:
            raise ValueError('`data` {!r} not in `domain` {!r}'
                             ''.format(data, self.domain))
self.__data = data
self.__background = background
@property
def data(self):
"""The data in the Kullback-Leibler functional."""
return self.__data
@property
def background(self):
"""The background in the Kullback-Leibler functional."""
return self.__background
def _call(self, x):
"""Return the KL-diveregnce in the point ``x``.
If any components of ``x`` is non-positive, the value is positive
infinity.
"""
y = self.data
r = self.background
obj = self.domain.zero()
# x + r - y + y * log(y / (x + r)) = x - y * log(x + r) + c1
# with c1 = r - y + y * log y
i = x.ufuncs.greater_equal(0)
obj[i] = x[i] + r[i] - y[i]
j = y.ufuncs.greater(0)
k = i.ufuncs.logical_and(j)
obj[k] += y[k] * (y[k] / (x[k] + r[k])).ufuncs.log()
# (y / (2 * r^2)) * x^2 + (1 - y / r) * x + r - b + b * log(b / r)
# = (y / (2 * r^2)) * x^2 + (1 - y / r) * x + c2
# with c2 = r - b + b * log(b / r)
i = i.ufuncs.logical_not()
obj[i] += (y[i] / (2 * r[i]**2) * x[i]**2 + (1 - y[i] / r[i]) * x[i] +
r[i] - y[i])
k = i.ufuncs.logical_and(j)
obj[k] += y[k] * (y[k] / r[k]).ufuncs.log()
return obj.inner(self.domain.one())
@property
def gradient(self):
"""Gradient operator of the functional.
"""
        raise NotImplementedError('Not yet implemented')
@property
def proximal(self):
"""Return the `proximal factory` of the functional.
"""
        raise NotImplementedError('Not yet implemented')
@property
def convex_conj(self):
"""The convex conjugate functional of the KL-functional."""
return KullbackLeiblerSmoothConvexConj(self.domain, self.data,
self.background)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r}, {!r})'.format(
self.__class__.__name__, self.domain, self.data, self.background)
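# ----------------------------------------------------------------------------
# Illustrative sketch (assumption, not part of the original code): the two
# branches of the smooth KL integrand evaluated directly with numpy, matching
# the formula in the class docstring (quadratic extension for x < 0).
# ----------------------------------------------------------------------------
def _example_kl_smooth_integrand(x, y, r):
    """Pointwise smooth KL value for scalar x, data y > 0, background r > 0."""
    if x >= 0:
        return x + r - y + y * np.log(y / (x + r))
    return y / (2 * r ** 2) * x ** 2 + (1 - y / r) * x + r - y + y * np.log(y / r)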
class KullbackLeiblerSmoothConvexConj(odl.solvers.Functional):
"""The convex conjugate of the smooth Kullback-Leibler divergence functional.
Notes
-----
If the functional is defined on an :math:`\mathbb{R}^n`-like space, the
convex conjugate of the smooth Kullback-Leibler functional :math:`\\phi^*`
is defined as
.. math::
\\phi^*(x) = \\sum_{i=1}^n \\begin{cases}
r^2 / (2 * y) * x^2 + (r - r^2 / y) * x + r^2 / (2 * y) +
                    3 / 2 * y - 2 * r - y * \\log(y / r)
                    & \\text{if $x < 1 - y / r$} \\
                - r * x - y * \\log(1 - x)
                    & \\text{if $1 - y / r \\leq x < 1$} \\
+ \infty
& \\text{else}
\\end{cases}
where all variables on the right hand side of the equation have a subscript
:math:`i` which is omitted for readability.
References
----------
[CERS2017] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb,
*Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling
and Imaging Applications*. ArXiv: http://arxiv.org/abs/1706.04957 (2017).
"""
def __init__(self, space, data, background):
"""Initialize a new instance.
Parameters
----------
space : `DiscreteLp` or `TensorSpace`
Domain of the functional.
data : ``space`` `element-like`
Data vector which has to be non-negative.
background : ``space`` `element-like`
Background vector which has to be non-negative.
"""
if background.ufuncs.less_equal(0).ufuncs.sum() > 0:
raise NotImplementedError('Background must be positive')
super().__init__(space=space, linear=False,
grad_lipschitz=np.inf)
if data is not None and data not in self.domain:
            raise ValueError('`data` {!r} not in `domain` {!r}'
                             ''.format(data, self.domain))
self.__data = data
self.__background = background
if np.min(self.data) == 0:
self.strong_convexity = np.inf
else:
self.strong_convexity = np.min(self.background**2 / self.data)
@property
def data(self):
"""The data in the Kullback-Leibler functional."""
return self.__data
@property
def background(self):
"""The background in the Kullback-Leibler functional."""
return self.__background
def _call(self, x):
"""Return the value in the point ``x``.
        If any component of ``x`` is larger than or equal to 1, the value is
        positive infinity.
"""
# TODO: cover properly the case data = 0
y = self.data
r = self.background
# if any element is greater or equal to one
if x.ufuncs.greater_equal(1).ufuncs.sum() > 0:
return np.inf
obj = self.domain.zero()
# out = sum(f)
# f =
# if x < 1 - y / r:
# r^2 / (2 * y) * x^2 + (r - r^2 / y) * x + r^2 / (2 * y) +
# 3 / 2 * y - 2 * r - y * log(y / r)
# if x >= 1 - y / r:
# - r * x - y * log(1 - x)
i = x.ufuncs.less(1 - y / r)
ry = r[i]**2 / y[i]
obj[i] += (ry / 2 * x[i]**2 + (r[i] - ry) * x[i] + ry / 2 +
3 / 2 * y[i] - 2 * r[i])
j = y.ufuncs.greater(0)
k = i.ufuncs.logical_and(j)
obj[k] -= y[k] * (y[k] / r[k]).ufuncs.log()
i = i.ufuncs.logical_not()
obj[i] -= r[i] * x[i]
k = i.ufuncs.logical_and(j)
obj[k] -= y[k] * (1 - x[k]).ufuncs.log()
return obj.inner(self.domain.one())
@property
def gradient(self):
"""Gradient operator of the functional."""
        raise NotImplementedError('Not yet implemented')
@property
def proximal(self):
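        """Return the `proximal factory` of the functional."""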
space = self.domain
y = self.data
r = self.background
class ProxKullbackLeiblerSmoothConvexConj(odl.Operator):
"""Proximal operator of the convex conjugate of the smooth
Kullback-Leibler functional.
"""
def __init__(self, sigma):
"""Initialize a new instance.
Parameters
----------
sigma : positive float
Step size parameter
"""
self.sigma = float(sigma)
self.background = r
self.data = y
super().__init__(domain=space, range=space, linear=False)
def _call(self, x, out):
s = self.sigma
y = self.data
r = self.background
sr = s * r
sy = s * y
# out =
# if x < 1 - y / r:
# (y * x - s * r * y + s * r**2) / (y + s * r**2)
# if x >= 1 - y / r:
# 0.5 * (x + s * r + 1 -
            #            sqrt((x + s * r - 1)**2 + 4 * s * y))
i = x.ufuncs.less(1 - y / r)
# TODO: This may be faster without indexing on the GPU?
out[i] = ((y[i] * x[i] - sr[i] * y[i] + sr[i] * r[i]) /
(y[i] + sr[i] * r[i]))
i.ufuncs.logical_not(out=i)
out[i] = (x[i] + sr[i] + 1 -
((x[i] + sr[i] - 1) ** 2 + 4 * sy[i]).ufuncs.sqrt())
out[i] /= 2
return out
return ProxKullbackLeiblerSmoothConvexConj
@property
def convex_conj(self):
"""The convex conjugate functional of the smooth KL-functional."""
return KullbackLeiblerSmooth(self.domain, self.data,
self.background)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r}, {!r})'.format(
self.__class__.__name__, self.domain, self.data, self.background)
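# ----------------------------------------------------------------------------
# Illustrative sketch (assumption, not part of the original code): obtain the
# proximal operator of the convex conjugate for a given step size and note the
# reciprocal relation between f.grad_lipschitz (max(y / r^2)) and
# f.convex_conj.strong_convexity (min(r^2 / y)).  Space, data and background
# below are example values only.
# ----------------------------------------------------------------------------
def _example_kl_smooth_conj_prox(sigma=0.5):
    space = odl.uniform_discr(0, 5, 5)
    y = space.element([1.0, 2.0, 0.5, 1.5, 1.0])     # non-negative data
    r = 0.1 * space.one()                             # strictly positive background
    f = KullbackLeiblerSmooth(space, y, r)
    fconj = f.convex_conj                             # KullbackLeiblerSmoothConvexConj
    prox = fconj.proximal(sigma)                      # operator x -> prox of sigma * f*
    # applying `prox` to an element with all components < 1 performs the
    # two-branch update implemented in `_call` above
    return prox, f.grad_lipschitz, fconj.strong_convexity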