repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 classes) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 classes)
---|---|---|---|---|---
Celthi/youtube-dl-GUI | youtube_dl/extractor/tube8.py | 32 | 3522 | from __future__ import unicode_literals
import json
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlparse,
compat_urllib_request,
)
from ..utils import (
int_or_none,
str_to_int,
)
from ..aes import aes_decrypt_text
class Tube8IE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?tube8\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/(?P<id>\d+)'
_TESTS = [
{
'url': 'http://www.tube8.com/teen/kasia-music-video/229795/',
'md5': '44bf12b98313827dd52d35b8706a4ea0',
'info_dict': {
'id': '229795',
'display_id': 'kasia-music-video',
'ext': 'mp4',
'description': 'hot teen Kasia grinding',
'uploader': 'unknown',
'title': 'Kasia music video',
'age_limit': 18,
}
},
{
'url': 'http://www.tube8.com/shemale/teen/blonde-cd-gets-kidnapped-by-two-blacks-and-punished-for-being-a-slutty-girl/19569151/',
'only_matching': True,
},
]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
req = compat_urllib_request.Request(url)
req.add_header('Cookie', 'age_verified=1')
webpage = self._download_webpage(req, display_id)
flashvars = json.loads(self._html_search_regex(
r'var flashvars\s*=\s*({.+?})', webpage, 'flashvars'))
video_url = flashvars['video_url']
if flashvars.get('encrypted') is True:
video_url = aes_decrypt_text(video_url, flashvars['video_title'], 32).decode('utf-8')
path = compat_urllib_parse_urlparse(video_url).path
format_id = '-'.join(path.split('/')[4].split('_')[:2])
thumbnail = flashvars.get('image_url')
title = self._html_search_regex(
r'videotitle\s*=\s*"([^"]+)', webpage, 'title')
description = self._html_search_regex(
r'>Description:</strong>(.+?)<', webpage, 'description', fatal=False)
uploader = self._html_search_regex(
r'<strong class="video-username">(?:<a href="[^"]+">)?([^<]+)(?:</a>)?</strong>',
webpage, 'uploader', fatal=False)
like_count = int_or_none(self._html_search_regex(
r"rupVar\s*=\s*'(\d+)'", webpage, 'like count', fatal=False))
dislike_count = int_or_none(self._html_search_regex(
r"rdownVar\s*=\s*'(\d+)'", webpage, 'dislike count', fatal=False))
view_count = self._html_search_regex(
r'<strong>Views: </strong>([\d,\.]+)</li>', webpage, 'view count', fatal=False)
if view_count:
view_count = str_to_int(view_count)
comment_count = self._html_search_regex(
r'<span id="allCommentsCount">(\d+)</span>', webpage, 'comment count', fatal=False)
if comment_count:
comment_count = str_to_int(comment_count)
return {
'id': video_id,
'display_id': display_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
'uploader': uploader,
'format_id': format_id,
'view_count': view_count,
'like_count': like_count,
'dislike_count': dislike_count,
'comment_count': comment_count,
'age_limit': 18,
}
| mit |
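A minimal sketch of how the `_VALID_URL` pattern above maps a page URL onto the `display_id` and `id` groups consumed by `_real_extract`; the URL comes from the extractor's own test case and only the standard-library `re` module is assumed.

```python
import re

# Same pattern as Tube8IE._VALID_URL above.
_VALID_URL = r'https?://(?:www\.)?tube8\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/(?P<id>\d+)'

mobj = re.match(_VALID_URL, 'http://www.tube8.com/teen/kasia-music-video/229795/')
print(mobj.group('display_id'))  # kasia-music-video
print(mobj.group('id'))          # 229795
```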
KohlsTechnology/ansible | lib/ansible/modules/network/avi/avi_vsvip.py | 26 | 4732 | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.2
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_vsvip
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of VsVip Avi RESTful Object
description:
- This module is used to configure VsVip object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
cloud_ref:
description:
- It is a reference to an object of type cloud.
- Field introduced in 17.1.1.
dns_info:
description:
- Service discovery specific data including fully qualified domain name, type and time-to-live of the dns record.
- Field introduced in 17.1.1.
east_west_placement:
description:
- Force placement on all service engines in the service engine group (container clouds only).
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
name:
description:
- Name for the vsvip object.
- Field introduced in 17.1.1.
required: true
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the vsvip object.
- Field introduced in 17.1.1.
vip:
description:
- List of virtual service ips and other shareable entities.
- Field introduced in 17.1.1.
vrf_context_ref:
description:
- Virtual routing context that the virtual service is bound to.
- This is used to provide the isolation of the set of networks the application is attached to.
- It is a reference to an object of type vrfcontext.
- Field introduced in 17.1.1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create VsVip object
avi_vsvip:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_vsvip
"""
RETURN = '''
obj:
description: VsVip (api/vsvip) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
cloud_ref=dict(type='str',),
dns_info=dict(type='list',),
east_west_placement=dict(type='bool',),
name=dict(type='str', required=True),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
vip=dict(type='list',),
vrf_context_ref=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'vsvip',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 |
haeusser/tensorflow | tensorflow/contrib/layers/python/layers/regularizers.py | 63 | 6978 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Regularizers for use with layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
from tensorflow.python.platform import tf_logging as logging
__all__ = ['l1_regularizer',
'l2_regularizer',
'l1_l2_regularizer',
'sum_regularizer',
'apply_regularization']
def l1_regularizer(scale, scope=None):
"""Returns a function that can be used to apply L1 regularization to weights.
L1 regularization encourages sparsity.
Args:
scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
scope: An optional scope name.
Returns:
    A function with signature `l1(weights)` that applies L1 regularization.
Raises:
ValueError: If scale is negative or if scale is not a float.
"""
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % scale)
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g' %
scale)
if scale == 0.:
logging.info('Scale of 0 disables regularizer.')
return lambda _: None
def l1(weights, name=None):
"""Applies L1 regularization to weights."""
with ops.name_scope(scope, 'l1_regularizer', [weights]) as name:
my_scale = ops.convert_to_tensor(scale,
dtype=weights.dtype.base_dtype,
name='scale')
return standard_ops.multiply(
my_scale,
standard_ops.reduce_sum(standard_ops.abs(weights)),
name=name)
return l1
def l2_regularizer(scale, scope=None):
"""Returns a function that can be used to apply L2 regularization to weights.
Small values of L2 can help prevent overfitting the training data.
Args:
scale: A scalar multiplier `Tensor`. 0.0 disables the regularizer.
scope: An optional scope name.
Returns:
A function with signature `l2(weights)` that applies L2 regularization.
Raises:
ValueError: If scale is negative or if scale is not a float.
"""
if isinstance(scale, numbers.Integral):
raise ValueError('scale cannot be an integer: %s' % (scale,))
if isinstance(scale, numbers.Real):
if scale < 0.:
raise ValueError('Setting a scale less than 0 on a regularizer: %g.' %
scale)
if scale == 0.:
logging.info('Scale of 0 disables regularizer.')
return lambda _: None
def l2(weights):
"""Applies l2 regularization to weights."""
with ops.name_scope(scope, 'l2_regularizer', [weights]) as name:
my_scale = ops.convert_to_tensor(scale,
dtype=weights.dtype.base_dtype,
name='scale')
return standard_ops.multiply(my_scale, nn.l2_loss(weights), name=name)
return l2
def l1_l2_regularizer(scale_l1=1.0, scale_l2=1.0, scope=None):
"""Returns a function that can be used to apply L1 L2 regularizations.
Args:
scale_l1: A scalar multiplier `Tensor` for L1 regularization.
scale_l2: A scalar multiplier `Tensor` for L2 regularization.
scope: An optional scope name.
Returns:
    A function with signature `l1_l2(weights)` that applies a weighted sum of
    L1 and L2 regularization.
Raises:
ValueError: If scale is negative or if scale is not a float.
"""
scope = scope or 'l1_l2_regularizer'
return sum_regularizer([l1_regularizer(scale_l1),
l2_regularizer(scale_l2)],
scope=scope)
def sum_regularizer(regularizer_list, scope=None):
"""Returns a function that applies the sum of multiple regularizers.
Args:
regularizer_list: A list of regularizers to apply.
scope: An optional scope name
Returns:
A function with signature `sum_reg(weights)` that applies the
sum of all the input regularizers.
"""
regularizer_list = [reg for reg in regularizer_list if reg is not None]
if not regularizer_list:
return None
def sum_reg(weights):
"""Applies the sum of all the input regularizers."""
with ops.name_scope(scope, 'sum_regularizer', [weights]) as name:
regularizer_tensors = [reg(weights) for reg in regularizer_list]
return math_ops.add_n(regularizer_tensors, name=name)
return sum_reg
def apply_regularization(regularizer, weights_list=None):
"""Returns the summed penalty by applying `regularizer` to the `weights_list`.
Adding a regularization penalty over the layer weights and embedding weights
can help prevent overfitting the training data. Regularization over layer
biases is less common/useful, but assuming proper data preprocessing/mean
subtraction, it usually shouldn't hurt much either.
Args:
regularizer: A function that takes a single `Tensor` argument and returns
a scalar `Tensor` output.
weights_list: List of weights `Tensors` or `Variables` to apply
`regularizer` over. Defaults to the `GraphKeys.WEIGHTS` collection if
`None`.
Returns:
A scalar representing the overall regularization penalty.
Raises:
ValueError: If `regularizer` does not return a scalar output, or if we find
no weights.
"""
if not weights_list:
weights_list = ops.get_collection(ops.GraphKeys.WEIGHTS)
if not weights_list:
raise ValueError('No weights to regularize.')
with ops.name_scope('get_regularization_penalty',
values=weights_list) as scope:
penalties = [regularizer(w) for w in weights_list]
penalties = [
p if p is not None else constant_op.constant(0.0) for p in penalties
]
for p in penalties:
if p.get_shape().ndims != 0:
raise ValueError('regularizer must return a scalar Tensor instead of a '
'Tensor with rank %d.' % p.get_shape().ndims)
summed_penalty = math_ops.add_n(penalties, name=scope)
ops.add_to_collection(ops.GraphKeys.REGULARIZATION_LOSSES, summed_penalty)
return summed_penalty
| apache-2.0 |
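A small usage sketch of the regularizer functions defined above. It assumes a TensorFlow 1.x graph-mode session and that the module is reachable at the usual `tf.contrib.layers` path; the weight values are arbitrary illustration data.

```python
import tensorflow as tf  # assumes TensorFlow 1.x with tf.contrib available

weights = [tf.Variable(tf.ones([3, 3])), tf.Variable(2.0 * tf.ones([3]))]

# l2_regularizer returns a callable; apply_regularization sums it over the list
# and also records the total in GraphKeys.REGULARIZATION_LOSSES.
regularizer = tf.contrib.layers.l2_regularizer(scale=0.1)
penalty = tf.contrib.layers.apply_regularization(regularizer, weights_list=weights)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(penalty))  # 0.1 * (4.5 + 6.0) = 1.05, roughly
```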
2uller/LotF | App/Lib/sqlite3/test/userfunctions.py | 2 | 15477 | #-*- coding: ISO-8859-1 -*-
# pysqlite2/test/userfunctions.py: tests for user-defined functions and
# aggregates.
#
# Copyright (C) 2005-2007 Gerhard Häring <[email protected]>
#
# This file is part of pysqlite.
#
# This software is provided 'as-is', without any express or implied
# warranty. In no event will the authors be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
import unittest
import sqlite3 as sqlite
def func_returntext():
return "foo"
def func_returnunicode():
return u"bar"
def func_returnint():
return 42
def func_returnfloat():
return 3.14
def func_returnnull():
return None
def func_returnblob():
return buffer("blob")
def func_returnlonglong():
return 1<<31
def func_raiseexception():
5 // 0
def func_isstring(v):
return type(v) is unicode
def func_isint(v):
return type(v) is int
def func_isfloat(v):
return type(v) is float
def func_isnone(v):
return type(v) is type(None)
def func_isblob(v):
return type(v) is buffer
def func_islonglong(v):
return isinstance(v, (int, long)) and v >= 1<<31
class AggrNoStep:
def __init__(self):
pass
def finalize(self):
return 1
class AggrNoFinalize:
def __init__(self):
pass
def step(self, x):
pass
class AggrExceptionInInit:
def __init__(self):
5 // 0
def step(self, x):
pass
def finalize(self):
pass
class AggrExceptionInStep:
def __init__(self):
pass
def step(self, x):
5 // 0
def finalize(self):
return 42
class AggrExceptionInFinalize:
def __init__(self):
pass
def step(self, x):
pass
def finalize(self):
5 // 0
class AggrCheckType:
def __init__(self):
self.val = None
def step(self, whichType, val):
theType = {"str": unicode, "int": int, "float": float, "None": type(None), "blob": buffer}
self.val = int(theType[whichType] is type(val))
def finalize(self):
return self.val
class AggrSum:
def __init__(self):
self.val = 0.0
def step(self, val):
self.val += val
def finalize(self):
return self.val
class FunctionTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:")
self.con.create_function("returntext", 0, func_returntext)
self.con.create_function("returnunicode", 0, func_returnunicode)
self.con.create_function("returnint", 0, func_returnint)
self.con.create_function("returnfloat", 0, func_returnfloat)
self.con.create_function("returnnull", 0, func_returnnull)
self.con.create_function("returnblob", 0, func_returnblob)
self.con.create_function("returnlonglong", 0, func_returnlonglong)
self.con.create_function("raiseexception", 0, func_raiseexception)
self.con.create_function("isstring", 1, func_isstring)
self.con.create_function("isint", 1, func_isint)
self.con.create_function("isfloat", 1, func_isfloat)
self.con.create_function("isnone", 1, func_isnone)
self.con.create_function("isblob", 1, func_isblob)
self.con.create_function("islonglong", 1, func_islonglong)
def tearDown(self):
self.con.close()
def CheckFuncErrorOnCreate(self):
try:
self.con.create_function("bla", -100, lambda x: 2*x)
self.fail("should have raised an OperationalError")
except sqlite.OperationalError:
pass
def CheckFuncRefCount(self):
def getfunc():
def f():
return 1
return f
f = getfunc()
globals()["foo"] = f
# self.con.create_function("reftest", 0, getfunc())
self.con.create_function("reftest", 0, f)
cur = self.con.cursor()
cur.execute("select reftest()")
def CheckFuncReturnText(self):
cur = self.con.cursor()
cur.execute("select returntext()")
val = cur.fetchone()[0]
self.assertEqual(type(val), unicode)
self.assertEqual(val, "foo")
def CheckFuncReturnUnicode(self):
cur = self.con.cursor()
cur.execute("select returnunicode()")
val = cur.fetchone()[0]
self.assertEqual(type(val), unicode)
self.assertEqual(val, u"bar")
def CheckFuncReturnInt(self):
cur = self.con.cursor()
cur.execute("select returnint()")
val = cur.fetchone()[0]
self.assertEqual(type(val), int)
self.assertEqual(val, 42)
def CheckFuncReturnFloat(self):
cur = self.con.cursor()
cur.execute("select returnfloat()")
val = cur.fetchone()[0]
self.assertEqual(type(val), float)
if val < 3.139 or val > 3.141:
self.fail("wrong value")
def CheckFuncReturnNull(self):
cur = self.con.cursor()
cur.execute("select returnnull()")
val = cur.fetchone()[0]
self.assertEqual(type(val), type(None))
self.assertEqual(val, None)
def CheckFuncReturnBlob(self):
cur = self.con.cursor()
cur.execute("select returnblob()")
val = cur.fetchone()[0]
self.assertEqual(type(val), buffer)
self.assertEqual(val, buffer("blob"))
def CheckFuncReturnLongLong(self):
cur = self.con.cursor()
cur.execute("select returnlonglong()")
val = cur.fetchone()[0]
self.assertEqual(val, 1<<31)
def CheckFuncException(self):
cur = self.con.cursor()
try:
cur.execute("select raiseexception()")
cur.fetchone()
self.fail("should have raised OperationalError")
except sqlite.OperationalError, e:
self.assertEqual(e.args[0], 'user-defined function raised exception')
def CheckParamString(self):
cur = self.con.cursor()
cur.execute("select isstring(?)", ("foo",))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckParamInt(self):
cur = self.con.cursor()
cur.execute("select isint(?)", (42,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckParamFloat(self):
cur = self.con.cursor()
cur.execute("select isfloat(?)", (3.14,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckParamNone(self):
cur = self.con.cursor()
cur.execute("select isnone(?)", (None,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckParamBlob(self):
cur = self.con.cursor()
cur.execute("select isblob(?)", (buffer("blob"),))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckParamLongLong(self):
cur = self.con.cursor()
cur.execute("select islonglong(?)", (1<<42,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
class AggregateTests(unittest.TestCase):
def setUp(self):
self.con = sqlite.connect(":memory:")
cur = self.con.cursor()
cur.execute("""
create table test(
t text,
i integer,
f float,
n,
b blob
)
""")
cur.execute("insert into test(t, i, f, n, b) values (?, ?, ?, ?, ?)",
("foo", 5, 3.14, None, buffer("blob"),))
self.con.create_aggregate("nostep", 1, AggrNoStep)
self.con.create_aggregate("nofinalize", 1, AggrNoFinalize)
self.con.create_aggregate("excInit", 1, AggrExceptionInInit)
self.con.create_aggregate("excStep", 1, AggrExceptionInStep)
self.con.create_aggregate("excFinalize", 1, AggrExceptionInFinalize)
self.con.create_aggregate("checkType", 2, AggrCheckType)
self.con.create_aggregate("mysum", 1, AggrSum)
def tearDown(self):
#self.cur.close()
#self.con.close()
pass
def CheckAggrErrorOnCreate(self):
try:
self.con.create_function("bla", -100, AggrSum)
self.fail("should have raised an OperationalError")
except sqlite.OperationalError:
pass
def CheckAggrNoStep(self):
cur = self.con.cursor()
try:
cur.execute("select nostep(t) from test")
self.fail("should have raised an AttributeError")
except AttributeError, e:
self.assertEqual(e.args[0], "AggrNoStep instance has no attribute 'step'")
def CheckAggrNoFinalize(self):
cur = self.con.cursor()
try:
cur.execute("select nofinalize(t) from test")
val = cur.fetchone()[0]
self.fail("should have raised an OperationalError")
except sqlite.OperationalError, e:
self.assertEqual(e.args[0], "user-defined aggregate's 'finalize' method raised error")
def CheckAggrExceptionInInit(self):
cur = self.con.cursor()
try:
cur.execute("select excInit(t) from test")
val = cur.fetchone()[0]
self.fail("should have raised an OperationalError")
except sqlite.OperationalError, e:
self.assertEqual(e.args[0], "user-defined aggregate's '__init__' method raised error")
def CheckAggrExceptionInStep(self):
cur = self.con.cursor()
try:
cur.execute("select excStep(t) from test")
val = cur.fetchone()[0]
self.fail("should have raised an OperationalError")
except sqlite.OperationalError, e:
self.assertEqual(e.args[0], "user-defined aggregate's 'step' method raised error")
def CheckAggrExceptionInFinalize(self):
cur = self.con.cursor()
try:
cur.execute("select excFinalize(t) from test")
val = cur.fetchone()[0]
self.fail("should have raised an OperationalError")
except sqlite.OperationalError, e:
self.assertEqual(e.args[0], "user-defined aggregate's 'finalize' method raised error")
def CheckAggrCheckParamStr(self):
cur = self.con.cursor()
cur.execute("select checkType('str', ?)", ("foo",))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckAggrCheckParamInt(self):
cur = self.con.cursor()
cur.execute("select checkType('int', ?)", (42,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckAggrCheckParamFloat(self):
cur = self.con.cursor()
cur.execute("select checkType('float', ?)", (3.14,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckAggrCheckParamNone(self):
cur = self.con.cursor()
cur.execute("select checkType('None', ?)", (None,))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckAggrCheckParamBlob(self):
cur = self.con.cursor()
cur.execute("select checkType('blob', ?)", (buffer("blob"),))
val = cur.fetchone()[0]
self.assertEqual(val, 1)
def CheckAggrCheckAggrSum(self):
cur = self.con.cursor()
cur.execute("delete from test")
cur.executemany("insert into test(i) values (?)", [(10,), (20,), (30,)])
cur.execute("select mysum(i) from test")
val = cur.fetchone()[0]
self.assertEqual(val, 60)
class AuthorizerTests(unittest.TestCase):
@staticmethod
def authorizer_cb(action, arg1, arg2, dbname, source):
if action != sqlite.SQLITE_SELECT:
return sqlite.SQLITE_DENY
if arg2 == 'c2' or arg1 == 't2':
return sqlite.SQLITE_DENY
return sqlite.SQLITE_OK
def setUp(self):
self.con = sqlite.connect(":memory:")
self.con.executescript("""
create table t1 (c1, c2);
create table t2 (c1, c2);
insert into t1 (c1, c2) values (1, 2);
insert into t2 (c1, c2) values (4, 5);
""")
# For our security test:
self.con.execute("select c2 from t2")
self.con.set_authorizer(self.authorizer_cb)
def tearDown(self):
pass
def test_table_access(self):
try:
self.con.execute("select * from t2")
except sqlite.DatabaseError, e:
if not e.args[0].endswith("prohibited"):
self.fail("wrong exception text: %s" % e.args[0])
return
self.fail("should have raised an exception due to missing privileges")
def test_column_access(self):
try:
self.con.execute("select c2 from t1")
except sqlite.DatabaseError, e:
if not e.args[0].endswith("prohibited"):
self.fail("wrong exception text: %s" % e.args[0])
return
self.fail("should have raised an exception due to missing privileges")
class AuthorizerRaiseExceptionTests(AuthorizerTests):
@staticmethod
def authorizer_cb(action, arg1, arg2, dbname, source):
if action != sqlite.SQLITE_SELECT:
raise ValueError
if arg2 == 'c2' or arg1 == 't2':
raise ValueError
return sqlite.SQLITE_OK
class AuthorizerIllegalTypeTests(AuthorizerTests):
@staticmethod
def authorizer_cb(action, arg1, arg2, dbname, source):
if action != sqlite.SQLITE_SELECT:
return 0.0
if arg2 == 'c2' or arg1 == 't2':
return 0.0
return sqlite.SQLITE_OK
class AuthorizerLargeIntegerTests(AuthorizerTests):
@staticmethod
def authorizer_cb(action, arg1, arg2, dbname, source):
if action != sqlite.SQLITE_SELECT:
return 2**32
if arg2 == 'c2' or arg1 == 't2':
return 2**32
return sqlite.SQLITE_OK
def suite():
function_suite = unittest.makeSuite(FunctionTests, "Check")
aggregate_suite = unittest.makeSuite(AggregateTests, "Check")
authorizer_suite = unittest.makeSuite(AuthorizerTests)
return unittest.TestSuite((
function_suite,
aggregate_suite,
authorizer_suite,
unittest.makeSuite(AuthorizerRaiseExceptionTests),
unittest.makeSuite(AuthorizerIllegalTypeTests),
unittest.makeSuite(AuthorizerLargeIntegerTests),
))
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
| gpl-2.0 |
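The tests above exercise `create_function` and `create_aggregate`; below is a minimal standalone sketch of the same API against an in-memory database, using only the standard-library `sqlite3` module and written in Python 3 syntax rather than the Python 2 used by the test file.

```python
import sqlite3


class Sum:
    """Aggregate following the step/finalize protocol used by AggrSum above."""

    def __init__(self):
        self.total = 0

    def step(self, value):
        self.total += value

    def finalize(self):
        return self.total


con = sqlite3.connect(":memory:")
con.create_function("double", 1, lambda x: 2 * x)  # scalar UDF
con.create_aggregate("mysum", 1, Sum)              # aggregate UDF

con.execute("create table t (i integer)")
con.executemany("insert into t (i) values (?)", [(10,), (20,), (30,)])
print(con.execute("select double(i) from t").fetchall())  # [(20,), (40,), (60,)]
print(con.execute("select mysum(i) from t").fetchone())   # (60,)
```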
anjalisood/spark-tk | python/sparktk/dicom/ops/filter_by_keywords.py | 14 | 3974 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def filter_by_keywords(self, keywords_values_dict):
"""
    Filter the rows based on a dictionary of {"keyword": "value"} pairs (applying an 'and' operation across the dictionary) to the column holding the XML metadata string
Ex: keywords_values_dict -> {"SOPInstanceUID":"1.3.6.1.4.1.14519.5.2.1.7308.2101.234736319276602547946349519685", "Manufacturer":"SIEMENS", "StudyDate":"20030315"}
Parameters
----------
:param keywords_values_dict: (dict(str, str)) dictionary of keywords and values from xml string in metadata
Examples
--------
>>> dicom_path = "../datasets/dicom_uncompressed"
>>> dicom = tc.dicom.import_dcm(dicom_path)
>>> dicom.metadata.count()
3
<skip>
>>> dicom.metadata.inspect(truncate=30)
[#] id metadata
=======================================
[0] 0 <?xml version="1.0" encodin...
[1] 1 <?xml version="1.0" encodin...
[2] 2 <?xml version="1.0" encodin...
</skip>
#Part of xml string looks as below
<?xml version="1.0" encoding="UTF-8"?>
<NativeDicomModel xml:space="preserve">
<DicomAttribute keyword="FileMetaInformationVersion" tag="00020001" vr="OB"><InlineBinary>AAE=</InlineBinary></DicomAttribute>
<DicomAttribute keyword="MediaStorageSOPClassUID" tag="00020002" vr="UI"><Value number="1">1.2.840.10008.5.1.4.1.1.4</Value></DicomAttribute>
<DicomAttribute keyword="MediaStorageSOPInstanceUID" tag="00020003" vr="UI"><Value number="1">1.3.6.1.4.1.14519.5.2.1.7308.2101.234736319276602547946349519685</Value></DicomAttribute>
...
>>> keywords_values_dict = {"SOPInstanceUID":"1.3.6.1.4.1.14519.5.2.1.7308.2101.234736319276602547946349519685", "Manufacturer":"SIEMENS", "StudyDate":"20030315"}
>>> dicom.filter_by_keywords(keywords_values_dict)
>>> dicom.metadata.count()
1
<skip>
#After filter
>>> dicom.metadata.inspect(truncate=30)
[#] id metadata
=======================================
[0] 0 <?xml version="1.0" encodin...
>>> dicom.pixeldata.inspect(truncate=30)
[#] id imagematrix
=====================================================
[0] 0 [[ 0. 0. 0. ..., 0. 0. 0.]
[ 0. 125. 103. ..., 120. 213. 319.]
[ 0. 117. 94. ..., 135. 223. 325.]
...,
[ 0. 62. 21. ..., 896. 886. 854.]
[ 0. 63. 23. ..., 941. 872. 897.]
[ 0. 60. 30. ..., 951. 822. 906.]]
</skip>
"""
if not isinstance(keywords_values_dict, dict):
raise TypeError("keywords_values_dict should be a type of dict, but found type as %" % type(keywords_values_dict))
for key, value in keywords_values_dict.iteritems():
if not isinstance(key, basestring) or not isinstance(value, basestring):
raise TypeError("both keyword and value should be of <type 'str'>")
    # The Scala dicom implementation is always invoked, as Python joins are expensive compared to serializations.
def f(scala_dicom):
scala_dicom.filterByKeywords(self._tc.jutils.convert.to_scala_map(keywords_values_dict))
self._call_scala(f) | apache-2.0 |
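A usage sketch following the docstring above; the spark-tk context `tc`, the dataset path, and the keyword values are the hypothetical ones from the docstring's own example, not a verified dataset.

```python
# Hypothetical spark-tk session and dataset path, as in the docstring example.
dicom = tc.dicom.import_dcm("../datasets/dicom_uncompressed")

keywords_values_dict = {
    "Manufacturer": "SIEMENS",
    "StudyDate": "20030315",
}

# Rows whose XML metadata matches *all* keyword/value pairs are kept;
# both dicom.metadata and dicom.pixeldata shrink accordingly.
dicom.filter_by_keywords(keywords_values_dict)
print(dicom.metadata.count())
```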
DDelon/youtube-dl | youtube_dl/extractor/phoenix.py | 108 | 1527 | from __future__ import unicode_literals
from .common import InfoExtractor
from .zdf import extract_from_xml_url
class PhoenixIE(InfoExtractor):
_VALID_URL = r'''(?x)https?://(?:www\.)?phoenix\.de/content/
(?:
phoenix/die_sendungen/(?:[^/]+/)?
)?
(?P<id>[0-9]+)'''
_TESTS = [
{
'url': 'http://www.phoenix.de/content/884301',
'md5': 'ed249f045256150c92e72dbb70eadec6',
'info_dict': {
'id': '884301',
'ext': 'mp4',
'title': 'Michael Krons mit Hans-Werner Sinn',
'description': 'Im Dialog - Sa. 25.10.14, 00.00 - 00.35 Uhr',
'upload_date': '20141025',
'uploader': 'Im Dialog',
}
},
{
'url': 'http://www.phoenix.de/content/phoenix/die_sendungen/869815',
'only_matching': True,
},
{
'url': 'http://www.phoenix.de/content/phoenix/die_sendungen/diskussionen/928234',
'only_matching': True,
},
]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
internal_id = self._search_regex(
r'<div class="phx_vod" id="phx_vod_([0-9]+)"',
webpage, 'internal video ID')
api_url = 'http://www.phoenix.de/php/zdfplayer-v1.3/data/beitragsDetails.php?ak=web&id=%s' % internal_id
return extract_from_xml_url(self, video_id, api_url)
| unlicense |
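As with the Tube8 extractor earlier, the verbose (`(?x)`) pattern above can be checked directly with the standard-library `re` module; the URL below is one of the extractor's own test URLs.

```python
import re

# Same verbose pattern as PhoenixIE._VALID_URL above.
_VALID_URL = r'''(?x)https?://(?:www\.)?phoenix\.de/content/
    (?:
        phoenix/die_sendungen/(?:[^/]+/)?
    )?
    (?P<id>[0-9]+)'''

mobj = re.match(_VALID_URL, 'http://www.phoenix.de/content/phoenix/die_sendungen/diskussionen/928234')
print(mobj.group('id'))  # 928234
```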
metaml/nupic | tests/integration/nupic/algorithms/knn_classifier_test/classifier_test.py | 25 | 10208 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import copy
import logging
import time
import unittest2 as unittest
import cPickle
import numpy
from nupic.regions.PyRegion import RealNumpyDType
from nupic.algorithms.KNNClassifier import KNNClassifier
import pca_knn_data
LOGGER = logging.getLogger(__name__)
class KNNClassifierTest(unittest.TestCase):
"""Tests for k Nearest Neighbor classifier"""
def runTestKNNClassifier(self, short = 0):
""" Test the KNN classifier in this module. short can be:
0 (short), 1 (medium), or 2 (long)
"""
failures = ""
if short != 2:
numpy.random.seed(42)
else:
seed_value = int(time.time())
numpy.random.seed(seed_value)
LOGGER.info('Seed used: %d', seed_value)
f = open('seedval', 'a')
f.write(str(seed_value))
f.write('\n')
f.close()
failures += simulateKMoreThanOne()
LOGGER.info("\nTesting KNN Classifier on dense patterns")
numPatterns, numClasses = getNumTestPatterns(short)
patternSize = 100
patterns = numpy.random.rand(numPatterns, patternSize)
patternDict = dict()
testDict = dict()
# Assume there are no repeated patterns -- if there are, then
# numpy.random would be completely broken.
# Patterns in testDict are identical to those in patternDict but for the
# first 2% of items.
for i in xrange(numPatterns):
patternDict[i] = dict()
patternDict[i]['pattern'] = patterns[i]
patternDict[i]['category'] = numpy.random.randint(0, numClasses-1)
testDict[i] = copy.deepcopy(patternDict[i])
testDict[i]['pattern'][:0.02*patternSize] = numpy.random.rand()
testDict[i]['category'] = None
LOGGER.info("\nTesting KNN Classifier with L2 norm")
knn = KNNClassifier(k=1)
failures += simulateClassifier(knn, patternDict, \
"KNN Classifier with L2 norm test")
LOGGER.info("\nTesting KNN Classifier with L1 norm")
knnL1 = KNNClassifier(k=1, distanceNorm=1.0)
failures += simulateClassifier(knnL1, patternDict, \
"KNN Classifier with L1 norm test")
# Test with exact matching classifications.
LOGGER.info("\nTesting KNN Classifier with exact matching. For testing we "
"slightly alter the training data and expect None to be returned for the "
"classifications.")
knnExact = KNNClassifier(k=1, exact=True)
failures += simulateClassifier(knnExact,
patternDict,
"KNN Classifier with exact matching test",
testDict=testDict)
numPatterns, numClasses = getNumTestPatterns(short)
patterns = (numpy.random.rand(numPatterns, 25) > 0.7).astype(RealNumpyDType)
patternDict = dict()
for i in patterns:
iString = str(i.tolist())
if not patternDict.has_key(iString):
randCategory = numpy.random.randint(0, numClasses-1)
patternDict[iString] = dict()
patternDict[iString]['pattern'] = i
patternDict[iString]['category'] = randCategory
LOGGER.info("\nTesting KNN on sparse patterns")
knnDense = KNNClassifier(k=1)
failures += simulateClassifier(knnDense, patternDict, \
"KNN Classifier on sparse pattern test")
self.assertEqual(len(failures), 0,
"Tests failed: \n" + failures)
if short == 2:
f = open('seedval', 'a')
f.write('Pass\n')
f.close()
def runTestPCAKNN(self, short = 0):
LOGGER.info('\nTesting PCA/k-NN classifier')
LOGGER.info('Mode=%s', short)
numDims = 10
numClasses = 10
k = 10
numPatternsPerClass = 100
numPatterns = int(.9 * numClasses * numPatternsPerClass)
numTests = numClasses * numPatternsPerClass - numPatterns
numSVDSamples = int(.1 * numPatterns)
keep = 1
train_data, train_class, test_data, test_class = \
pca_knn_data.generate(numDims, numClasses, k, numPatternsPerClass,
numPatterns, numTests, numSVDSamples, keep)
pca_knn = KNNClassifier(k=k,numSVDSamples=numSVDSamples,
numSVDDims=keep)
knn = KNNClassifier(k=k)
LOGGER.info('Training PCA k-NN')
for i in range(numPatterns):
knn.learn(train_data[i], train_class[i])
pca_knn.learn(train_data[i], train_class[i])
LOGGER.info('Testing PCA k-NN')
numWinnerFailures = 0
numInferenceFailures = 0
numDistFailures = 0
numAbsErrors = 0
for i in range(numTests):
winner, inference, dist, categoryDist = knn.infer(test_data[i])
pca_winner, pca_inference, pca_dist, pca_categoryDist \
= pca_knn.infer(test_data[i])
if winner != test_class[i]:
numAbsErrors += 1
if pca_winner != winner:
numWinnerFailures += 1
if (numpy.abs(pca_inference - inference) > 1e-4).any():
numInferenceFailures += 1
if (numpy.abs(pca_dist - dist) > 1e-4).any():
numDistFailures += 1
s0 = 100*float(numTests - numAbsErrors) / float(numTests)
s1 = 100*float(numTests - numWinnerFailures) / float(numTests)
s2 = 100*float(numTests - numInferenceFailures) / float(numTests)
s3 = 100*float(numTests - numDistFailures) / float(numTests)
LOGGER.info('PCA/k-NN success rate=%s%s', s0, '%')
LOGGER.info('Winner success=%s%s', s1, '%')
LOGGER.info('Inference success=%s%s', s2, '%')
LOGGER.info('Distance success=%s%s', s3, '%')
self.assertEqual(s1, 100.0,
"PCA/k-NN test failed")
def testKNNClassifierShort(self):
self.runTestKNNClassifier(0)
def testPCAKNNShort(self):
self.runTestPCAKNN(0)
def testKNNClassifierMedium(self):
self.runTestKNNClassifier(1)
def testPCAKNNMedium(self):
self.runTestPCAKNN(1)
def simulateKMoreThanOne():
"""A small test with k=3"""
failures = ""
LOGGER.info("Testing the sparse KNN Classifier with k=3")
knn = KNNClassifier(k=3)
v = numpy.zeros((6, 2))
v[0] = [1.0, 0.0]
v[1] = [1.0, 0.2]
v[2] = [1.0, 0.2]
v[3] = [1.0, 2.0]
v[4] = [1.0, 4.0]
v[5] = [1.0, 4.5]
knn.learn(v[0], 0)
knn.learn(v[1], 0)
knn.learn(v[2], 0)
knn.learn(v[3], 1)
knn.learn(v[4], 1)
knn.learn(v[5], 1)
winner, _inferenceResult, _dist, _categoryDist = knn.infer(v[0])
if winner != 0:
failures += "Inference failed with k=3\n"
winner, _inferenceResult, _dist, _categoryDist = knn.infer(v[2])
if winner != 0:
failures += "Inference failed with k=3\n"
winner, _inferenceResult, _dist, _categoryDist = knn.infer(v[3])
if winner != 0:
failures += "Inference failed with k=3\n"
winner, _inferenceResult, _dist, _categoryDist = knn.infer(v[5])
if winner != 1:
failures += "Inference failed with k=3\n"
if len(failures) == 0:
LOGGER.info("Tests passed.")
return failures
def simulateClassifier(knn, patternDict, testName, testDict=None):
"""Train this classifier instance with the given patterns."""
failures = ""
numPatterns = len(patternDict)
LOGGER.info("Training the classifier")
tick = time.time()
for i in patternDict.keys():
knn.learn(patternDict[i]['pattern'], patternDict[i]['category'])
tock = time.time()
LOGGER.info("Time Elapsed %s", tock-tick)
knnString = cPickle.dumps(knn)
LOGGER.info("Size of the classifier is %s", len(knnString))
  # Run the classifier to infer categories on either the training data, or the
  # test data (if it's provided).
error_count = 0
tick = time.time()
if testDict:
LOGGER.info("Testing the classifier on the test set")
for i in testDict.keys():
winner, _inferenceResult, _dist, _categoryDist \
= knn.infer(testDict[i]['pattern'])
if winner != testDict[i]['category']:
error_count += 1
else:
LOGGER.info("Testing the classifier on the training set")
LOGGER.info("Number of patterns: %s", len(patternDict))
for i in patternDict.keys():
LOGGER.info("Testing %s - %s %s", i, patternDict[i]['category'], \
len(patternDict[i]['pattern']))
winner, _inferenceResult, _dist, _categoryDist \
= knn.infer(patternDict[i]['pattern'])
if winner != patternDict[i]['category']:
error_count += 1
tock = time.time()
LOGGER.info("Time Elapsed %s", tock-tick)
error_rate = float(error_count) / numPatterns
LOGGER.info("Error rate is %s", error_rate)
if error_rate == 0:
LOGGER.info(testName + " passed")
else:
LOGGER.info(testName + " failed")
failures += testName + " failed\n"
return failures
def getNumTestPatterns(short=0):
"""Return the number of patterns and classes the test should use."""
if short==0:
LOGGER.info("Running short tests")
numPatterns = numpy.random.randint(300, 600)
numClasses = numpy.random.randint(50, 150)
elif short==1:
LOGGER.info("\nRunning medium tests")
numPatterns = numpy.random.randint(500, 1500)
numClasses = numpy.random.randint(50, 150)
else:
LOGGER.info("\nRunning long tests")
numPatterns = numpy.random.randint(500, 3000)
numClasses = numpy.random.randint(30, 1000)
LOGGER.info("number of patterns is %s", numPatterns)
LOGGER.info("number of classes is %s", numClasses)
return numPatterns, numClasses
if __name__ == "__main__":
unittest.main()
| agpl-3.0 |
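A condensed sketch of the classifier API the tests above rely on — `learn(pattern, category)` and `infer(pattern)` returning a `(winner, inference, dist, categoryDist)` tuple — mirroring the k=3 scenario from `simulateKMoreThanOne`. It assumes NuPIC is installed with the same `KNNClassifier` import path used by the test file.

```python
import numpy
from nupic.algorithms.KNNClassifier import KNNClassifier

knn = KNNClassifier(k=3)

# Two clusters along the second dimension, as in simulateKMoreThanOne above.
for vec, category in [([1.0, 0.0], 0), ([1.0, 0.2], 0), ([1.0, 0.2], 0),
                      ([1.0, 2.0], 1), ([1.0, 4.0], 1), ([1.0, 4.5], 1)]:
    knn.learn(numpy.array(vec), category)

winner, inference, dist, category_dist = knn.infer(numpy.array([1.0, 4.2]))
print(winner)  # expected: 1, since the 3 nearest neighbors are category 1
```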
Aristocles/CouchPotatoServer | libs/pyasn1/debug.py | 185 | 1541 | import sys
from pyasn1.compat.octets import octs2ints
from pyasn1 import error
from pyasn1 import __version__
flagNone = 0x0000
flagEncoder = 0x0001
flagDecoder = 0x0002
flagAll = 0xffff
flagMap = {
'encoder': flagEncoder,
'decoder': flagDecoder,
'all': flagAll
}
class Debug:
defaultPrinter = sys.stderr.write
def __init__(self, *flags):
self._flags = flagNone
self._printer = self.defaultPrinter
self('running pyasn1 version %s' % __version__)
for f in flags:
if f not in flagMap:
raise error.PyAsn1Error('bad debug flag %s' % (f,))
self._flags = self._flags | flagMap[f]
self('debug category \'%s\' enabled' % f)
def __str__(self):
return 'logger %s, flags %x' % (self._printer, self._flags)
def __call__(self, msg):
self._printer('DBG: %s\n' % msg)
def __and__(self, flag):
return self._flags & flag
def __rand__(self, flag):
return flag & self._flags
logger = 0
def setLogger(l):
global logger
logger = l
def hexdump(octets):
return ' '.join(
[ '%s%.2X' % (n%16 == 0 and ('\n%.5d: ' % n) or '', x)
for n,x in zip(range(len(octets)), octs2ints(octets)) ]
)
class Scope:
def __init__(self):
self._list = []
def __str__(self): return '.'.join(self._list)
def push(self, token):
self._list.append(token)
def pop(self):
return self._list.pop()
scope = Scope()
| gpl-3.0 |
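A minimal sketch of wiring up the logger defined above; the flag name comes from `flagMap` in the module, and the BER decoder import is the usual pyasn1 codec path (assumed, not shown in this file).

```python
from pyasn1 import debug
from pyasn1.codec.ber import decoder

# Enable decoder-scoped debug output (printed to sys.stderr by default).
debug.setLogger(debug.Debug('decoder'))

# Decode a BER-encoded INTEGER 12; debug traces are emitted while decoding.
substrate = bytes(bytearray([0x02, 0x01, 0x0c]))
value, rest = decoder.decode(substrate)
print(value)  # 12
```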
xNovax/SickRage | lib/html5lib/treewalkers/pulldom.py | 1729 | 2302 | from __future__ import absolute_import, division, unicode_literals
from xml.dom.pulldom import START_ELEMENT, END_ELEMENT, \
COMMENT, IGNORABLE_WHITESPACE, CHARACTERS
from . import _base
from ..constants import voidElements
class TreeWalker(_base.TreeWalker):
def __iter__(self):
ignore_until = None
previous = None
for event in self.tree:
if previous is not None and \
(ignore_until is None or previous[1] is ignore_until):
if previous[1] is ignore_until:
ignore_until = None
for token in self.tokens(previous, event):
yield token
if token["type"] == "EmptyTag":
ignore_until = previous[1]
previous = event
if ignore_until is None or previous[1] is ignore_until:
for token in self.tokens(previous, None):
yield token
elif ignore_until is not None:
raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
def tokens(self, event, next):
type, node = event
if type == START_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
attrs = {}
for attr in list(node.attributes.keys()):
attr = node.getAttributeNode(attr)
attrs[(attr.namespaceURI, attr.localName)] = attr.value
if name in voidElements:
for token in self.emptyTag(namespace,
name,
attrs,
not next or next[1] is not node):
yield token
else:
yield self.startTag(namespace, name, attrs)
elif type == END_ELEMENT:
name = node.nodeName
namespace = node.namespaceURI
if name not in voidElements:
yield self.endTag(namespace, name)
elif type == COMMENT:
yield self.comment(node.nodeValue)
elif type in (IGNORABLE_WHITESPACE, CHARACTERS):
for token in self.text(node.nodeValue):
yield token
else:
yield self.unknown(type)
| gpl-3.0 |
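A rough sketch of driving the walker above with the standard-library pulldom event stream. It assumes an older html5lib where `getTreeWalker("pulldom")` resolves to this `TreeWalker` class; the sample markup is arbitrary.

```python
from xml.dom import pulldom

import html5lib

# Assumed to resolve to the pulldom TreeWalker defined above.
TreeWalker = html5lib.getTreeWalker("pulldom")

events = pulldom.parseString("<div class='x'>hello<br/></div>")
for token in TreeWalker(events):
    print("%s %s" % (token["type"], token.get("name", "")))
```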
gencer/sentry | src/sentry/south_migrations/0060_fill_filter_key.py | 5 | 28004 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
# Note: Remember to use orm['appname.ModelName'] rather than "from appname.models..."
for project in orm['sentry.Project'].objects.all():
for key in orm['sentry.FilterValue'].objects.filter(project=project).values_list(
'key', flat=True
).distinct():
orm['sentry.FilterKey'].objects.create(project=project, key=key)
def backwards(self, orm):
"Write your backwards methods here."
models = {
'sentry.user': {
'Meta': {
'object_name': 'User',
'db_table': "'auth_user'"
},
'date_joined':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email':
('django.db.models.fields.EmailField', [], {
'max_length': '75',
'blank': 'True'
}),
'first_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'id': ('django.db.models.fields.AutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'is_staff': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'last_login':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'last_name':
('django.db.models.fields.CharField', [], {
'max_length': '30',
'blank': 'True'
}),
'password': ('django.db.models.fields.CharField', [], {
'max_length': '128'
}),
'username':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '30'
})
},
'contenttypes.contenttype': {
'Meta': {
'ordering': "('name',)",
'unique_together': "(('app_label', 'model'),)",
'object_name': 'ContentType',
'db_table': "'django_content_type'"
},
'app_label': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'model': ('django.db.models.fields.CharField', [], {
'max_length': '100'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '100'
})
},
'sentry.event': {
'Meta': {
'unique_together': "(('project', 'event_id'),)",
'object_name': 'Event',
'db_table': "'sentry_message'"
},
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'datetime': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'event_id': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'null': 'True',
'db_column': "'message_id'"
}
),
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'blank': 'True',
'related_name': "'event_set'",
'null': 'True',
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'server_name': (
'django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True',
'db_index': 'True'
}
),
'site': (
'django.db.models.fields.CharField', [], {
'max_length': '128',
'null': 'True',
'db_index': 'True'
}
),
'time_spent': ('django.db.models.fields.FloatField', [], {
'null': 'True'
})
},
'sentry.filterkey': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'FilterKey'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
})
},
'sentry.filtervalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value'),)",
'object_name': 'FilterValue'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.group': {
'Meta': {
'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)",
'object_name': 'Group',
'db_table': "'sentry_groupedmessage'"
},
'active_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'checksum':
('django.db.models.fields.CharField', [], {
'max_length': '32',
'db_index': 'True'
}),
'culprit': (
'django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True',
'db_column': "'view'",
'blank': 'True'
}
),
'data': ('django.db.models.fields.TextField', [], {
'null': 'True',
'blank': 'True'
}),
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'db_index': 'True'
}
),
'level': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '40',
'db_index': 'True',
'blank': 'True'
}
),
'logger': (
'django.db.models.fields.CharField', [], {
'default': "'root'",
'max_length': '64',
'db_index': 'True',
'blank': 'True'
}
),
'message': ('django.db.models.fields.TextField', [], {}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'resolved_at':
('django.db.models.fields.DateTimeField', [], {
'null': 'True',
'db_index': 'True'
}),
'score': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {
'default': '0'
}),
'times_seen': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '1',
'db_index': 'True'
}
),
'views': (
'django.db.models.fields.related.ManyToManyField', [], {
'to': "orm['sentry.View']",
'symmetrical': 'False',
'blank': 'True'
}
)
},
'sentry.groupbookmark': {
'Meta': {
'unique_together': "(('project', 'user', 'group'),)",
'object_name': 'GroupBookmark'
},
'group': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Group']"
}
),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'bookmark_set'",
'to': "orm['sentry.Project']"
}
),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_bookmark_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.groupmeta': {
'Meta': {
'unique_together': "(('group', 'key'),)",
'object_name': 'GroupMeta'
},
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.messagecountbyminute': {
'Meta': {
'unique_together': "(('project', 'group', 'date'),)",
'object_name': 'MessageCountByMinute'
},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {
'default': '0'
}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.messagefiltervalue': {
'Meta': {
'unique_together': "(('project', 'key', 'value', 'group'),)",
'object_name': 'MessageFilterValue'
},
'first_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'last_seen': (
'django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now',
'null': 'True',
'db_index': 'True'
}
),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '200'
})
},
'sentry.messageindex': {
'Meta': {
'unique_together': "(('column', 'value', 'object_id'),)",
'object_name': 'MessageIndex'
},
'column': ('django.db.models.fields.CharField', [], {
'max_length': '32'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'value': ('django.db.models.fields.CharField', [], {
'max_length': '128'
})
},
'sentry.option': {
'Meta': {
'object_name': 'Option'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '64'
}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {
'unique_together': "(('team', 'email'),)",
'object_name': 'PendingTeamMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'email': ('django.db.models.fields.EmailField', [], {
'max_length': '75'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'team': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'pending_member_set'",
'to': "orm['sentry.Team']"
}
),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '0'
})
},
'sentry.project': {
'Meta': {
'object_name': 'Project'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '200'
}),
'owner': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_owned_project_set'",
'null': 'True',
'to': "orm['sentry.User']"
}
),
'public': ('django.db.models.fields.BooleanField', [], {
'default': 'False'
}),
'slug': (
'django.db.models.fields.SlugField', [], {
'max_length': '50',
'unique': 'True',
'null': 'True'
}
),
'status': (
'django.db.models.fields.PositiveIntegerField', [], {
'default': '0',
'db_index': 'True'
}
),
'team': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Team']",
'null': 'True'
}
)
},
'sentry.projectcountbyminute': {
'Meta': {
'unique_together': "(('project', 'date'),)",
'object_name': 'ProjectCountByMinute'
},
'date': ('django.db.models.fields.DateTimeField', [], {}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'time_spent_total': ('django.db.models.fields.FloatField', [], {
'default': '0'
}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
})
},
'sentry.projectkey': {
'Meta': {
'object_name': 'ProjectKey'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'key_set'",
'to': "orm['sentry.Project']"
}
),
'public_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'secret_key': (
'django.db.models.fields.CharField', [], {
'max_length': '32',
'unique': 'True',
'null': 'True'
}
),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']",
'null': 'True'
}
)
},
'sentry.projectoption': {
'Meta': {
'unique_together': "(('project', 'key'),)",
'object_name': 'ProjectOption',
'db_table': "'sentry_projectoptions'"
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.searchdocument': {
'Meta': {
'unique_together': "(('project', 'group'),)",
'object_name': 'SearchDocument'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'date_changed':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'group':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Group']"
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'project':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']"
}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '0'
}),
'total_events': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '1'
})
},
'sentry.searchtoken': {
'Meta': {
'unique_together': "(('document', 'field', 'token'),)",
'object_name': 'SearchToken'
},
'document': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'token_set'",
'to': "orm['sentry.SearchDocument']"
}
),
'field':
('django.db.models.fields.CharField', [], {
'default': "'text'",
'max_length': '64'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {
'default': '1'
}),
'token': ('django.db.models.fields.CharField', [], {
'max_length': '128'
})
},
'sentry.team': {
'Meta': {
'object_name': 'Team'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'name': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'owner':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}),
'slug':
('django.db.models.fields.SlugField', [], {
'unique': 'True',
'max_length': '50'
})
},
'sentry.teammember': {
'Meta': {
'unique_together': "(('team', 'user'),)",
'object_name': 'TeamMember'
},
'date_added':
('django.db.models.fields.DateTimeField', [], {
'default': 'datetime.datetime.now'
}),
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'is_active': ('django.db.models.fields.BooleanField', [], {
'default': 'True'
}),
'team': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'member_set'",
'to': "orm['sentry.Team']"
}
),
'type': ('django.db.models.fields.IntegerField', [], {
'default': '0'
}),
'user': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'related_name': "'sentry_teammember_set'",
'to': "orm['sentry.User']"
}
)
},
'sentry.useroption': {
'Meta': {
'unique_together': "(('user', 'project', 'key'),)",
'object_name': 'UserOption'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'key': ('django.db.models.fields.CharField', [], {
'max_length': '64'
}),
'project': (
'sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.Project']",
'null': 'True'
}
),
'user':
('sentry.db.models.fields.FlexibleForeignKey', [], {
'to': "orm['sentry.User']"
}),
'value': ('picklefield.fields.PickledObjectField', [], {})
},
'sentry.view': {
'Meta': {
'object_name': 'View'
},
'id':
('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {
'primary_key': 'True'
}),
'path':
('django.db.models.fields.CharField', [], {
'unique': 'True',
'max_length': '100'
}),
'verbose_name':
('django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True'
}),
'verbose_name_plural':
('django.db.models.fields.CharField', [], {
'max_length': '200',
'null': 'True'
})
}
}
complete_apps = ['sentry']
symmetrical = True
| bsd-3-clause |
nwjs/chromium.src | third_party/blink/renderer/bindings/scripts/generate_bindings.py | 1 | 2542 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Runs the bindings code generator for the given tasks.
"""
import optparse
import sys
import bind_gen
import web_idl
def parse_options():
parser = optparse.OptionParser(usage="%prog [options] TASK...")
parser.add_option("--web_idl_database", type="string",
help="filepath of the input database")
parser.add_option("--root_src_dir", type="string",
help='root directory of chromium project, i.e. "//"')
parser.add_option("--root_gen_dir", type="string",
help='root directory of generated code files, i.e. '
'"//out/Default/gen"')
parser.add_option("--output_core_reldir", type="string",
help='output directory for "core" component relative to '
'root_gen_dir')
parser.add_option("--output_modules_reldir", type="string",
help='output directory for "modules" component relative '
'to root_gen_dir')
options, args = parser.parse_args()
required_option_names = (
"web_idl_database", "root_src_dir", "root_gen_dir",
"output_core_reldir", "output_modules_reldir")
for opt_name in required_option_names:
if getattr(options, opt_name) is None:
parser.error("--{} is a required option.".format(opt_name))
if not args:
parser.error("No argument specified.")
return options, args
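# For illustration only: a hypothetical invocation sketch. The option names and
# the task names come from parse_options() and main() below; the file and
# directory values are made-up placeholders, not real build paths.
#
#   python3 generate_bindings.py \
#       --web_idl_database=web_idl_database.pickle \
#       --root_src_dir=// \
#       --root_gen_dir=//out/Default/gen \
#       --output_core_reldir=bindings/core \
#       --output_modules_reldir=bindings/modules \
#       dictionary enumeration interface union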
def main():
options, tasks = parse_options()
dispatch_table = {
'dictionary': bind_gen.generate_dictionaries,
'enumeration': bind_gen.generate_enumerations,
'interface': bind_gen.generate_interfaces,
'union': bind_gen.generate_unions,
}
for task in tasks:
if task not in dispatch_table:
sys.exit("Unknown task: {}".format(task))
web_idl_database = web_idl.Database.read_from_file(options.web_idl_database)
component_reldirs = {
web_idl.Component('core'): options.output_core_reldir,
web_idl.Component('modules'): options.output_modules_reldir,
}
bind_gen.init(root_src_dir=options.root_src_dir,
root_gen_dir=options.root_gen_dir,
component_reldirs=component_reldirs)
for task in tasks:
dispatch_table[task](web_idl_database=web_idl_database)
if __name__ == '__main__':
main()
| bsd-3-clause |
piece601/Openkore | src/scons-local-2.0.1/SCons/Warnings.py | 61 | 6806 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""SCons.Warnings
This file implements the warnings framework for SCons.
"""
__revision__ = "src/engine/SCons/Warnings.py 5134 2010/08/16 23:02:40 bdeegan"
import sys
import SCons.Errors
class Warning(SCons.Errors.UserError):
pass
class WarningOnByDefault(Warning):
pass
# NOTE: If you add a new warning class, add it to the man page, too!
class CacheWriteErrorWarning(Warning):
pass
class CorruptSConsignWarning(WarningOnByDefault):
pass
class DependencyWarning(Warning):
pass
class DuplicateEnvironmentWarning(WarningOnByDefault):
pass
class FutureReservedVariableWarning(WarningOnByDefault):
pass
class LinkWarning(WarningOnByDefault):
pass
class MisleadingKeywordsWarning(WarningOnByDefault):
pass
class MissingSConscriptWarning(WarningOnByDefault):
pass
class NoMD5ModuleWarning(WarningOnByDefault):
pass
class NoMetaclassSupportWarning(WarningOnByDefault):
pass
class NoObjectCountWarning(WarningOnByDefault):
pass
class NoParallelSupportWarning(WarningOnByDefault):
pass
class ReservedVariableWarning(WarningOnByDefault):
pass
class StackSizeWarning(WarningOnByDefault):
pass
class VisualCMissingWarning(WarningOnByDefault):
pass
# Used when MSVC_VERSION and MSVS_VERSION do not point to the
# same version (MSVS_VERSION is deprecated)
class VisualVersionMismatch(WarningOnByDefault):
pass
class VisualStudioMissingWarning(Warning):
pass
class FortranCxxMixWarning(LinkWarning):
pass
# Deprecation warnings
class FutureDeprecatedWarning(Warning):
pass
class DeprecatedWarning(Warning):
pass
class MandatoryDeprecatedWarning(DeprecatedWarning):
pass
# Special case; base always stays DeprecatedWarning
class PythonVersionWarning(DeprecatedWarning):
pass
class DeprecatedSourceCodeWarning(FutureDeprecatedWarning):
pass
class DeprecatedBuildDirWarning(DeprecatedWarning):
pass
class TaskmasterNeedsExecuteWarning(DeprecatedWarning):
pass
class DeprecatedCopyWarning(MandatoryDeprecatedWarning):
pass
class DeprecatedOptionsWarning(MandatoryDeprecatedWarning):
pass
class DeprecatedSourceSignaturesWarning(MandatoryDeprecatedWarning):
pass
class DeprecatedTargetSignaturesWarning(MandatoryDeprecatedWarning):
pass
class DeprecatedDebugOptionsWarning(MandatoryDeprecatedWarning):
pass
class DeprecatedSigModuleWarning(MandatoryDeprecatedWarning):
pass
class DeprecatedBuilderKeywordsWarning(MandatoryDeprecatedWarning):
pass
# The below is a list of 2-tuples. The first element is a class object.
# The second element is true if that class is enabled, false if it is disabled.
_enabled = []
# If set, raise the warning as an exception
_warningAsException = 0
# If not None, a function to call with the warning
_warningOut = None
def suppressWarningClass(clazz):
"""Suppresses all warnings that are of type clazz or
derived from clazz."""
_enabled.insert(0, (clazz, 0))
def enableWarningClass(clazz):
"""Enables all warnings that are of type clazz or
derived from clazz."""
_enabled.insert(0, (clazz, 1))
def warningAsException(flag=1):
"""Turn warnings into exceptions. Returns the old value of the flag."""
global _warningAsException
old = _warningAsException
_warningAsException = flag
return old
def warn(clazz, *args):
global _enabled, _warningAsException, _warningOut
warning = clazz(args)
for clazz, flag in _enabled:
if isinstance(warning, clazz):
if flag:
if _warningAsException:
raise warning
if _warningOut:
_warningOut(warning)
break
def process_warn_strings(arguments):
"""Process string specifications of enabling/disabling warnings,
as passed to the --warn option or the SetOption('warn') function.
An argument to this option should be of the form <warning-class>
or no-<warning-class>. The warning class is munged in order
to get an actual class name from the classes above, which we
need to pass to the {enable,disable}WarningClass() functions.
The supplied <warning-class> is split on hyphens, each element
is capitalized, then smushed back together. Then the string
"Warning" is appended to get the class name.
For example, 'deprecated' will enable the DeprecatedWarning
class. 'no-dependency' will disable the DependencyWarning class.
As a special case, --warn=all and --warn=no-all will enable or
disable (respectively) the base Warning class of all warnings.
"""
def _capitalize(s):
if s[:5] == "scons":
return "SCons" + s[5:]
else:
return s.capitalize()
for arg in arguments:
elems = arg.lower().split('-')
enable = 1
if elems[0] == 'no':
enable = 0
del elems[0]
if len(elems) == 1 and elems[0] == 'all':
class_name = "Warning"
else:
class_name = ''.join(map(_capitalize, elems)) + "Warning"
try:
clazz = globals()[class_name]
except KeyError:
sys.stderr.write("No warning type: '%s'\n" % arg)
else:
if enable:
enableWarningClass(clazz)
elif issubclass(clazz, MandatoryDeprecatedWarning):
fmt = "Can not disable mandataory warning: '%s'\n"
sys.stderr.write(fmt % arg)
else:
suppressWarningClass(clazz)
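# Illustrative sketch of the option strings process_warn_strings() accepts,
# following its docstring (calls shown for illustration only):
#
#   process_warn_strings(['deprecated'])      # enables DeprecatedWarning
#   process_warn_strings(['no-dependency'])   # suppresses DependencyWarning
#   process_warn_strings(['no-all'])          # suppresses the base Warning class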
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-2.0 |
TomHeatwole/osf.io | scripts/tests/test_triggered_mails.py | 31 | 2280 | import mock
from datetime import datetime, timedelta
from nose.tools import * # noqa
from tests.base import OsfTestCase
from tests.factories import UserFactory
from scripts.triggered_mails import main, find_inactive_users_with_no_inactivity_email_sent_or_queued
from website import mails
class TestTriggeredMails(OsfTestCase):
def setUp(self):
super(TestTriggeredMails, self).setUp()
self.user = UserFactory()
self.user.date_last_login = datetime.utcnow()
self.user.save()
@mock.patch('website.mails.queue_mail')
def test_dont_trigger_no_login_mail(self, mock_queue):
self.user.date_last_login = datetime.utcnow() - timedelta(seconds=6)
self.user.save()
main(dry_run=False)
assert_false(mock_queue.called)
@mock.patch('website.mails.queue_mail')
def test_trigger_no_login_mail(self, mock_queue):
self.user.date_last_login = datetime.utcnow() - timedelta(weeks=6)
self.user.save()
main(dry_run=False)
mock_queue.assert_called_with(
user=mock.ANY,
fullname=self.user.fullname,
to_addr=self.user.username,
mail={'callback': mock.ANY, 'template': 'no_login', 'subject': mock.ANY},
send_at=mock.ANY,
)
@mock.patch('website.mails.send_mail')
def test_find_inactive_users_with_no_inactivity_email_sent_or_queued(self, mock_mail):
user_active = UserFactory(fullname='Spot')
user_inactive = UserFactory(fullname='Nucha')
user_already_received_mail = UserFactory(fullname='Pep')
user_active.date_last_login = datetime.utcnow() - timedelta(seconds=6)
user_inactive.date_last_login = datetime.utcnow() - timedelta(weeks=6)
user_already_received_mail.date_last_login = datetime.utcnow() - timedelta(weeks=6)
user_active.save()
user_inactive.save()
user_already_received_mail.save()
mails.queue_mail(to_addr=user_already_received_mail.username,
send_at=datetime.utcnow(),
user=user_already_received_mail,
mail=mails.NO_LOGIN)
users = find_inactive_users_with_no_inactivity_email_sent_or_queued()
assert_equal(len(users), 1)
| apache-2.0 |
tuxfux-hlp-notes/python-batches | archieves/batch-61/modules/myenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/urllib3/packages/ordered_dict.py | 1093 | 8936 | # Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
# Copyright 2009 Raymond Hettinger, released under the MIT License.
# http://code.activestate.com/recipes/576693/
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.itervalues():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
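# Illustrative sketch: insertion order is preserved when iterating.
#
#   >>> d = OrderedDict()
#   >>> d['b'] = 1
#   >>> d['a'] = 2
#   >>> list(d.keys())
#   ['b', 'a']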
| gpl-3.0 |
EvanK/ansible | test/units/modules/network/f5/test_bigip_configsync_action.py | 21 | 3910 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_configsync_action import Parameters
from library.modules.bigip_configsync_action import ModuleManager
from library.modules.bigip_configsync_action import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_configsync_action import Parameters
from ansible.modules.network.f5.bigip_configsync_action import ModuleManager
from ansible.modules.network.f5.bigip_configsync_action import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
sync_device_to_group=True,
sync_group_to_device=True,
overwrite_config=True,
device_group="foo"
)
p = Parameters(params=args)
assert p.sync_device_to_group is True
assert p.sync_group_to_device is True
assert p.overwrite_config is True
assert p.device_group == 'foo'
def test_module_parameters_yes_no(self):
args = dict(
sync_device_to_group='yes',
sync_group_to_device='no',
overwrite_config='yes',
device_group="foo"
)
p = Parameters(params=args)
assert p.sync_device_to_group == 'yes'
assert p.sync_group_to_device == 'no'
assert p.overwrite_config == 'yes'
assert p.device_group == 'foo'
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
def tearDown(self):
self.patcher1.stop()
def test_update_agent_status_traps(self, *args):
set_module_args(dict(
sync_device_to_group='yes',
device_group="foo",
password='password',
server='localhost',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm._device_group_exists = Mock(return_value=True)
mm._sync_to_group_required = Mock(return_value=False)
mm.execute_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=None)
mm._get_status_from_resource = Mock()
mm._get_status_from_resource.side_effect = [
'Changes Pending', 'Awaiting Initial Sync', 'In Sync'
]
results = mm.exec_module()
assert results['changed'] is True
| gpl-3.0 |
JD-P/HackerNewsToJSON | hn2json.py | 1 | 6275 | #!/usr/bin/env python
"""Python-Pinboard
Python script for downloading your saved stories and saved comments on Hacker News
and converting them to a JSON format for easy use.
Originally written on Pythonista on iPad
"""
__version__ = "1.1"
__license__ = "BSD"
__copyright__ = "Copyright 2013-2014, Luciano Fiandesio"
__author__ = "Luciano Fiandesio <http://fiandes.io/> & John David Pressman <http://jdpressman.com>"
import argparse
import time
import re
import sys
# urllib submodules must be imported explicitly for urlopen/URLError to resolve
import urllib.request
import urllib.error
import urllib.parse as urlparse
import json
from bs4 import BeautifulSoup
import requests
from types import *
import xml.etree.ElementTree as xml
HACKERNEWS = 'https://news.ycombinator.com'
parser = argparse.ArgumentParser()
parser.add_argument("username", help="The Hacker News username to grab the stories from.")
parser.add_argument("password", help="The password to login with using the username.")
parser.add_argument("-f", "--file", help="Filepath to store the JSON document at.")
parser.add_argument("-n", "--number", default=1, type=int, help="Number of pages to grab, default 1. 0 grabs all pages.")
parser.add_argument("-s", "--stories", action="store_true", help="Grab stories only.")
parser.add_argument("-c", "--comments", action="store_true", help="Grab comments only.")
arguments = parser.parse_args()
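# Example invocations (illustrative; the username and password are placeholders):
#
#   python hn2json.py myuser mypassword -n 2 -f saved.json
#   python hn2json.py myuser mypassword --comments    # saved comments only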
def getSavedStories(session, hnuser, page_range):
"""Return a list of story IDs representing your saved stories.
    This function does not return the metadata associated with each story, just the IDs.
This list is traversed and each item inside is grabbed using the Hacker News
API by story ID."""
story_ids = []
for page_index in page_range:
saved = session.get(HACKERNEWS + '/upvoted?id=' +
hnuser + "&p=" + str(page_index))
soup = BeautifulSoup(saved.content)
for tag in soup.findAll('td',attrs={'class':'subtext'}):
            if tag.a is not None:  # tag.a is either a Tag or None
a_tags = tag.find_all('a')
for a_tag in a_tags:
if a_tag['href'][:5] == 'item?':
story_id = a_tag['href'].split('id=')[1]
story_ids.append(story_id)
break
return story_ids
def getSavedComments(session, hnuser, page_range):
"""Return a list of IDs representing your saved comments.
    This function does not return the metadata associated with each comment, just the IDs.
This list is traversed and each item inside is grabbed using the Hacker News
API by ID."""
comment_ids = []
for page_index in page_range:
saved = session.get(HACKERNEWS + '/upvoted?id=' +
hnuser + "&comments=t" + "&p=" + str(page_index))
soup = BeautifulSoup(saved.content)
for tag in soup.findAll('td',attrs={'class':'default'}):
            if tag.a is not None:  # tag.a is either a Tag or None
a_tags = tag.find_all('a')
for a_tag in a_tags:
if a_tag['href'][:5] == 'item?':
comment_id = a_tag['href'].split('id=')[1]
comment_ids.append(comment_id)
break
return comment_ids
def loginToHackerNews(username, password):
s = requests.Session() # init a session (use cookies across requests)
headers = { # we need to specify an header to get the right cookie
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:25.0) Gecko/20100101 Firefox/25.0',
'Accept' : "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
}
# Build the login POST data and make the login request.
payload = {
'whence': 'news',
'acct': username,
'pw': password
}
auth = s.post(HACKERNEWS+'/login', data=payload, headers=headers )
if 'Bad login' in str(auth.content):
raise Exception("Hacker News authentication failed!")
if not username in str(auth.content):
raise Exception("Hacker News didn't succeed, username not displayed.")
return s # return the http session
def getHackerNewsItem(item_id):
"""Get an 'item' as specified in the HackerNews v0 API."""
time.sleep(0.2)
item_json_link = "https://hacker-news.firebaseio.com/v0/item/" + item_id + ".json"
try:
with urllib.request.urlopen(item_json_link) as item_json:
return json.loads(item_json.read().decode('utf-8'))
except urllib.error.URLError:
return {"title":"Item " + item_id + " could not be retrieved",
"id":item_id}
def item2stderr(item_id, item_count, item_total):
sys.stderr.write("Got item " + item_id + ". ({} of {})\n".format(item_count,
item_total))
def main():
json_items = {"saved_stories":list(), "saved_comments":list()}
if arguments.stories and arguments.comments:
# Assume that if somebody uses both flags they mean to grab both
arguments.stories = False
arguments.comments = False
item_count = 0
session = loginToHackerNews(arguments.username, arguments.password)
page_range = range(1, arguments.number + 1)
if arguments.stories or (not arguments.stories and not arguments.comments):
story_ids = getSavedStories(session,
arguments.username,
page_range)
for story_id in story_ids:
json_items["saved_stories"].append(getHackerNewsItem(story_id))
item_count += 1
item2stderr(story_id, item_count, len(story_ids))
if arguments.comments or (not arguments.stories and not arguments.comments):
item_count = 0
comment_ids = getSavedComments(session,
arguments.username,
page_range)
for comment_id in comment_ids:
json_items["saved_comments"].append(getHackerNewsItem(comment_id))
item_count += 1
item2stderr(comment_id, item_count, len(comment_ids))
if arguments.file:
with open(arguments.file, 'w') as outfile:
json.dump(json_items, outfile)
else:
print(json.dumps(json_items))
if __name__ == "__main__":
main()
| bsd-3-clause |
applidget/zxing-ios | cpp/scons/scons-local-2.0.0.final.0/SCons/Util.py | 34 | 49016 | """SCons.Util
Various utility functions go here.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Util.py 5023 2010/06/14 22:05:46 scons"
import os
import sys
import copy
import re
import types
from collections import UserDict, UserList, UserString
# Don't "from types import ..." these because we need to get at the
# types module later to look for UnicodeType.
InstanceType = types.InstanceType
MethodType = types.MethodType
FunctionType = types.FunctionType
try: unicode
except NameError: UnicodeType = None
else: UnicodeType = unicode
def dictify(keys, values, result={}):
for k, v in zip(keys, values):
result[k] = v
return result
_altsep = os.altsep
if _altsep is None and sys.platform == 'win32':
# My ActivePython 2.0.1 doesn't set os.altsep! What gives?
_altsep = '/'
if _altsep:
def rightmost_separator(path, sep):
return max(path.rfind(sep), path.rfind(_altsep))
else:
def rightmost_separator(path, sep):
return path.rfind(sep)
# First two from the Python Cookbook, just for completeness.
# (Yeah, yeah, YAGNI...)
def containsAny(str, set):
"""Check whether sequence str contains ANY of the items in set."""
for c in set:
if c in str: return 1
return 0
def containsAll(str, set):
"""Check whether sequence str contains ALL of the items in set."""
for c in set:
if c not in str: return 0
return 1
def containsOnly(str, set):
"""Check whether sequence str contains ONLY items in set."""
for c in str:
if c not in set: return 0
return 1
def splitext(path):
"Same as os.path.splitext() but faster."
sep = rightmost_separator(path, os.sep)
dot = path.rfind('.')
# An ext is only real if it has at least one non-digit char
if dot > sep and not containsOnly(path[dot:], "0123456789."):
return path[:dot],path[dot:]
else:
return path,""
def updrive(path):
"""
Make the drive letter (if any) upper case.
    This is useful because Windows is inconsistent on the case
of the drive letter, which can cause inconsistencies when
calculating command signatures.
"""
drive, rest = os.path.splitdrive(path)
if drive:
path = drive.upper() + rest
return path
class NodeList(UserList):
"""This class is almost exactly like a regular list of Nodes
(actually it can hold any object), with one important difference.
If you try to get an attribute from this list, it will return that
attribute from every item in the list. For example:
>>> someList = NodeList([ ' foo ', ' bar ' ])
>>> someList.strip()
[ 'foo', 'bar' ]
"""
def __nonzero__(self):
return len(self.data) != 0
def __str__(self):
return ' '.join(map(str, self.data))
def __iter__(self):
return iter(self.data)
def __call__(self, *args, **kwargs):
result = [x(*args, **kwargs) for x in self.data]
return self.__class__(result)
def __getattr__(self, name):
result = [getattr(x, name) for x in self.data]
return self.__class__(result)
_get_env_var = re.compile(r'^\$([_a-zA-Z]\w*|{[_a-zA-Z]\w*})$')
def get_environment_var(varstr):
"""Given a string, first determine if it looks like a reference
to a single environment variable, like "$FOO" or "${FOO}".
If so, return that variable with no decorations ("FOO").
If not, return None."""
mo=_get_env_var.match(to_String(varstr))
if mo:
var = mo.group(1)
if var[0] == '{':
return var[1:-1]
else:
return var
else:
return None
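# Illustrative sketch of get_environment_var() behaviour:
#
#   >>> get_environment_var('$FOO')
#   'FOO'
#   >>> get_environment_var('${BAR}')
#   'BAR'
#   >>> get_environment_var('$FOO/bar') is None   # not a lone variable reference
#   True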
class DisplayEngine(object):
print_it = True
def __call__(self, text, append_newline=1):
if not self.print_it:
return
if append_newline: text = text + '\n'
try:
sys.stdout.write(unicode(text))
except IOError:
# Stdout might be connected to a pipe that has been closed
# by now. The most likely reason for the pipe being closed
# is that the user has press ctrl-c. It this is the case,
# then SCons is currently shutdown. We therefore ignore
# IOError's here so that SCons can continue and shutdown
# properly so that the .sconsign is correctly written
# before SCons exits.
pass
def set_mode(self, mode):
self.print_it = mode
def render_tree(root, child_func, prune=0, margin=[0], visited={}):
"""
Render a tree of nodes into an ASCII tree view.
root - the root node of the tree
child_func - the function called to get the children of a node
prune - don't visit the same node twice
margin - the format of the left margin to use for children of root.
1 results in a pipe, and 0 results in no pipe.
visited - a dictionary of visited nodes in the current branch if not prune,
or in the whole tree if prune.
"""
rname = str(root)
children = child_func(root)
retval = ""
for pipe in margin[:-1]:
if pipe:
retval = retval + "| "
else:
retval = retval + " "
if rname in visited:
return retval + "+-[" + rname + "]\n"
retval = retval + "+-" + rname + "\n"
if not prune:
visited = copy.copy(visited)
visited[rname] = 1
for i in range(len(children)):
margin.append(i<len(children)-1)
        retval = retval + render_tree(children[i], child_func, prune, margin, visited)
margin.pop()
return retval
IDX = lambda N: N and 1 or 0
def print_tree(root, child_func, prune=0, showtags=0, margin=[0], visited={}):
"""
Print a tree of nodes. This is like render_tree, except it prints
lines directly instead of creating a string representation in memory,
so that huge trees can be printed.
root - the root node of the tree
child_func - the function called to get the children of a node
prune - don't visit the same node twice
showtags - print status information to the left of each node line
margin - the format of the left margin to use for children of root.
1 results in a pipe, and 0 results in no pipe.
visited - a dictionary of visited nodes in the current branch if not prune,
or in the whole tree if prune.
"""
rname = str(root)
if showtags:
if showtags == 2:
legend = (' E = exists\n' +
' R = exists in repository only\n' +
' b = implicit builder\n' +
' B = explicit builder\n' +
' S = side effect\n' +
' P = precious\n' +
' A = always build\n' +
' C = current\n' +
' N = no clean\n' +
' H = no cache\n' +
'\n')
sys.stdout.write(unicode(legend))
tags = ['[']
tags.append(' E'[IDX(root.exists())])
tags.append(' R'[IDX(root.rexists() and not root.exists())])
tags.append(' BbB'[[0,1][IDX(root.has_explicit_builder())] +
[0,2][IDX(root.has_builder())]])
tags.append(' S'[IDX(root.side_effect)])
tags.append(' P'[IDX(root.precious)])
tags.append(' A'[IDX(root.always_build)])
tags.append(' C'[IDX(root.is_up_to_date())])
tags.append(' N'[IDX(root.noclean)])
tags.append(' H'[IDX(root.nocache)])
tags.append(']')
else:
tags = []
def MMM(m):
return [" ","| "][m]
margins = list(map(MMM, margin[:-1]))
children = child_func(root)
if prune and rname in visited and children:
sys.stdout.write(''.join(tags + margins + ['+-[', rname, ']']) + u'\n')
return
sys.stdout.write(''.join(tags + margins + ['+-', rname]) + u'\n')
visited[rname] = 1
if children:
margin.append(1)
idx = IDX(showtags)
for C in children[:-1]:
print_tree(C, child_func, prune, idx, margin, visited)
margin[-1] = 0
print_tree(children[-1], child_func, prune, idx, margin, visited)
margin.pop()
# Functions for deciding if things are like various types, mainly to
# handle UserDict, UserList and UserString like their underlying types.
#
# Yes, all of this manual testing breaks polymorphism, and the real
# Pythonic way to do all of this would be to just try it and handle the
# exception, but handling the exception when it's not the right type is
# often too slow.
# We are using the following trick to speed up these
# functions. Default arguments are used to take a snapshot of
# the global functions and constants used by these functions. This
# transforms accesses to global variables into local variable
# accesses (i.e. LOAD_FAST instead of LOAD_GLOBAL).
DictTypes = (dict, UserDict)
ListTypes = (list, UserList)
SequenceTypes = (list, tuple, UserList)
# Note that profiling data shows a speed-up when comparing
# explicitly with str and unicode instead of simply comparing
# with basestring. (at least on Python 2.5.1)
StringTypes = (str, unicode, UserString)
# Empirically, it is faster to check explicitly for str and
# unicode than for basestring.
BaseStringTypes = (str, unicode)
def is_Dict(obj, isinstance=isinstance, DictTypes=DictTypes):
return isinstance(obj, DictTypes)
def is_List(obj, isinstance=isinstance, ListTypes=ListTypes):
return isinstance(obj, ListTypes)
def is_Sequence(obj, isinstance=isinstance, SequenceTypes=SequenceTypes):
return isinstance(obj, SequenceTypes)
def is_Tuple(obj, isinstance=isinstance, tuple=tuple):
return isinstance(obj, tuple)
def is_String(obj, isinstance=isinstance, StringTypes=StringTypes):
return isinstance(obj, StringTypes)
def is_Scalar(obj, isinstance=isinstance, StringTypes=StringTypes, SequenceTypes=SequenceTypes):
# Profiling shows that there is an impressive speed-up of 2x
    # when explicitly checking for strings instead of just not
    # sequence when the argument (i.e. obj) is already a string.
    # But, if obj is not a string then it is twice as fast to
    # check only for 'not sequence'. The following code therefore
    # assumes that the obj argument is a string most of the time.
return isinstance(obj, StringTypes) or not isinstance(obj, SequenceTypes)
def do_flatten(sequence, result, isinstance=isinstance,
StringTypes=StringTypes, SequenceTypes=SequenceTypes):
for item in sequence:
if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes):
result.append(item)
else:
do_flatten(item, result)
def flatten(obj, isinstance=isinstance, StringTypes=StringTypes,
SequenceTypes=SequenceTypes, do_flatten=do_flatten):
"""Flatten a sequence to a non-nested list.
Flatten() converts either a single scalar or a nested sequence
to a non-nested list. Note that flatten() considers strings
to be scalars instead of sequences like Python would.
"""
if isinstance(obj, StringTypes) or not isinstance(obj, SequenceTypes):
return [obj]
result = []
for item in obj:
if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes):
result.append(item)
else:
do_flatten(item, result)
return result
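# Illustrative sketch of flatten(), following its docstring:
#
#   >>> flatten(['a', ['b', ['c']], 'd'])
#   ['a', 'b', 'c', 'd']
#   >>> flatten('abc')     # strings are scalars here, not sequences
#   ['abc']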
def flatten_sequence(sequence, isinstance=isinstance, StringTypes=StringTypes,
SequenceTypes=SequenceTypes, do_flatten=do_flatten):
"""Flatten a sequence to a non-nested list.
Same as flatten(), but it does not handle the single scalar
case. This is slightly more efficient when one knows that
the sequence to flatten can not be a scalar.
"""
result = []
for item in sequence:
if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes):
result.append(item)
else:
do_flatten(item, result)
return result
# Generic convert-to-string functions that abstract away whether or
# not the Python we're executing has Unicode support. The wrapper
# to_String_for_signature() will use a for_signature() method if the
# specified object has one.
#
def to_String(s,
isinstance=isinstance, str=str,
UserString=UserString, BaseStringTypes=BaseStringTypes):
if isinstance(s,BaseStringTypes):
# Early out when already a string!
return s
elif isinstance(s, UserString):
# s.data can only be either a unicode or a regular
# string. Please see the UserString initializer.
return s.data
else:
return str(s)
def to_String_for_subst(s,
isinstance=isinstance, str=str, to_String=to_String,
BaseStringTypes=BaseStringTypes, SequenceTypes=SequenceTypes,
UserString=UserString):
# Note that the test cases are sorted by order of probability.
if isinstance(s, BaseStringTypes):
return s
elif isinstance(s, SequenceTypes):
l = []
for e in s:
l.append(to_String_for_subst(e))
        return ' '.join(l)  # join the converted elements, not the raw sequence
elif isinstance(s, UserString):
# s.data can only be either a unicode or a regular
# string. Please see the UserString initializer.
return s.data
else:
return str(s)
def to_String_for_signature(obj, to_String_for_subst=to_String_for_subst,
AttributeError=AttributeError):
try:
f = obj.for_signature
except AttributeError:
return to_String_for_subst(obj)
else:
return f()
# The SCons "semi-deep" copy.
#
# This makes separate copies of lists (including UserList objects)
# dictionaries (including UserDict objects) and tuples, but just copies
# references to anything else it finds.
#
# A special case is any object that has a __semi_deepcopy__() method,
# which we invoke to create the copy, which is used by the BuilderDict
# class because of its extra initialization argument.
#
# The dispatch table approach used here is a direct rip-off from the
# normal Python copy module.
_semi_deepcopy_dispatch = d = {}
def _semi_deepcopy_dict(x):
copy = {}
for key, val in x.items():
# The regular Python copy.deepcopy() also deepcopies the key,
# as follows:
#
# copy[semi_deepcopy(key)] = semi_deepcopy(val)
#
# Doesn't seem like we need to, but we'll comment it just in case.
copy[key] = semi_deepcopy(val)
return copy
d[dict] = _semi_deepcopy_dict
def _semi_deepcopy_list(x):
return list(map(semi_deepcopy, x))
d[list] = _semi_deepcopy_list
def _semi_deepcopy_tuple(x):
return tuple(map(semi_deepcopy, x))
d[tuple] = _semi_deepcopy_tuple
def _semi_deepcopy_inst(x):
if hasattr(x, '__semi_deepcopy__'):
return x.__semi_deepcopy__()
elif isinstance(x, UserDict):
return x.__class__(_semi_deepcopy_dict(x))
elif isinstance(x, UserList):
return x.__class__(_semi_deepcopy_list(x))
else:
return x
d[InstanceType] = _semi_deepcopy_inst
def semi_deepcopy(x):
copier = _semi_deepcopy_dispatch.get(type(x))
if copier:
return copier(x)
else:
return x
class Proxy(object):
"""A simple generic Proxy class, forwarding all calls to
subject. So, for the benefit of the python newbie, what does
this really mean? Well, it means that you can take an object, let's
call it 'objA', and wrap it in this Proxy class, with a statement
like this
proxyObj = Proxy(objA),
Then, if in the future, you do something like this
x = proxyObj.var1,
since Proxy does not have a 'var1' attribute (but presumably objA does),
the request actually is equivalent to saying
x = objA.var1
Inherit from this class to create a Proxy.
Note that, with new-style classes, this does *not* work transparently
for Proxy subclasses that use special .__*__() method names, because
those names are now bound to the class, not the individual instances.
You now need to know in advance which .__*__() method names you want
to pass on to the underlying Proxy object, and specifically delegate
their calls like this:
class Foo(Proxy):
__str__ = Delegate('__str__')
"""
def __init__(self, subject):
"""Wrap an object as a Proxy object"""
self._subject = subject
def __getattr__(self, name):
"""Retrieve an attribute from the wrapped object. If the named
attribute doesn't exist, AttributeError is raised"""
return getattr(self._subject, name)
def get(self):
"""Retrieve the entire wrapped object"""
return self._subject
def __cmp__(self, other):
if issubclass(other.__class__, self._subject.__class__):
return cmp(self._subject, other)
return cmp(self.__dict__, other.__dict__)
class Delegate(object):
"""A Python Descriptor class that delegates attribute fetches
to an underlying wrapped subject of a Proxy. Typical use:
class Foo(Proxy):
__str__ = Delegate('__str__')
"""
def __init__(self, attribute):
self.attribute = attribute
def __get__(self, obj, cls):
if isinstance(obj, cls):
return getattr(obj._subject, self.attribute)
else:
return self
# attempt to load the windows registry module:
can_read_reg = 0
try:
import winreg
can_read_reg = 1
hkey_mod = winreg
RegOpenKeyEx = winreg.OpenKeyEx
RegEnumKey = winreg.EnumKey
RegEnumValue = winreg.EnumValue
RegQueryValueEx = winreg.QueryValueEx
RegError = winreg.error
except ImportError:
try:
import win32api
import win32con
can_read_reg = 1
hkey_mod = win32con
RegOpenKeyEx = win32api.RegOpenKeyEx
RegEnumKey = win32api.RegEnumKey
RegEnumValue = win32api.RegEnumValue
RegQueryValueEx = win32api.RegQueryValueEx
RegError = win32api.error
except ImportError:
class _NoError(Exception):
pass
RegError = _NoError
if can_read_reg:
HKEY_CLASSES_ROOT = hkey_mod.HKEY_CLASSES_ROOT
HKEY_LOCAL_MACHINE = hkey_mod.HKEY_LOCAL_MACHINE
HKEY_CURRENT_USER = hkey_mod.HKEY_CURRENT_USER
HKEY_USERS = hkey_mod.HKEY_USERS
def RegGetValue(root, key):
"""This utility function returns a value in the registry
without having to open the key first. Only available on
Windows platforms with a version of Python that can read the
registry. Returns the same thing as
SCons.Util.RegQueryValueEx, except you just specify the entire
path to the value, and don't have to bother opening the key
first. So:
Instead of:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
r'SOFTWARE\Microsoft\Windows\CurrentVersion')
out = SCons.Util.RegQueryValueEx(k,
'ProgramFilesDir')
You can write:
out = SCons.Util.RegGetValue(SCons.Util.HKEY_LOCAL_MACHINE,
r'SOFTWARE\Microsoft\Windows\CurrentVersion\ProgramFilesDir')
"""
# I would use os.path.split here, but it's not a filesystem
# path...
p = key.rfind('\\') + 1
keyp = key[:p-1] # -1 to omit trailing slash
val = key[p:]
k = RegOpenKeyEx(root, keyp)
return RegQueryValueEx(k,val)
else:
try:
e = WindowsError
except NameError:
# Make sure we have a definition of WindowsError so we can
# run platform-independent tests of Windows functionality on
# platforms other than Windows. (WindowsError is, in fact, an
# OSError subclass on Windows.)
class WindowsError(OSError):
pass
import builtins
builtins.WindowsError = WindowsError
else:
del e
HKEY_CLASSES_ROOT = None
HKEY_LOCAL_MACHINE = None
HKEY_CURRENT_USER = None
HKEY_USERS = None
def RegGetValue(root, key):
raise WindowsError
def RegOpenKeyEx(root, key):
raise WindowsError
if sys.platform == 'win32':
def WhereIs(file, path=None, pathext=None, reject=[]):
if path is None:
try:
path = os.environ['PATH']
except KeyError:
return None
if is_String(path):
path = path.split(os.pathsep)
if pathext is None:
try:
pathext = os.environ['PATHEXT']
except KeyError:
pathext = '.COM;.EXE;.BAT;.CMD'
if is_String(pathext):
pathext = pathext.split(os.pathsep)
for ext in pathext:
if ext.lower() == file[-len(ext):].lower():
pathext = ['']
break
if not is_List(reject) and not is_Tuple(reject):
reject = [reject]
for dir in path:
f = os.path.join(dir, file)
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
try:
reject.index(fext)
except ValueError:
return os.path.normpath(fext)
continue
return None
elif os.name == 'os2':
def WhereIs(file, path=None, pathext=None, reject=[]):
if path is None:
try:
path = os.environ['PATH']
except KeyError:
return None
if is_String(path):
path = path.split(os.pathsep)
if pathext is None:
pathext = ['.exe', '.cmd']
for ext in pathext:
if ext.lower() == file[-len(ext):].lower():
pathext = ['']
break
if not is_List(reject) and not is_Tuple(reject):
reject = [reject]
for dir in path:
f = os.path.join(dir, file)
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
try:
reject.index(fext)
except ValueError:
return os.path.normpath(fext)
continue
return None
else:
def WhereIs(file, path=None, pathext=None, reject=[]):
import stat
if path is None:
try:
path = os.environ['PATH']
except KeyError:
return None
if is_String(path):
path = path.split(os.pathsep)
if not is_List(reject) and not is_Tuple(reject):
reject = [reject]
for d in path:
f = os.path.join(d, file)
if os.path.isfile(f):
try:
st = os.stat(f)
except OSError:
# os.stat() raises OSError, not IOError if the file
# doesn't exist, so in this case we let IOError get
# raised so as to not mask possibly serious disk or
# network issues.
continue
if stat.S_IMODE(st[stat.ST_MODE]) & 0111:
try:
reject.index(f)
except ValueError:
return os.path.normpath(f)
continue
return None
def PrependPath(oldpath, newpath, sep = os.pathsep,
delete_existing=1, canonicalize=None):
"""This prepends newpath elements to the given oldpath. Will only
add any particular path once (leaving the first one it encounters
and ignoring the rest, to preserve path order), and will
os.path.normpath and os.path.normcase all paths to help assure
this. This can also handle the case where the given old path
variable is a list instead of a string, in which case a list will
be returned instead of a string.
Example:
Old Path: "/foo/bar:/foo"
New Path: "/biz/boom:/foo"
Result: "/biz/boom:/foo:/foo/bar"
If delete_existing is 0, then adding a path that exists will
not move it to the beginning; it will stay where it is in the
list.
If canonicalize is not None, it is applied to each element of
newpath before use.
"""
orig = oldpath
is_list = 1
paths = orig
if not is_List(orig) and not is_Tuple(orig):
paths = paths.split(sep)
is_list = 0
if is_String(newpath):
newpaths = newpath.split(sep)
elif not is_List(newpath) and not is_Tuple(newpath):
newpaths = [ newpath ] # might be a Dir
else:
newpaths = newpath
if canonicalize:
newpaths=list(map(canonicalize, newpaths))
if not delete_existing:
# First uniquify the old paths, making sure to
# preserve the first instance (in Unix/Linux,
# the first one wins), and remembering them in normpaths.
# Then insert the new paths at the head of the list
# if they're not already in the normpaths list.
result = []
normpaths = []
for path in paths:
if not path:
continue
normpath = os.path.normpath(os.path.normcase(path))
if normpath not in normpaths:
result.append(path)
normpaths.append(normpath)
newpaths.reverse() # since we're inserting at the head
for path in newpaths:
if not path:
continue
normpath = os.path.normpath(os.path.normcase(path))
if normpath not in normpaths:
result.insert(0, path)
normpaths.append(normpath)
paths = result
else:
newpaths = newpaths + paths # prepend new paths
normpaths = []
paths = []
# now we add them only if they are unique
for path in newpaths:
normpath = os.path.normpath(os.path.normcase(path))
if path and not normpath in normpaths:
paths.append(path)
normpaths.append(normpath)
if is_list:
return paths
else:
return sep.join(paths)
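# Illustrative sketch of the docstring example above (assuming a ':' path
# separator):
#
#   >>> PrependPath('/foo/bar:/foo', '/biz/boom:/foo', sep=':')
#   '/biz/boom:/foo:/foo/bar'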
def AppendPath(oldpath, newpath, sep = os.pathsep,
delete_existing=1, canonicalize=None):
"""This appends new path elements to the given old path. Will
only add any particular path once (leaving the last one it
encounters and ignoring the rest, to preserve path order), and
will os.path.normpath and os.path.normcase all paths to help
assure this. This can also handle the case where the given old
path variable is a list instead of a string, in which case a list
will be returned instead of a string.
Example:
Old Path: "/foo/bar:/foo"
New Path: "/biz/boom:/foo"
Result: "/foo/bar:/biz/boom:/foo"
If delete_existing is 0, then adding a path that exists
will not move it to the end; it will stay where it is in the list.
If canonicalize is not None, it is applied to each element of
newpath before use.
"""
orig = oldpath
is_list = 1
paths = orig
if not is_List(orig) and not is_Tuple(orig):
paths = paths.split(sep)
is_list = 0
if is_String(newpath):
newpaths = newpath.split(sep)
elif not is_List(newpath) and not is_Tuple(newpath):
newpaths = [ newpath ] # might be a Dir
else:
newpaths = newpath
if canonicalize:
newpaths=list(map(canonicalize, newpaths))
if not delete_existing:
# add old paths to result, then
# add new paths if not already present
# (I thought about using a dict for normpaths for speed,
# but it's not clear hashing the strings would be faster
# than linear searching these typically short lists.)
result = []
normpaths = []
for path in paths:
if not path:
continue
result.append(path)
normpaths.append(os.path.normpath(os.path.normcase(path)))
for path in newpaths:
if not path:
continue
normpath = os.path.normpath(os.path.normcase(path))
if normpath not in normpaths:
result.append(path)
normpaths.append(normpath)
paths = result
else:
# start w/ new paths, add old ones if not present,
# then reverse.
newpaths = paths + newpaths # append new paths
newpaths.reverse()
normpaths = []
paths = []
# now we add them only if they are unique
for path in newpaths:
normpath = os.path.normpath(os.path.normcase(path))
if path and not normpath in normpaths:
paths.append(path)
normpaths.append(normpath)
paths.reverse()
if is_list:
return paths
else:
return sep.join(paths)
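# Illustrative sketch of the docstring example above (assuming a ':' path
# separator):
#
#   >>> AppendPath('/foo/bar:/foo', '/biz/boom:/foo', sep=':')
#   '/foo/bar:/biz/boom:/foo'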
if sys.platform == 'cygwin':
def get_native_path(path):
"""Transforms an absolute path into a native path for the system. In
Cygwin, this converts from a Cygwin path to a Windows one."""
return os.popen('cygpath -w ' + path).read().replace('\n', '')
else:
def get_native_path(path):
"""Transforms an absolute path into a native path for the system.
Non-Cygwin version, just leave the path alone."""
return path
display = DisplayEngine()
def Split(arg):
if is_List(arg) or is_Tuple(arg):
return arg
elif is_String(arg):
return arg.split()
else:
return [arg]
class CLVar(UserList):
"""A class for command-line construction variables.
This is a list that uses Split() to split an initial string along
white-space arguments, and similarly to split any strings that get
added. This allows us to Do the Right Thing with Append() and
Prepend() (as well as straight Python foo = env['VAR'] + 'arg1
arg2') regardless of whether a user adds a list or a string to a
command-line construction variable.
"""
def __init__(self, seq = []):
UserList.__init__(self, Split(seq))
def __add__(self, other):
return UserList.__add__(self, CLVar(other))
def __radd__(self, other):
return UserList.__radd__(self, CLVar(other))
def __coerce__(self, other):
return (self, CLVar(other))
def __str__(self):
return ' '.join(self.data)
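# Illustrative sketch of CLVar(): strings are split on whitespace when added.
#
#   >>> flags = CLVar('-O2 -g')
#   >>> str(flags + ' -Wall')
#   '-O2 -g -Wall'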
# A dictionary that preserves the order in which items are added.
# Submitted by David Benjamin to ActiveState's Python Cookbook web site:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
# Including fixes/enhancements from the follow-on discussions.
class OrderedDict(UserDict):
def __init__(self, dict = None):
self._keys = []
UserDict.__init__(self, dict)
def __delitem__(self, key):
UserDict.__delitem__(self, key)
self._keys.remove(key)
def __setitem__(self, key, item):
UserDict.__setitem__(self, key, item)
if key not in self._keys: self._keys.append(key)
def clear(self):
UserDict.clear(self)
self._keys = []
def copy(self):
dict = OrderedDict()
dict.update(self)
return dict
def items(self):
return list(zip(self._keys, list(self.values())))
def keys(self):
return self._keys[:]
def popitem(self):
try:
key = self._keys[-1]
except IndexError:
raise KeyError('dictionary is empty')
val = self[key]
del self[key]
return (key, val)
def setdefault(self, key, failobj = None):
UserDict.setdefault(self, key, failobj)
if key not in self._keys: self._keys.append(key)
def update(self, dict):
for (key, val) in dict.items():
self.__setitem__(key, val)
def values(self):
return list(map(self.get, self._keys))
class Selector(OrderedDict):
"""A callable ordered dictionary that maps file suffixes to
dictionary values. We preserve the order in which items are added
so that get_suffix() calls always return the first suffix added."""
def __call__(self, env, source, ext=None):
if ext is None:
try:
ext = source[0].suffix
except IndexError:
ext = ""
try:
return self[ext]
except KeyError:
# Try to perform Environment substitution on the keys of
# the dictionary before giving up.
s_dict = {}
for (k,v) in self.items():
if k is not None:
s_k = env.subst(k)
if s_k in s_dict:
# We only raise an error when variables point
# to the same suffix. If one suffix is literal
# and a variable suffix contains this literal,
# the literal wins and we don't raise an error.
raise KeyError(s_dict[s_k][0], k, s_k)
s_dict[s_k] = (k,v)
try:
return s_dict[ext][1]
except KeyError:
try:
return self[None]
except KeyError:
return None
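# Illustrative sketch (example only): a Selector is keyed by file suffix,
# with a None entry acting as the fallback for unknown suffixes.
#   >>> s = Selector({'.c': 'compile-c', '.cpp': 'compile-c++', None: 'other'})
#   >>> s['.c']
#   'compile-c'
#   >>> s[None]
#   'other'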
if sys.platform == 'cygwin':
# On Cygwin, os.path.normcase() lies, so just report back the
# fact that the underlying Windows OS is case-insensitive.
def case_sensitive_suffixes(s1, s2):
return 0
else:
def case_sensitive_suffixes(s1, s2):
return (os.path.normcase(s1) != os.path.normcase(s2))
def adjustixes(fname, pre, suf, ensure_suffix=False):
if pre:
path, fn = os.path.split(os.path.normpath(fname))
if fn[:len(pre)] != pre:
fname = os.path.join(path, pre + fn)
# Only append a suffix if the suffix we're going to add isn't already
# there, and if either we've been asked to ensure the specific suffix
# is present or there's no suffix on it at all.
if suf and fname[-len(suf):] != suf and \
(ensure_suffix or not splitext(fname)[1]):
fname = fname + suf
return fname
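# Illustrative sketch (example only): the prefix and suffix are added only
# when they are not already present.
#   >>> adjustixes('foo', 'lib', '.a')
#   'libfoo.a'
#   >>> adjustixes('libfoo.a', 'lib', '.a')
#   'libfoo.a'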
# From Tim Peters,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# (Also in the printed Python Cookbook.)
def unique(s):
"""Return a list of the elements in s, but without duplicates.
For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3],
unique("abcabc") some permutation of ["a", "b", "c"], and
unique(([1, 2], [2, 3], [1, 2])) some permutation of
[[2, 3], [1, 2]].
For best speed, all sequence elements should be hashable. Then
unique() will usually work in linear time.
If not possible, the sequence elements should enjoy a total
ordering, and if list(s).sort() doesn't raise TypeError it's
assumed that they do enjoy a total ordering. Then unique() will
usually work in O(N*log2(N)) time.
If that's not possible either, the sequence elements must support
equality-testing. Then unique() will usually work in quadratic
time.
"""
n = len(s)
if n == 0:
return []
# Try using a dict first, as that's the fastest and will usually
# work. If it doesn't work, it will usually fail quickly, so it
# usually doesn't cost much to *try* it. It requires that all the
# sequence elements be hashable, and support equality comparison.
u = {}
try:
for x in s:
u[x] = 1
except TypeError:
pass # move on to the next method
else:
return list(u.keys())
del u
# We can't hash all the elements. Second fastest is to sort,
# which brings the equal elements together; then duplicates are
# easy to weed out in a single pass.
# NOTE: Python's list.sort() was designed to be efficient in the
# presence of many duplicate elements. This isn't true of all
# sort functions in all languages or libraries, so this approach
# is more effective in Python than it may be elsewhere.
try:
t = sorted(s)
except TypeError:
pass # move on to the next method
else:
assert n > 0
last = t[0]
lasti = i = 1
while i < n:
if t[i] != last:
t[lasti] = last = t[i]
lasti = lasti + 1
i = i + 1
return t[:lasti]
del t
# Brute force is all that's left.
u = []
for x in s:
if x not in u:
u.append(x)
return u
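# Illustrative sketch (example only): the result order is unspecified, so
# compare against a sorted copy.
#   >>> sorted(unique([1, 2, 3, 1, 2, 3]))
#   [1, 2, 3]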
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
# A more efficient implementation of Alex's uniquer(), this avoids the
# idfun() argument and function-call overhead by assuming that all
# items in the sequence are hashable.
def uniquer_hashables(seq):
seen = {}
result = []
for item in seq:
#if not item in seen:
if item not in seen:
seen[item] = 1
result.append(item)
return result
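# Illustrative sketch (example only): unlike unique() above, the uniquer
# functions preserve the order of first appearance.
#   >>> uniquer_hashables([3, 1, 3, 2, 1])
#   [3, 1, 2]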
# Much of the logic here was originally based on recipe 4.9 from the
# Python CookBook, but we had to dumb it way down for Python 1.5.2.
class LogicalLines(object):
def __init__(self, fileobj):
self.fileobj = fileobj
def readline(self):
result = []
while True:
line = self.fileobj.readline()
if not line:
break
if line[-2:] == '\\\n':
result.append(line[:-2])
else:
result.append(line)
break
return ''.join(result)
def readlines(self):
result = []
while True:
line = self.readline()
if not line:
break
result.append(line)
return result
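# Illustrative sketch (example only): if a file contains the physical lines
# "ab \" (ending in a backslash), "cd" and "ef", then readline() splices the
# continuation and returns "ab cd\n", and the next call returns "ef\n".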
class UniqueList(UserList):
def __init__(self, seq = []):
UserList.__init__(self, seq)
self.unique = True
def __make_unique(self):
if not self.unique:
self.data = uniquer_hashables(self.data)
self.unique = True
def __lt__(self, other):
self.__make_unique()
return UserList.__lt__(self, other)
def __le__(self, other):
self.__make_unique()
return UserList.__le__(self, other)
def __eq__(self, other):
self.__make_unique()
return UserList.__eq__(self, other)
def __ne__(self, other):
self.__make_unique()
return UserList.__ne__(self, other)
def __gt__(self, other):
self.__make_unique()
return UserList.__gt__(self, other)
def __ge__(self, other):
self.__make_unique()
return UserList.__ge__(self, other)
def __cmp__(self, other):
self.__make_unique()
return UserList.__cmp__(self, other)
def __len__(self):
self.__make_unique()
return UserList.__len__(self)
def __getitem__(self, i):
self.__make_unique()
return UserList.__getitem__(self, i)
def __setitem__(self, i, item):
UserList.__setitem__(self, i, item)
self.unique = False
def __getslice__(self, i, j):
self.__make_unique()
return UserList.__getslice__(self, i, j)
def __setslice__(self, i, j, other):
UserList.__setslice__(self, i, j, other)
self.unique = False
def __add__(self, other):
result = UserList.__add__(self, other)
result.unique = False
return result
def __radd__(self, other):
result = UserList.__radd__(self, other)
result.unique = False
return result
def __iadd__(self, other):
result = UserList.__iadd__(self, other)
result.unique = False
return result
def __mul__(self, other):
result = UserList.__mul__(self, other)
result.unique = False
return result
def __rmul__(self, other):
result = UserList.__rmul__(self, other)
result.unique = False
return result
def __imul__(self, other):
result = UserList.__imul__(self, other)
result.unique = False
return result
def append(self, item):
UserList.append(self, item)
self.unique = False
    def insert(self, i, item):
        UserList.insert(self, i, item)
        self.unique = False
def count(self, item):
self.__make_unique()
return UserList.count(self, item)
def index(self, item):
self.__make_unique()
return UserList.index(self, item)
def reverse(self):
self.__make_unique()
UserList.reverse(self)
def sort(self, *args, **kwds):
self.__make_unique()
return UserList.sort(self, *args, **kwds)
def extend(self, other):
UserList.extend(self, other)
self.unique = False
class Unbuffered(object):
"""
A proxy class that wraps a file object, flushing after every write,
and delegating everything else to the wrapped object.
"""
def __init__(self, file):
self.file = file
self.softspace = 0 ## backward compatibility; not supported in Py3k
def write(self, arg):
try:
self.file.write(arg)
self.file.flush()
except IOError:
# Stdout might be connected to a pipe that has been closed
# by now. The most likely reason for the pipe being closed
            # is that the user has pressed ctrl-c. If this is the case,
            # then SCons is currently shutting down. We therefore ignore
            # IOError's here so that SCons can continue and shut down
# properly so that the .sconsign is correctly written
# before SCons exits.
pass
def __getattr__(self, attr):
return getattr(self.file, attr)
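# Illustrative sketch (assumption, not taken from this file): the typical use
# is to wrap sys.stdout so progress output is flushed immediately even when
# it is piped:
#   sys.stdout = Unbuffered(sys.stdout)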
def make_path_relative(path):
""" makes an absolute path name to a relative pathname.
"""
if os.path.isabs(path):
drive_s,path = os.path.splitdrive(path)
import re
if not drive_s:
path=re.compile("/*(.*)").findall(path)[0]
else:
path=path[1:]
assert( not os.path.isabs( path ) ), path
return path
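# Illustrative sketch (example only, POSIX case): the leading root is dropped
# and already-relative paths come back unchanged.
#   >>> make_path_relative('/usr/include/python')
#   'usr/include/python'
#   >>> make_path_relative('lib/foo.c')
#   'lib/foo.c'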
# The original idea for AddMethod() and RenameFunction() come from the
# following post to the ActiveState Python Cookbook:
#
# ASPN: Python Cookbook : Install bound methods in an instance
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/223613
#
# That code was a little fragile, though, so the following changes
# have been wrung on it:
#
# * Switched the installmethod() "object" and "function" arguments,
# so the order reflects that the left-hand side is the thing being
# "assigned to" and the right-hand side is the value being assigned.
#
# * Changed explicit type-checking to the "try: klass = object.__class__"
# block in installmethod() below so that it still works with the
# old-style classes that SCons uses.
#
# * Replaced the by-hand creation of methods and functions with use of
# the "new" module, as alluded to in Alex Martelli's response to the
# following Cookbook post:
#
# ASPN: Python Cookbook : Dynamically added methods to a class
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/81732
def AddMethod(obj, function, name=None):
"""
Adds either a bound method to an instance or an unbound method to
    a class. If name is omitted, the name of the specified function
is used by default.
Example:
a = A()
def f(self, x, y):
self.z = x + y
    AddMethod(A, f, "add")
a.add(2, 4)
print a.z
    AddMethod(a, lambda self, i: self.l[i], "listIndex")
print a.listIndex(5)
"""
if name is None:
name = function.func_name
else:
function = RenameFunction(function, name)
if hasattr(obj, '__class__') and obj.__class__ is not type:
# "obj" is an instance, so it gets a bound method.
setattr(obj, name, MethodType(function, obj, obj.__class__))
else:
# "obj" is a class, so it gets an unbound method.
setattr(obj, name, MethodType(function, None, obj))
def RenameFunction(function, name):
"""
Returns a function identical to the specified function, but with
the specified name.
"""
return FunctionType(function.func_code,
function.func_globals,
name,
function.func_defaults)
md5 = False
def MD5signature(s):
return str(s)
def MD5filesignature(fname, chunksize=65536):
f = open(fname, "rb")
result = f.read()
f.close()
return result
try:
import hashlib
except ImportError:
pass
else:
if hasattr(hashlib, 'md5'):
md5 = True
def MD5signature(s):
m = hashlib.md5()
m.update(str(s))
return m.hexdigest()
def MD5filesignature(fname, chunksize=65536):
m = hashlib.md5()
f = open(fname, "rb")
while True:
blck = f.read(chunksize)
if not blck:
break
m.update(str(blck))
f.close()
return m.hexdigest()
def MD5collect(signatures):
"""
Collects a list of signatures into an aggregate signature.
signatures - a list of signatures
returns - the aggregate signature
"""
if len(signatures) == 1:
return signatures[0]
else:
return MD5signature(', '.join(signatures))
def silent_intern(x):
"""
Perform sys.intern() on the passed argument and return the result.
If the input is ineligible (e.g. a unicode string) the original argument is
returned and no exception is thrown.
"""
try:
return sys.intern(x)
except TypeError:
return x
# From Dinu C. Gherman,
# Python Cookbook, second edition, recipe 6.17, p. 277.
# Also:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68205
# ASPN: Python Cookbook: Null Object Design Pattern
#TODO??? class Null(object):
class Null(object):
""" Null objects always and reliably "do nothing." """
def __new__(cls, *args, **kwargs):
        if '_instance' not in vars(cls):
cls._instance = super(Null, cls).__new__(cls, *args, **kwargs)
return cls._instance
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self
def __repr__(self):
return "Null(0x%08X)" % id(self)
def __nonzero__(self):
return False
def __getattr__(self, name):
return self
def __setattr__(self, name, value):
return self
def __delattr__(self, name):
return self
class NullSeq(Null):
def __len__(self):
return 0
def __iter__(self):
return iter(())
def __getitem__(self, i):
return self
def __delitem__(self, i):
return self
def __setitem__(self, i, v):
return self
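# Illustrative sketch (example only): a Null object silently absorbs any
# attribute access, call or assignment, and NullSeq additionally behaves as
# an empty sequence.
#   >>> n = Null()
#   >>> n.missing_attribute().whatever is n
#   True
#   >>> len(NullSeq()), list(NullSeq())
#   (0, [])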
del __revision__
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| apache-2.0 |
huijunwu/heron | heron/tools/cli/tests/python/opts_unittest.py | 5 | 2120 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
''' opts_unittest.py '''
import unittest2 as unittest
import heron.tools.cli.src.python.opts as opts
#pylint: disable=missing-docstring, no-self-use
class OptsTest(unittest.TestCase):
def setUp(self):
pass
def test_one_opt(self):
opts.clear_config()
opts.set_config('XX', 'xx')
self.assertEqual('xx', opts.get_config('XX'))
def test_two_opts(self):
opts.clear_config()
opts.set_config('XX', 'xx')
opts.set_config('YY', 'yy')
self.assertEqual('xx', opts.get_config('XX'))
self.assertEqual('yy', opts.get_config('YY'))
def test_non_exist_key(self):
opts.clear_config()
opts.set_config('XX', 'xx')
self.assertEqual(None, opts.get_config('YY'))
def test_many_opts(self):
opts.clear_config()
for k in range(1, 100):
key = "key-%d" % (k)
value = "value-%d" % (k)
opts.set_config(key, value)
for k in range(1, 100):
key = "key-%d" % (k)
value = "value-%d" % (k)
self.assertEqual(value, opts.get_config(key))
def test_clear_opts(self):
opts.clear_config()
opts.set_config('YY', 'yy')
self.assertEqual('yy', opts.get_config('YY'))
opts.clear_config()
self.assertEqual(None, opts.get_config('YY'))
def tearDown(self):
opts.clear_config()
| apache-2.0 |
sometallgit/AutoUploader | Python27/Lib/site-packages/rsa/cli.py | 81 | 12185 | # -*- coding: utf-8 -*-
#
# Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Commandline scripts.
These scripts are called by the executables defined in setup.py.
"""
from __future__ import with_statement, print_function
import abc
import sys
from optparse import OptionParser
import rsa
import rsa.bigfile
import rsa.pkcs1
HASH_METHODS = sorted(rsa.pkcs1.HASH_METHODS.keys())
def keygen():
"""Key generator."""
# Parse the CLI options
parser = OptionParser(usage='usage: %prog [options] keysize',
description='Generates a new RSA keypair of "keysize" bits.')
parser.add_option('--pubout', type='string',
help='Output filename for the public key. The public key is '
'not saved if this option is not present. You can use '
'pyrsa-priv2pub to create the public key file later.')
parser.add_option('-o', '--out', type='string',
help='Output filename for the private key. The key is '
'written to stdout if this option is not present.')
parser.add_option('--form',
help='key format of the private and public keys - default PEM',
choices=('PEM', 'DER'), default='PEM')
(cli, cli_args) = parser.parse_args(sys.argv[1:])
if len(cli_args) != 1:
parser.print_help()
raise SystemExit(1)
try:
keysize = int(cli_args[0])
except ValueError:
parser.print_help()
print('Not a valid number: %s' % cli_args[0], file=sys.stderr)
raise SystemExit(1)
print('Generating %i-bit key' % keysize, file=sys.stderr)
(pub_key, priv_key) = rsa.newkeys(keysize)
# Save public key
if cli.pubout:
print('Writing public key to %s' % cli.pubout, file=sys.stderr)
data = pub_key.save_pkcs1(format=cli.form)
with open(cli.pubout, 'wb') as outfile:
outfile.write(data)
# Save private key
data = priv_key.save_pkcs1(format=cli.form)
if cli.out:
print('Writing private key to %s' % cli.out, file=sys.stderr)
with open(cli.out, 'wb') as outfile:
outfile.write(data)
else:
print('Writing private key to stdout', file=sys.stderr)
sys.stdout.write(data)
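# Illustrative sketch (assumption: the executable name comes from setup.py,
# not from this module): a typical invocation of the key generator would be
#   pyrsa-keygen 2048 --out private.pem --pubout public.pem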
class CryptoOperation(object):
"""CLI callable that operates with input, output, and a key."""
__metaclass__ = abc.ABCMeta
keyname = 'public' # or 'private'
usage = 'usage: %%prog [options] %(keyname)s_key'
description = None
operation = 'decrypt'
operation_past = 'decrypted'
operation_progressive = 'decrypting'
input_help = 'Name of the file to %(operation)s. Reads from stdin if ' \
'not specified.'
output_help = 'Name of the file to write the %(operation_past)s file ' \
'to. Written to stdout if this option is not present.'
expected_cli_args = 1
has_output = True
key_class = rsa.PublicKey
def __init__(self):
self.usage = self.usage % self.__class__.__dict__
self.input_help = self.input_help % self.__class__.__dict__
self.output_help = self.output_help % self.__class__.__dict__
@abc.abstractmethod
def perform_operation(self, indata, key, cli_args=None):
"""Performs the program's operation.
Implement in a subclass.
:returns: the data to write to the output.
"""
def __call__(self):
"""Runs the program."""
(cli, cli_args) = self.parse_cli()
key = self.read_key(cli_args[0], cli.keyform)
indata = self.read_infile(cli.input)
print(self.operation_progressive.title(), file=sys.stderr)
outdata = self.perform_operation(indata, key, cli_args)
if self.has_output:
self.write_outfile(outdata, cli.output)
def parse_cli(self):
"""Parse the CLI options
:returns: (cli_opts, cli_args)
"""
parser = OptionParser(usage=self.usage, description=self.description)
parser.add_option('-i', '--input', type='string', help=self.input_help)
if self.has_output:
parser.add_option('-o', '--output', type='string', help=self.output_help)
parser.add_option('--keyform',
help='Key format of the %s key - default PEM' % self.keyname,
choices=('PEM', 'DER'), default='PEM')
(cli, cli_args) = parser.parse_args(sys.argv[1:])
if len(cli_args) != self.expected_cli_args:
parser.print_help()
raise SystemExit(1)
return cli, cli_args
def read_key(self, filename, keyform):
"""Reads a public or private key."""
print('Reading %s key from %s' % (self.keyname, filename), file=sys.stderr)
with open(filename, 'rb') as keyfile:
keydata = keyfile.read()
return self.key_class.load_pkcs1(keydata, keyform)
def read_infile(self, inname):
"""Read the input file"""
if inname:
print('Reading input from %s' % inname, file=sys.stderr)
with open(inname, 'rb') as infile:
return infile.read()
print('Reading input from stdin', file=sys.stderr)
return sys.stdin.read()
def write_outfile(self, outdata, outname):
"""Write the output file"""
if outname:
print('Writing output to %s' % outname, file=sys.stderr)
with open(outname, 'wb') as outfile:
outfile.write(outdata)
else:
print('Writing output to stdout', file=sys.stderr)
sys.stdout.write(outdata)
class EncryptOperation(CryptoOperation):
"""Encrypts a file."""
keyname = 'public'
description = ('Encrypts a file. The file must be shorter than the key '
'length in order to be encrypted. For larger files, use the '
'pyrsa-encrypt-bigfile command.')
operation = 'encrypt'
operation_past = 'encrypted'
operation_progressive = 'encrypting'
def perform_operation(self, indata, pub_key, cli_args=None):
"""Encrypts files."""
return rsa.encrypt(indata, pub_key)
class DecryptOperation(CryptoOperation):
"""Decrypts a file."""
keyname = 'private'
description = ('Decrypts a file. The original file must be shorter than '
'the key length in order to have been encrypted. For larger '
'files, use the pyrsa-decrypt-bigfile command.')
operation = 'decrypt'
operation_past = 'decrypted'
operation_progressive = 'decrypting'
key_class = rsa.PrivateKey
def perform_operation(self, indata, priv_key, cli_args=None):
"""Decrypts files."""
return rsa.decrypt(indata, priv_key)
class SignOperation(CryptoOperation):
"""Signs a file."""
keyname = 'private'
usage = 'usage: %%prog [options] private_key hash_method'
description = ('Signs a file, outputs the signature. Choose the hash '
'method from %s' % ', '.join(HASH_METHODS))
operation = 'sign'
operation_past = 'signature'
operation_progressive = 'Signing'
key_class = rsa.PrivateKey
expected_cli_args = 2
output_help = ('Name of the file to write the signature to. Written '
'to stdout if this option is not present.')
def perform_operation(self, indata, priv_key, cli_args):
"""Signs files."""
hash_method = cli_args[1]
if hash_method not in HASH_METHODS:
raise SystemExit('Invalid hash method, choose one of %s' %
', '.join(HASH_METHODS))
return rsa.sign(indata, priv_key, hash_method)
class VerifyOperation(CryptoOperation):
"""Verify a signature."""
keyname = 'public'
usage = 'usage: %%prog [options] public_key signature_file'
description = ('Verifies a signature, exits with status 0 upon success, '
'prints an error message and exits with status 1 upon error.')
operation = 'verify'
operation_past = 'verified'
operation_progressive = 'Verifying'
key_class = rsa.PublicKey
expected_cli_args = 2
has_output = False
def perform_operation(self, indata, pub_key, cli_args):
"""Verifies files."""
signature_file = cli_args[1]
with open(signature_file, 'rb') as sigfile:
signature = sigfile.read()
try:
rsa.verify(indata, signature, pub_key)
except rsa.VerificationError:
raise SystemExit('Verification failed.')
print('Verification OK', file=sys.stderr)
class BigfileOperation(CryptoOperation):
"""CryptoOperation that doesn't read the entire file into memory."""
def __init__(self):
CryptoOperation.__init__(self)
self.file_objects = []
def __del__(self):
"""Closes any open file handles."""
for fobj in self.file_objects:
fobj.close()
def __call__(self):
"""Runs the program."""
(cli, cli_args) = self.parse_cli()
key = self.read_key(cli_args[0], cli.keyform)
# Get the file handles
infile = self.get_infile(cli.input)
outfile = self.get_outfile(cli.output)
# Call the operation
print(self.operation_progressive.title(), file=sys.stderr)
self.perform_operation(infile, outfile, key, cli_args)
def get_infile(self, inname):
"""Returns the input file object"""
if inname:
print('Reading input from %s' % inname, file=sys.stderr)
fobj = open(inname, 'rb')
self.file_objects.append(fobj)
else:
print('Reading input from stdin', file=sys.stderr)
fobj = sys.stdin
return fobj
def get_outfile(self, outname):
"""Returns the output file object"""
if outname:
print('Will write output to %s' % outname, file=sys.stderr)
fobj = open(outname, 'wb')
self.file_objects.append(fobj)
else:
print('Will write output to stdout', file=sys.stderr)
fobj = sys.stdout
return fobj
class EncryptBigfileOperation(BigfileOperation):
"""Encrypts a file to VARBLOCK format."""
keyname = 'public'
description = ('Encrypts a file to an encrypted VARBLOCK file. The file '
'can be larger than the key length, but the output file is only '
'compatible with Python-RSA.')
operation = 'encrypt'
operation_past = 'encrypted'
operation_progressive = 'encrypting'
def perform_operation(self, infile, outfile, pub_key, cli_args=None):
"""Encrypts files to VARBLOCK."""
return rsa.bigfile.encrypt_bigfile(infile, outfile, pub_key)
class DecryptBigfileOperation(BigfileOperation):
"""Decrypts a file in VARBLOCK format."""
keyname = 'private'
description = ('Decrypts an encrypted VARBLOCK file that was encrypted '
'with pyrsa-encrypt-bigfile')
operation = 'decrypt'
operation_past = 'decrypted'
operation_progressive = 'decrypting'
key_class = rsa.PrivateKey
def perform_operation(self, infile, outfile, priv_key, cli_args=None):
"""Decrypts a VARBLOCK file."""
return rsa.bigfile.decrypt_bigfile(infile, outfile, priv_key)
encrypt = EncryptOperation()
decrypt = DecryptOperation()
sign = SignOperation()
verify = VerifyOperation()
encrypt_bigfile = EncryptBigfileOperation()
decrypt_bigfile = DecryptBigfileOperation()
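# Illustrative sketch (assumption, not part of this module): setup.py exposes
# the callables above as console_scripts entry points, for example
# 'pyrsa-encrypt = rsa.cli:encrypt' and 'pyrsa-decrypt = rsa.cli:decrypt',
# so each command parses its own sys.argv when run from the shell.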
| mit |
ptkool/spark | examples/src/main/python/ml/polynomial_expansion_example.py | 123 | 1522 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
# $example on$
from pyspark.ml.feature import PolynomialExpansion
from pyspark.ml.linalg import Vectors
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession\
.builder\
.appName("PolynomialExpansionExample")\
.getOrCreate()
# $example on$
df = spark.createDataFrame([
(Vectors.dense([2.0, 1.0]),),
(Vectors.dense([0.0, 0.0]),),
(Vectors.dense([3.0, -1.0]),)
], ["features"])
polyExpansion = PolynomialExpansion(degree=3, inputCol="features", outputCol="polyFeatures")
polyDF = polyExpansion.transform(df)
polyDF.show(truncate=False)
# $example off$
spark.stop()
| apache-2.0 |
michalbe/servo | src/components/script/dom/bindings/codegen/parser/tests/test_special_method_signature_mismatch.py | 241 | 6622 | def WebIDLTest(parser, harness):
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch1 {
getter long long foo(long index);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch2 {
getter void foo(unsigned long index);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch3 {
getter boolean foo(unsigned long index, boolean extraArg);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch4 {
getter boolean foo(unsigned long... index);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch5 {
getter boolean foo(optional unsigned long index);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch6 {
getter boolean foo();
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch7 {
deleter long long foo(long index);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch9 {
deleter boolean foo(unsigned long index, boolean extraArg);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch10 {
deleter boolean foo(unsigned long... index);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch11 {
deleter boolean foo(optional unsigned long index);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch12 {
deleter boolean foo();
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch13 {
setter long long foo(long index, long long value);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch15 {
setter boolean foo(unsigned long index, boolean value, long long extraArg);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch16 {
setter boolean foo(unsigned long index, boolean... value);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch17 {
setter boolean foo(unsigned long index, optional boolean value);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch18 {
setter boolean foo();
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch20 {
creator long long foo(long index, long long value);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch22 {
creator boolean foo(unsigned long index, boolean value, long long extraArg);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch23 {
creator boolean foo(unsigned long index, boolean... value);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch24 {
creator boolean foo(unsigned long index, optional boolean value);
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
threw = False
try:
parser.parse("""
interface SpecialMethodSignatureMismatch25 {
creator boolean foo();
};
""")
results = parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown.")
| mpl-2.0 |
jward6/academics-python | datastructures/linkedStackTest.py | 1 | 1812 | import unittest
class Empty(Exception):
pass
class Stack:
class _Node:
__slots__ = '_element', '_next'
def __init__(self, e, n):
self._element = e
self._next = n
def __init__(self):
self._head = self._Node(None, None)
self._length = 0
def __len__(self):
return self._length
def is_empty(self):
return self._length == 0
def top(self):
if self.is_empty():
raise Empty('Stack is empty')
return self._head._next._element
def push(self, e):
n = self._Node(e, self._head._next)
self._head._next = n
self._length += 1
def pop(self):
if self.is_empty():
raise Empty('Stack is empty')
n = self._head._next
self._head._next = n._next
self._length -= 1
return n._element
def __iter__(self):
if self.is_empty():
raise Empty('Stack is empty')
n = self._head._next
while(n != None):
yield n._element
n = n._next
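# Illustrative sketch (example only): elements are pushed and popped at the
# head node, so push(), pop() and top() are all O(1).
#   >>> s = Stack(); s.push(1); s.push(2)
#   >>> s.pop(), s.top(), len(s)
#   (2, 1, 1)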
class StackTests(unittest.TestCase):
def test_push(self):
s = Stack()
s.push(15)
s.push(20)
self.assertEqual(len(s), 2)
def test_top(self):
s = Stack()
s.push(15)
s.push(20)
self.assertEqual(s.top(), 20)
def test_pop(self):
s = Stack()
s.push(15)
s.push(20)
self.assertEqual(s.pop(), 20)
self.assertEqual(len(s), 1)
self.assertEqual(s.pop(), 15)
def test_iter(self):
s = Stack()
s.push(15)
s.push(20)
s.push(25)
val = 25
for e in s:
self.assertEqual(e, val)
val -= 5
if __name__ == '__main__':
unittest.main()
| mit |
ChrisTruncer/rdpy | rdpy/security/pyDes.py | 2 | 32292 | #############################################################################
# Documentation #
#############################################################################
# Author: Todd Whiteman
# Date: 16th March, 2009
# Version:     2.0.0
# License: Public Domain - free to do as you wish
# Homepage: http://twhiteman.netfirms.com/des.html
#
# This is a pure python implementation of the DES encryption algorithm.
# It's pure python to avoid portability issues, since most DES
# implementations are programmed in C (for performance reasons).
#
# Triple DES class is also implemented, utilising the DES base. Triple DES
# is either DES-EDE3 with a 24 byte key, or DES-EDE2 with a 16 byte key.
#
# See the README.txt that should come with this python module for the
# implementation methods used.
#
# Thanks to:
# * David Broadwell for ideas, comments and suggestions.
# * Mario Wolff for pointing out and debugging some triple des CBC errors.
# * Santiago Palladino for providing the PKCS5 padding technique.
# * Shaya for correcting the PAD_PKCS5 triple des CBC errors.
#
"""A pure python implementation of the DES and TRIPLE DES encryption algorithms.
Class initialization
--------------------
pyDes.des(key, [mode], [IV], [pad], [padmode])
pyDes.triple_des(key, [mode], [IV], [pad], [padmode])
key -> Bytes containing the encryption key. 8 bytes for DES, 16 or 24 bytes
for Triple DES
mode -> Optional argument for encryption type, can be either
pyDes.ECB (Electronic Code Book) or pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Length must be 8 bytes.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use during
            all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or PAD_PKCS5)
            to use during all encrypt/decrypt operations done with this instance.
I recommend using PAD_PKCS5 padding, as then you never need to worry about any
padding issues, as the padding can be removed unambiguously upon decrypting
data that was encrypted using PAD_PKCS5 padmode.
Common methods
--------------
encrypt(data, [pad], [padmode])
decrypt(data, [pad], [padmode])
data -> Bytes to be encrypted/decrypted
pad -> Optional argument. Only when using padmode of PAD_NORMAL. For
encryption, adds this characters to the end of the data block when
data is not a multiple of 8 bytes. For decryption, will remove the
trailing characters that match this pad character from the last 8
bytes of the unencrypted data block.
padmode -> Optional argument, set the padding mode, must be one of PAD_NORMAL
or PAD_PKCS5). Defaults to PAD_NORMAL.
Example
-------
from pyDes import *
data = "Please encrypt my data"
k = des("DESCRYPT", CBC, "\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5)
# For Python3, you'll need to use bytes, i.e.:
# data = b"Please encrypt my data"
# k = des(b"DESCRYPT", CBC, b"\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5)
d = k.encrypt(data)
print "Encrypted: %r" % d
print "Decrypted: %r" % k.decrypt(d)
assert k.decrypt(d, padmode=PAD_PKCS5) == data
See the module source (pyDes.py) for more examples of use.
You can also run the pyDes.py file without and arguments to see a simple test.
Note: This code was not written for high-end systems needing a fast
implementation, but rather a handy portable solution with small usage.
"""
import sys
# _pythonMajorVersion is used to handle Python2 and Python3 differences.
_pythonMajorVersion = sys.version_info[0]
# Modes of crypting / cyphering
ECB = 0
CBC = 1
# Modes of padding
PAD_NORMAL = 1
PAD_PKCS5 = 2
# PAD_PKCS5: is a method that will unambiguously remove all padding
# characters after decryption, when originally encrypted with
# this padding mode.
# For a good description of the PKCS5 padding technique, see:
# http://www.faqs.org/rfcs/rfc1423.html
# The base class shared by des and triple des.
class _baseDes(object):
def __init__(self, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
if IV:
IV = self._guardAgainstUnicode(IV)
if pad:
pad = self._guardAgainstUnicode(pad)
self.block_size = 8
# Sanity checking of arguments.
if pad and padmode == PAD_PKCS5:
raise ValueError("Cannot use a pad character with PAD_PKCS5")
if IV and len(IV) != self.block_size:
raise ValueError("Invalid Initial Value (IV), must be a multiple of " + str(self.block_size) + " bytes")
# Set the passed in variables
self._mode = mode
self._iv = IV
self._padding = pad
self._padmode = padmode
def getKey(self):
"""getKey() -> bytes"""
return self.__key
def setKey(self, key):
"""Will set the crypting key for this object."""
key = self._guardAgainstUnicode(key)
self.__key = key
def getMode(self):
"""getMode() -> pyDes.ECB or pyDes.CBC"""
return self._mode
def setMode(self, mode):
"""Sets the type of crypting mode, pyDes.ECB or pyDes.CBC"""
self._mode = mode
def getPadding(self):
"""getPadding() -> bytes of length 1. Padding character."""
return self._padding
def setPadding(self, pad):
"""setPadding() -> bytes of length 1. Padding character."""
if pad is not None:
pad = self._guardAgainstUnicode(pad)
self._padding = pad
def getPadMode(self):
"""getPadMode() -> pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
return self._padmode
def setPadMode(self, mode):
"""Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
self._padmode = mode
def getIV(self):
"""getIV() -> bytes"""
return self._iv
def setIV(self, IV):
"""Will set the Initial Value, used in conjunction with CBC mode"""
if not IV or len(IV) != self.block_size:
raise ValueError("Invalid Initial Value (IV), must be a multiple of " + str(self.block_size) + " bytes")
IV = self._guardAgainstUnicode(IV)
self._iv = IV
def _padData(self, data, pad, padmode):
# Pad data depending on the mode
if padmode is None:
# Get the default padding mode.
padmode = self.getPadMode()
if pad and padmode == PAD_PKCS5:
raise ValueError("Cannot use a pad character with PAD_PKCS5")
if padmode == PAD_NORMAL:
if len(data) % self.block_size == 0:
# No padding required.
return data
if not pad:
# Get the default padding.
pad = self.getPadding()
if not pad:
raise ValueError("Data must be a multiple of " + str(self.block_size) + " bytes in length. Use padmode=PAD_PKCS5 or set the pad character.")
data += (self.block_size - (len(data) % self.block_size)) * pad
elif padmode == PAD_PKCS5:
pad_len = 8 - (len(data) % self.block_size)
if _pythonMajorVersion < 3:
data += pad_len * chr(pad_len)
else:
data += bytes([pad_len] * pad_len)
return data
def _unpadData(self, data, pad, padmode):
# Unpad data depending on the mode.
if not data:
return data
if pad and padmode == PAD_PKCS5:
raise ValueError("Cannot use a pad character with PAD_PKCS5")
if padmode is None:
# Get the default padding mode.
padmode = self.getPadMode()
if padmode == PAD_NORMAL:
if not pad:
# Get the default padding.
pad = self.getPadding()
if pad:
data = data[:-self.block_size] + \
data[-self.block_size:].rstrip(pad)
elif padmode == PAD_PKCS5:
if _pythonMajorVersion < 3:
pad_len = ord(data[-1])
else:
pad_len = data[-1]
data = data[:-pad_len]
return data
def _guardAgainstUnicode(self, data):
# Only accept byte strings or ascii unicode values, otherwise
# there is no way to correctly decode the data into bytes.
if _pythonMajorVersion < 3:
if isinstance(data, unicode):
raise ValueError("pyDes can only work with bytes, not Unicode strings.")
else:
if isinstance(data, str):
# Only accept ascii unicode values.
try:
return data.encode('ascii')
except UnicodeEncodeError:
pass
raise ValueError("pyDes can only work with encoded strings, not Unicode.")
return data
#############################################################################
# DES #
#############################################################################
class des(_baseDes):
"""DES encryption/decrytpion class
Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes.
pyDes.des(key,[mode], [IV])
key -> Bytes containing the encryption key, must be exactly 8 bytes
mode -> Optional argument for encryption type, can be either pyDes.ECB
(Electronic Code Book), pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Must be 8 bytes in length.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use
                during all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or
                PAD_PKCS5) to use during all encrypt/decrypt operations done
with this instance.
"""
# Permutation and translation tables for DES
__pc1 = [56, 48, 40, 32, 24, 16, 8,
0, 57, 49, 41, 33, 25, 17,
9, 1, 58, 50, 42, 34, 26,
18, 10, 2, 59, 51, 43, 35,
62, 54, 46, 38, 30, 22, 14,
6, 61, 53, 45, 37, 29, 21,
13, 5, 60, 52, 44, 36, 28,
20, 12, 4, 27, 19, 11, 3
]
# number left rotations of pc1
__left_rotations = [
1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1
]
# permuted choice key (table 2)
__pc2 = [
13, 16, 10, 23, 0, 4,
2, 27, 14, 5, 20, 9,
22, 18, 11, 3, 25, 7,
15, 6, 26, 19, 12, 1,
40, 51, 30, 36, 46, 54,
29, 39, 50, 44, 32, 47,
43, 48, 38, 55, 33, 52,
45, 41, 49, 35, 28, 31
]
# initial permutation IP
__ip = [57, 49, 41, 33, 25, 17, 9, 1,
59, 51, 43, 35, 27, 19, 11, 3,
61, 53, 45, 37, 29, 21, 13, 5,
63, 55, 47, 39, 31, 23, 15, 7,
56, 48, 40, 32, 24, 16, 8, 0,
58, 50, 42, 34, 26, 18, 10, 2,
60, 52, 44, 36, 28, 20, 12, 4,
62, 54, 46, 38, 30, 22, 14, 6
]
# Expansion table for turning 32 bit blocks into 48 bits
__expansion_table = [
31, 0, 1, 2, 3, 4,
3, 4, 5, 6, 7, 8,
7, 8, 9, 10, 11, 12,
11, 12, 13, 14, 15, 16,
15, 16, 17, 18, 19, 20,
19, 20, 21, 22, 23, 24,
23, 24, 25, 26, 27, 28,
27, 28, 29, 30, 31, 0
]
# The (in)famous S-boxes
__sbox = [
# S1
[14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7,
0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8,
4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0,
15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13],
# S2
[15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10,
3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5,
0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15,
13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9],
# S3
[10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8,
13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1,
13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7,
1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12],
# S4
[7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15,
13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9,
10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4,
3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14],
# S5
[2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9,
14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6,
4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14,
11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3],
# S6
[12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11,
10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8,
9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6,
4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13],
# S7
[4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1,
13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6,
1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2,
6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12],
# S8
[13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7,
1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2,
7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8,
2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11],
]
# 32-bit permutation function P used on the output of the S-boxes
__p = [
15, 6, 19, 20, 28, 11,
27, 16, 0, 14, 22, 25,
4, 17, 30, 9, 1, 7,
23,13, 31, 26, 2, 8,
18, 12, 29, 5, 21, 10,
3, 24
]
# final permutation IP^-1
__fp = [
39, 7, 47, 15, 55, 23, 63, 31,
38, 6, 46, 14, 54, 22, 62, 30,
37, 5, 45, 13, 53, 21, 61, 29,
36, 4, 44, 12, 52, 20, 60, 28,
35, 3, 43, 11, 51, 19, 59, 27,
34, 2, 42, 10, 50, 18, 58, 26,
33, 1, 41, 9, 49, 17, 57, 25,
32, 0, 40, 8, 48, 16, 56, 24
]
# Type of crypting being done
ENCRYPT = 0x00
DECRYPT = 0x01
# Initialisation
def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
# Sanity checking of arguments.
if len(key) != 8:
raise ValueError("Invalid DES key size. Key must be exactly 8 bytes long.")
_baseDes.__init__(self, mode, IV, pad, padmode)
self.key_size = 8
self.L = []
self.R = []
self.Kn = [ [0] * 48 ] * 16 # 16 48-bit keys (K1 - K16)
self.final = []
self.setKey(key)
def setKey(self, key):
"""Will set the crypting key for this object. Must be 8 bytes."""
_baseDes.setKey(self, key)
self.__create_sub_keys()
def __String_to_BitList(self, data):
"""Turn the string data, into a list of bits (1, 0)'s"""
if _pythonMajorVersion < 3:
# Turn the strings into integers. Python 3 uses a bytes
# class, which already has this behaviour.
data = [ord(c) for c in data]
l = len(data) * 8
result = [0] * l
pos = 0
for ch in data:
i = 7
while i >= 0:
if ch & (1 << i) != 0:
result[pos] = 1
else:
result[pos] = 0
pos += 1
i -= 1
return result
def __BitList_to_String(self, data):
"""Turn the list of bits -> data, into a string"""
result = []
pos = 0
c = 0
while pos < len(data):
c += data[pos] << (7 - (pos % 8))
if (pos % 8) == 7:
result.append(c)
c = 0
pos += 1
if _pythonMajorVersion < 3:
return ''.join([ chr(c) for c in result ])
else:
return bytes(result)
def __permutate(self, table, block):
"""Permutate this block with the specified table"""
return list(map(lambda x: block[x], table))
# Transform the secret key, so that it is ready for data processing
# Create the 16 subkeys, K[1] - K[16]
def __create_sub_keys(self):
"""Create the 16 subkeys K[1] to K[16] from the given key"""
key = self.__permutate(des.__pc1, self.__String_to_BitList(self.getKey()))
i = 0
# Split into Left and Right sections
self.L = key[:28]
self.R = key[28:]
while i < 16:
j = 0
# Perform circular left shifts
while j < des.__left_rotations[i]:
self.L.append(self.L[0])
del self.L[0]
self.R.append(self.R[0])
del self.R[0]
j += 1
# Create one of the 16 subkeys through pc2 permutation
self.Kn[i] = self.__permutate(des.__pc2, self.L + self.R)
i += 1
# Main part of the encryption algorithm, the number cruncher :)
def __des_crypt(self, block, crypt_type):
"""Crypt the block of data through DES bit-manipulation"""
block = self.__permutate(des.__ip, block)
self.L = block[:32]
self.R = block[32:]
# Encryption starts from Kn[1] through to Kn[16]
if crypt_type == des.ENCRYPT:
iteration = 0
iteration_adjustment = 1
# Decryption starts from Kn[16] down to Kn[1]
else:
iteration = 15
iteration_adjustment = -1
i = 0
while i < 16:
# Make a copy of R[i-1], this will later become L[i]
tempR = self.R[:]
# Permutate R[i - 1] to start creating R[i]
self.R = self.__permutate(des.__expansion_table, self.R)
# Exclusive or R[i - 1] with K[i], create B[1] to B[8] whilst here
self.R = list(map(lambda x, y: x ^ y, self.R, self.Kn[iteration]))
B = [self.R[:6], self.R[6:12], self.R[12:18], self.R[18:24], self.R[24:30], self.R[30:36], self.R[36:42], self.R[42:]]
# Optimization: Replaced below commented code with above
#j = 0
#B = []
#while j < len(self.R):
# self.R[j] = self.R[j] ^ self.Kn[iteration][j]
# j += 1
# if j % 6 == 0:
# B.append(self.R[j-6:j])
# Permutate B[1] to B[8] using the S-Boxes
j = 0
Bn = [0] * 32
pos = 0
while j < 8:
# Work out the offsets
m = (B[j][0] << 1) + B[j][5]
n = (B[j][1] << 3) + (B[j][2] << 2) + (B[j][3] << 1) + B[j][4]
# Find the permutation value
v = des.__sbox[j][(m << 4) + n]
# Turn value into bits, add it to result: Bn
Bn[pos] = (v & 8) >> 3
Bn[pos + 1] = (v & 4) >> 2
Bn[pos + 2] = (v & 2) >> 1
Bn[pos + 3] = v & 1
pos += 4
j += 1
            # Permutate the concatenation of B[1] to B[8] (Bn)
self.R = self.__permutate(des.__p, Bn)
# Xor with L[i - 1]
self.R = list(map(lambda x, y: x ^ y, self.R, self.L))
# Optimization: This now replaces the below commented code
#j = 0
#while j < len(self.R):
# self.R[j] = self.R[j] ^ self.L[j]
# j += 1
# L[i] becomes R[i - 1]
self.L = tempR
i += 1
iteration += iteration_adjustment
# Final permutation of R[16]L[16]
self.final = self.__permutate(des.__fp, self.R + self.L)
return self.final
# Data to be encrypted/decrypted
def crypt(self, data, crypt_type):
"""Crypt the data in blocks, running it through des_crypt()"""
# Error check the data
if not data:
return ''
if len(data) % self.block_size != 0:
if crypt_type == des.DECRYPT: # Decryption must work on 8 byte blocks
raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n.")
if not self.getPadding():
raise ValueError("Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n. Try setting the optional padding character")
else:
data += (self.block_size - (len(data) % self.block_size)) * self.getPadding()
# print "Len of data: %f" % (len(data) / self.block_size)
if self.getMode() == CBC:
if self.getIV():
iv = self.__String_to_BitList(self.getIV())
else:
raise ValueError("For CBC mode, you must supply the Initial Value (IV) for ciphering")
# Split the data into blocks, crypting each one seperately
i = 0
dict = {}
result = []
#cached = 0
#lines = 0
while i < len(data):
# Test code for caching encryption results
#lines += 1
#if dict.has_key(data[i:i+8]):
#print "Cached result for: %s" % data[i:i+8]
# cached += 1
# result.append(dict[data[i:i+8]])
# i += 8
# continue
block = self.__String_to_BitList(data[i:i+8])
# Xor with IV if using CBC mode
if self.getMode() == CBC:
if crypt_type == des.ENCRYPT:
block = list(map(lambda x, y: x ^ y, block, iv))
#j = 0
#while j < len(block):
# block[j] = block[j] ^ iv[j]
# j += 1
processed_block = self.__des_crypt(block, crypt_type)
if crypt_type == des.DECRYPT:
processed_block = list(map(lambda x, y: x ^ y, processed_block, iv))
#j = 0
#while j < len(processed_block):
# processed_block[j] = processed_block[j] ^ iv[j]
# j += 1
iv = block
else:
iv = processed_block
else:
processed_block = self.__des_crypt(block, crypt_type)
# Add the resulting crypted block to our list
#d = self.__BitList_to_String(processed_block)
#result.append(d)
result.append(self.__BitList_to_String(processed_block))
#dict[data[i:i+8]] = d
i += 8
# print "Lines: %d, cached: %d" % (lines, cached)
# Return the full crypted string
if _pythonMajorVersion < 3:
return ''.join(result)
else:
return bytes.fromhex('').join(result)
def encrypt(self, data, pad=None, padmode=None):
"""encrypt(data, [pad], [padmode]) -> bytes
data : Bytes to be encrypted
pad : Optional argument for encryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be encrypted
with the already specified key. Data does not have to be a
multiple of 8 bytes if the padding character is supplied, or
        the padmode is set to PAD_PKCS5, as bytes will then be added to
        ensure the padded data is a multiple of 8 bytes.
"""
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
data = self._padData(data, pad, padmode)
return self.crypt(data, des.ENCRYPT)
def decrypt(self, data, pad=None, padmode=None):
"""decrypt(data, [pad], [padmode]) -> bytes
data : Bytes to be encrypted
pad : Optional argument for decryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be decrypted
with the already specified key. In PAD_NORMAL mode, if the
optional padding character is supplied, then the un-encrypted
data will have the padding characters removed from the end of
the bytes. This pad removal only occurs on the last 8 bytes of
the data (last data block). In PAD_PKCS5 mode, the special
padding end markers will be removed from the data after decrypting.
"""
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
data = self.crypt(data, des.DECRYPT)
return self._unpadData(data, pad, padmode)
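# Illustrative sketch (example only, mirroring the module docstring): with
# PAD_PKCS5 padding, data of any length round-trips through a DES cipher.
#   k = des(b"DESCRYPT", ECB, pad=None, padmode=PAD_PKCS5)
#   assert k.decrypt(k.encrypt(b"any length of data")) == b"any length of data"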
#############################################################################
# Triple DES #
#############################################################################
class triple_des(_baseDes):
"""Triple DES encryption/decrytpion class
This algorithm uses the DES-EDE3 (when a 24 byte key is supplied) or
the DES-EDE2 (when a 16 byte key is supplied) encryption methods.
Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes.
pyDes.des(key, [mode], [IV])
key -> Bytes containing the encryption key, must be either 16 or
24 bytes long
mode -> Optional argument for encryption type, can be either pyDes.ECB
(Electronic Code Book), pyDes.CBC (Cypher Block Chaining)
IV -> Optional Initial Value bytes, must be supplied if using CBC mode.
Must be 8 bytes in length.
pad -> Optional argument, set the pad character (PAD_NORMAL) to use
                during all encrypt/decrypt operations done with this instance.
padmode -> Optional argument, set the padding mode (PAD_NORMAL or
                PAD_PKCS5) to use during all encrypt/decrypt operations done
with this instance.
"""
def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL):
_baseDes.__init__(self, mode, IV, pad, padmode)
self.setKey(key)
def setKey(self, key):
"""Will set the crypting key for this object. Either 16 or 24 bytes long."""
self.key_size = 24 # Use DES-EDE3 mode
if len(key) != self.key_size:
if len(key) == 16: # Use DES-EDE2 mode
self.key_size = 16
else:
raise ValueError("Invalid triple DES key size. Key must be either 16 or 24 bytes long")
if self.getMode() == CBC:
if not self.getIV():
# Use the first 8 bytes of the key
self._iv = key[:self.block_size]
if len(self.getIV()) != self.block_size:
raise ValueError("Invalid IV, must be 8 bytes in length")
self.__key1 = des(key[:8], self._mode, self._iv,
self._padding, self._padmode)
self.__key2 = des(key[8:16], self._mode, self._iv,
self._padding, self._padmode)
if self.key_size == 16:
self.__key3 = self.__key1
else:
self.__key3 = des(key[16:], self._mode, self._iv,
self._padding, self._padmode)
_baseDes.setKey(self, key)
# Override setter methods to work on all 3 keys.
def setMode(self, mode):
"""Sets the type of crypting mode, pyDes.ECB or pyDes.CBC"""
_baseDes.setMode(self, mode)
for key in (self.__key1, self.__key2, self.__key3):
key.setMode(mode)
def setPadding(self, pad):
"""setPadding() -> bytes of length 1. Padding character."""
_baseDes.setPadding(self, pad)
for key in (self.__key1, self.__key2, self.__key3):
key.setPadding(pad)
def setPadMode(self, mode):
"""Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5"""
_baseDes.setPadMode(self, mode)
for key in (self.__key1, self.__key2, self.__key3):
key.setPadMode(mode)
def setIV(self, IV):
"""Will set the Initial Value, used in conjunction with CBC mode"""
_baseDes.setIV(self, IV)
for key in (self.__key1, self.__key2, self.__key3):
key.setIV(IV)
def encrypt(self, data, pad=None, padmode=None):
"""encrypt(data, [pad], [padmode]) -> bytes
data : bytes to be encrypted
pad : Optional argument for encryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be encrypted
with the already specified key. Data does not have to be a
multiple of 8 bytes if the padding character is supplied, or
        the padmode is set to PAD_PKCS5, as bytes will then be added to
        ensure the padded data is a multiple of 8 bytes.
"""
ENCRYPT = des.ENCRYPT
DECRYPT = des.DECRYPT
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
# Pad the data accordingly.
data = self._padData(data, pad, padmode)
if self.getMode() == CBC:
self.__key1.setIV(self.getIV())
self.__key2.setIV(self.getIV())
self.__key3.setIV(self.getIV())
i = 0
result = []
while i < len(data):
block = self.__key1.crypt(data[i:i+8], ENCRYPT)
block = self.__key2.crypt(block, DECRYPT)
block = self.__key3.crypt(block, ENCRYPT)
self.__key1.setIV(block)
self.__key2.setIV(block)
self.__key3.setIV(block)
result.append(block)
i += 8
if _pythonMajorVersion < 3:
return ''.join(result)
else:
return bytes.fromhex('').join(result)
else:
data = self.__key1.crypt(data, ENCRYPT)
data = self.__key2.crypt(data, DECRYPT)
return self.__key3.crypt(data, ENCRYPT)
def decrypt(self, data, pad=None, padmode=None):
"""decrypt(data, [pad], [padmode]) -> bytes
data : bytes to be encrypted
pad : Optional argument for decryption padding. Must only be one byte
padmode : Optional argument for overriding the padding mode.
The data must be a multiple of 8 bytes and will be decrypted
with the already specified key. In PAD_NORMAL mode, if the
optional padding character is supplied, then the un-encrypted
data will have the padding characters removed from the end of
the bytes. This pad removal only occurs on the last 8 bytes of
the data (last data block). In PAD_PKCS5 mode, the special
padding end markers will be removed from the data after
decrypting, no pad character is required for PAD_PKCS5.
"""
ENCRYPT = des.ENCRYPT
DECRYPT = des.DECRYPT
data = self._guardAgainstUnicode(data)
if pad is not None:
pad = self._guardAgainstUnicode(pad)
if self.getMode() == CBC:
self.__key1.setIV(self.getIV())
self.__key2.setIV(self.getIV())
self.__key3.setIV(self.getIV())
i = 0
result = []
while i < len(data):
iv = data[i:i+8]
block = self.__key3.crypt(iv, DECRYPT)
block = self.__key2.crypt(block, ENCRYPT)
block = self.__key1.crypt(block, DECRYPT)
self.__key1.setIV(iv)
self.__key2.setIV(iv)
self.__key3.setIV(iv)
result.append(block)
i += 8
if _pythonMajorVersion < 3:
data = ''.join(result)
else:
data = bytes.fromhex('').join(result)
else:
data = self.__key3.crypt(data, DECRYPT)
data = self.__key2.crypt(data, ENCRYPT)
data = self.__key1.crypt(data, DECRYPT)
return self._unpadData(data, pad, padmode) | gpl-3.0 |
ishay2b/tensorflow | tensorflow/python/kernel_tests/stack_op_test.py | 9 | 11444 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Pack Op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
def np_split_squeeze(array, axis):
  """Splits `array` along `axis` and squeezes that axis out of each piece."""
axis_len = array.shape[axis]
return [
np.squeeze(
arr, axis=(axis,)) for arr in np.split(
array, axis_len, axis=axis)
]
class StackOpTest(test.TestCase):
def testSimple(self):
np.random.seed(7)
with self.test_session(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
for dtype in [np.float32, np.int32, np.int64]:
data = np.random.randn(*shape).astype(dtype)
# Convert [data[0], data[1], ...] separately to tensorflow
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
# Pack back into a single tensorflow tensor
c = array_ops.stack(xs)
self.assertAllEqual(c.eval(), data)
def testSimpleParallel(self):
np.random.seed(7)
with self.test_session(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape).astype(np.float32)
xs = list(map(constant_op.constant, data))
c = array_ops.parallel_stack(xs)
self.assertAllEqual(c.eval(), data)
def testConst(self):
np.random.seed(7)
with self.test_session(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
for dtype in [np.float32, np.int32, np.int64]:
data = np.random.randn(*shape).astype(dtype)
# Pack back into a single tensorflow tensor directly using np array
c = array_ops.stack(data)
# This is implemented via a Const:
self.assertEqual(c.op.type, "Const")
self.assertAllEqual(c.eval(), data)
# Python lists also work for 1-D case:
if len(shape) == 1:
data_list = list(data)
cl = array_ops.stack(data_list)
self.assertEqual(cl.op.type, "Const")
self.assertAllEqual(cl.eval(), data)
# Verify that shape induction works with shapes produced via const stack
a = constant_op.constant([1, 2, 3, 4, 5, 6])
b = array_ops.reshape(a, array_ops.stack([2, 3]))
self.assertAllEqual(b.get_shape(), [2, 3])
def testConstParallel(self):
np.random.seed(7)
with self.test_session(use_gpu=True):
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape).astype(np.float32)
if len(shape) == 1:
data_list = list(data)
cl = array_ops.parallel_stack(data_list)
self.assertAllEqual(cl.eval(), data)
data = np.random.randn(*shape).astype(np.float32)
c = array_ops.parallel_stack(data)
self.assertAllEqual(c.eval(), data)
def testGradientsAxis0(self):
np.random.seed(7)
for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
shapes = [shape[1:]] * shape[0]
with self.test_session(use_gpu=True):
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
c = array_ops.stack(xs)
err = gradient_checker.compute_gradient_error(xs, shapes, c, shape)
self.assertLess(err, 1e-6)
def testGradientsAxis1(self):
np.random.seed(7)
for shape in (2, 3), (3, 2), (4, 3, 2):
data = np.random.randn(*shape)
shapes = [shape[1:]] * shape[0]
out_shape = list(shape[1:])
out_shape.insert(1, shape[0])
with self.test_session(use_gpu=True):
# TODO(irving): Remove list() once we handle maps correctly
xs = list(map(constant_op.constant, data))
c = array_ops.stack(xs, axis=1)
err = gradient_checker.compute_gradient_error(xs, shapes, c, out_shape)
self.assertLess(err, 1e-6)
def testZeroSize(self):
# Verify that stack doesn't crash for zero size inputs
with self.test_session(use_gpu=True):
for shape in (0,), (3, 0), (0, 3):
x = np.zeros((2,) + shape).astype(np.int32)
p = array_ops.stack(list(x)).eval()
self.assertAllEqual(p, x)
p = array_ops.parallel_stack(list(x)).eval()
self.assertAllEqual(p, x)
def testAxis0Default(self):
with self.test_session(use_gpu=True):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
stacked = array_ops.stack(t).eval()
parallel_stacked = array_ops.parallel_stack(t).eval()
self.assertAllEqual(stacked, np.array([[1, 2, 3], [4, 5, 6]]))
self.assertAllEqual(parallel_stacked, np.array([[1, 2, 3], [4, 5, 6]]))
def testAgainstNumpy(self):
# For 1 to 5 dimensions.
for i in range(1, 6):
expected = np.random.random(np.random.permutation(i) + 1)
# For all the possible axis to split it, including negative indices.
for j in range(-i, i):
test_arrays = np_split_squeeze(expected, j)
with self.test_session(use_gpu=True):
actual_pack = array_ops.stack(test_arrays, axis=j)
self.assertEqual(expected.shape, actual_pack.get_shape())
actual_pack = actual_pack.eval()
actual_stack = array_ops.stack(test_arrays, axis=j)
self.assertEqual(expected.shape, actual_stack.get_shape())
actual_stack = actual_stack.eval()
self.assertNDArrayNear(expected, actual_stack, 1e-6)
def testDimOutOfRange(self):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
with self.assertRaisesRegexp(ValueError, r"axis = 2 not in \[-2, 2\)"):
array_ops.stack(t, axis=2)
def testDimOutOfNegativeRange(self):
t = [constant_op.constant([1, 2, 3]), constant_op.constant([4, 5, 6])]
with self.assertRaisesRegexp(ValueError, r"axis = -3 not in \[-2, 2\)"):
array_ops.stack(t, axis=-3)
class AutomaticPackingTest(test.TestCase):
def testSimple(self):
with self.test_session(use_gpu=True):
self.assertAllEqual(
[1, 0, 2],
ops.convert_to_tensor([1, constant_op.constant(0), 2]).eval())
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor(
[[0, 0, 0], [0, constant_op.constant(1), 0],
[0, 0, 0]]).eval())
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor(
[[0, 0, 0], constant_op.constant([0, 1, 0]),
[0, 0, 0]]).eval())
self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
ops.convert_to_tensor([
constant_op.constant([0, 0, 0]),
constant_op.constant([0, 1, 0]),
constant_op.constant([0, 0, 0])
]).eval())
def testWithNDArray(self):
with self.test_session(use_gpu=True):
result = ops.convert_to_tensor([[[0., 0.],
constant_op.constant([1., 1.])],
np.array(
[[2., 2.], [3., 3.]],
dtype=np.float32)])
self.assertAllEqual([[[0., 0.], [1., 1.]], [[2., 2.], [3., 3.]]],
result.eval())
def testVariable(self):
with self.test_session(use_gpu=True):
v = variables.Variable(17)
result = ops.convert_to_tensor([[0, 0, 0], [0, v, 0], [0, 0, 0]])
v.initializer.run()
self.assertAllEqual([[0, 0, 0], [0, 17, 0], [0, 0, 0]], result.eval())
v.assign(38).op.run()
self.assertAllEqual([[0, 0, 0], [0, 38, 0], [0, 0, 0]], result.eval())
def testDtype(self):
t_0 = ops.convert_to_tensor([[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]])
self.assertEqual(dtypes.float32, t_0.dtype)
t_1 = ops.convert_to_tensor([[0., 0., 0.], constant_op.constant(
[0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]])
self.assertEqual(dtypes.float64, t_1.dtype)
t_2 = ops.convert_to_tensor(
[[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]], dtype=dtypes.float64)
self.assertEqual(dtypes.float64, t_2.dtype)
with self.assertRaises(TypeError):
ops.convert_to_tensor([
constant_op.constant(
[0., 0., 0.], dtype=dtypes.float32), constant_op.constant(
[0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]
])
with self.assertRaises(TypeError):
ops.convert_to_tensor(
[[0., 0., 0.], constant_op.constant(
[0., 0., 0.], dtype=dtypes.float64), [0., 0., 0.]],
dtype=dtypes.float32)
with self.assertRaises(TypeError):
ops.convert_to_tensor(
[constant_op.constant(
[0., 0., 0.], dtype=dtypes.float64)],
dtype=dtypes.float32)
def testPlaceholder(self):
with self.test_session(use_gpu=True):
# Test using placeholder with a defined shape.
ph_0 = array_ops.placeholder(dtypes.int32, shape=[])
result_0 = ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]])
self.assertAllEqual(
[[0, 0, 0], [0, 1, 0], [0, 0, 0]], result_0.eval(feed_dict={ph_0: 1}))
self.assertAllEqual(
[[0, 0, 0], [0, 2, 0], [0, 0, 0]], result_0.eval(feed_dict={ph_0: 2}))
# Test using placeholder with an undefined shape.
ph_1 = array_ops.placeholder(dtypes.int32)
result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]])
self.assertAllEqual(
[[0, 0, 0], [0, 1, 0], [0, 0, 0]], result_1.eval(feed_dict={ph_1: 1}))
self.assertAllEqual(
[[0, 0, 0], [0, 2, 0], [0, 0, 0]], result_1.eval(feed_dict={ph_1: 2}))
def testShapeErrors(self):
# Static shape error.
ph_0 = array_ops.placeholder(dtypes.int32, shape=[1])
with self.assertRaises(ValueError):
ops.convert_to_tensor([[0, 0, 0], [0, ph_0, 0], [0, 0, 0]])
# Dynamic shape error.
ph_1 = array_ops.placeholder(dtypes.int32)
result_1 = ops.convert_to_tensor([[0, 0, 0], [0, ph_1, 0], [0, 0, 0]])
with self.test_session(use_gpu=True):
with self.assertRaises(errors_impl.InvalidArgumentError):
result_1.eval(feed_dict={ph_1: [1]})
if __name__ == "__main__":
test.main()
| apache-2.0 |
LoHChina/nova | nova/tests/unit/virt/libvirt/volume/test_hgst.py | 38 | 2570 | # Copyright 2015 HGST
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from os_brick.initiator import connector
from nova.tests.unit.virt.libvirt.volume import test_volume
from nova.virt.libvirt.volume import hgst
# Actual testing of the os_brick HGST driver is done in the os_brick test cases.
# Here we're concerned only with the small API shim that connects Nova,
# so these will be pretty simple cases.
class LibvirtHGSTVolumeDriverTestCase(test_volume.LibvirtVolumeBaseTestCase):
def test_libvirt_hgst_driver_type(self):
drvr = hgst.LibvirtHGSTVolumeDriver(self.fake_conn)
self.assertIsInstance(drvr.connector, connector.HGSTConnector)
def test_libvirt_hgst_driver_connect(self):
def brick_conn_vol(data):
return {'path': '/dev/space01'}
drvr = hgst.LibvirtHGSTVolumeDriver(self.fake_conn)
drvr.connector.connect_volume = brick_conn_vol
di = {'path': '/dev/space01', 'name': 'space01'}
ci = {'data': di}
drvr.connect_volume(ci, None)
self.assertEqual('/dev/space01',
ci['data']['device_path'])
def test_libvirt_hgst_driver_get_config(self):
drvr = hgst.LibvirtHGSTVolumeDriver(self.fake_conn)
di = {'path': '/dev/space01', 'name': 'space01', 'type': 'raw',
'dev': 'vda1', 'bus': 'pci0', 'device_path': '/dev/space01'}
ci = {'data': di}
conf = drvr.get_config(ci, di)
self.assertEqual('block', conf.source_type)
self.assertEqual('/dev/space01', conf.source_path)
def test_libvirt_hgst_driver_disconnect(self):
drvr = hgst.LibvirtHGSTVolumeDriver(self.fake_conn)
drvr.connector.disconnect_volume = mock.MagicMock()
di = {'path': '/dev/space01', 'name': 'space01', 'type': 'raw',
'dev': 'vda1', 'bus': 'pci0', 'device_path': '/dev/space01'}
ci = {'data': di}
drvr.disconnect_volume(ci, di)
drvr.connector.disconnect_volume.assert_called_once_with(
di, None)
| apache-2.0 |
codester2/devide.johannes | extra/soappy-cvp/SOAPpy/SOAPBuilder.py | 8 | 22127 | """
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
ident = '$Id: SOAPBuilder.py,v 1.27 2005/02/21 20:24:13 warnes Exp $'
from version import __version__
import cgi
import copy
from wstools.XMLname import toXMLname, fromXMLname
import fpconst
# SOAPpy modules
from Config import Config
from NS import NS
from Types import *
# Test whether this Python version has Types.BooleanType
# If it doesn't have it, then False and True are serialized as integers
try:
BooleanType
pythonHasBooleanType = 1
except NameError:
pythonHasBooleanType = 0
################################################################################
# SOAP Builder
################################################################################
class SOAPBuilder:
_xml_top = '<?xml version="1.0"?>\n'
_xml_enc_top = '<?xml version="1.0" encoding="%s"?>\n'
_env_top = ( '%(ENV_T)s:Envelope\n' + \
' %(ENV_T)s:encodingStyle="%(ENC)s"\n' ) % \
NS.__dict__
_env_bot = '</%(ENV_T)s:Envelope>\n' % NS.__dict__
# Namespaces potentially defined in the Envelope tag.
_env_ns = {NS.ENC: NS.ENC_T, NS.ENV: NS.ENV_T,
NS.XSD: NS.XSD_T, NS.XSD2: NS.XSD2_T, NS.XSD3: NS.XSD3_T,
NS.XSI: NS.XSI_T, NS.XSI2: NS.XSI2_T, NS.XSI3: NS.XSI3_T}
def __init__(self, args = (), kw = {}, method = None, namespace = None,
header = None, methodattrs = None, envelope = 1, encoding = 'UTF-8',
use_refs = 0, config = Config, noroot = 0):
# Test the encoding, raising an exception if it's not known
if encoding != None:
''.encode(encoding)
self.args = args
self.kw = kw
self.envelope = envelope
self.encoding = encoding
self.method = method
self.namespace = namespace
self.header = header
self.methodattrs= methodattrs
self.use_refs = use_refs
self.config = config
self.out = []
self.tcounter = 0
self.ncounter = 1
self.icounter = 1
self.envns = {}
self.ids = {}
self.depth = 0
self.multirefs = []
self.multis = 0
self.body = not isinstance(args, bodyType)
self.noroot = noroot
def build(self):
if Config.debug: print "In build."
ns_map = {}
# Cache whether typing is on or not
typed = self.config.typed
if self.header:
# Create a header.
self.dump(self.header, "Header", typed = typed)
#self.header = None # Wipe it out so no one is using it.
if self.body:
# Call genns to record that we've used SOAP-ENV.
self.depth += 1
body_ns = self.genns(ns_map, NS.ENV)[0]
self.out.append("<%sBody>\n" % body_ns)
if self.method:
# Save the NS map so that it can be restored when we
# fall out of the scope of the method definition
save_ns_map = ns_map.copy()
self.depth += 1
a = ''
if self.methodattrs:
for (k, v) in self.methodattrs.items():
a += ' %s="%s"' % (k, v)
if self.namespace: # Use the namespace info handed to us
methodns, n = self.genns(ns_map, self.namespace)
else:
methodns, n = '', ''
self.out.append('<%s%s%s%s%s>\n' % (
methodns, self.method, n, a, self.genroot(ns_map)))
try:
if type(self.args) != TupleType:
args = (self.args,)
else:
args = self.args
for i in args:
self.dump(i, typed = typed, ns_map = ns_map)
if hasattr(self.config, "argsOrdering") and self.config.argsOrdering.has_key(self.method):
for k in self.config.argsOrdering.get(self.method):
self.dump(self.kw.get(k), k, typed = typed, ns_map = ns_map)
else:
for (k, v) in self.kw.items():
self.dump(v, k, typed = typed, ns_map = ns_map)
except RecursionError:
if self.use_refs == 0:
# restart
b = SOAPBuilder(args = self.args, kw = self.kw,
method = self.method, namespace = self.namespace,
header = self.header, methodattrs = self.methodattrs,
envelope = self.envelope, encoding = self.encoding,
use_refs = 1, config = self.config)
return b.build()
raise
if self.method:
self.out.append("</%s%s>\n" % (methodns, self.method))
# End of the method definition; drop any local namespaces
ns_map = save_ns_map
self.depth -= 1
if self.body:
# dump may add to self.multirefs, but the for loop will keep
# going until it has used all of self.multirefs, even those
# entries added while in the loop.
self.multis = 1
for obj, tag in self.multirefs:
self.dump(obj, tag, typed = typed, ns_map = ns_map)
self.out.append("</%sBody>\n" % body_ns)
self.depth -= 1
if self.envelope:
e = map (lambda ns: ' xmlns:%s="%s"\n' % (ns[1], ns[0]),
self.envns.items())
self.out = ['<', self._env_top] + e + ['>\n'] + \
self.out + \
[self._env_bot]
if self.encoding != None:
self.out.insert(0, self._xml_enc_top % self.encoding)
return ''.join(self.out).encode(self.encoding)
self.out.insert(0, self._xml_top)
return ''.join(self.out)
def gentag(self):
if Config.debug: print "In gentag."
self.tcounter += 1
return "v%d" % self.tcounter
def genns(self, ns_map, nsURI):
if nsURI == None:
return ('', '')
if type(nsURI) == TupleType: # already a tuple
if len(nsURI) == 2:
ns, nsURI = nsURI
else:
ns, nsURI = None, nsURI[0]
else:
ns = None
if ns_map.has_key(nsURI):
return (ns_map[nsURI] + ':', '')
if self._env_ns.has_key(nsURI):
ns = self.envns[nsURI] = ns_map[nsURI] = self._env_ns[nsURI]
return (ns + ':', '')
if not ns:
ns = "ns%d" % self.ncounter
self.ncounter += 1
ns_map[nsURI] = ns
if self.config.buildWithNamespacePrefix:
return (ns + ':', ' xmlns:%s="%s"' % (ns, nsURI))
else:
return ('', ' xmlns="%s"' % (nsURI))
def genroot(self, ns_map):
if self.noroot:
return ''
if self.depth != 2:
return ''
ns, n = self.genns(ns_map, NS.ENC)
return ' %sroot="%d"%s' % (ns, not self.multis, n)
# checkref checks an element to see if it needs to be encoded as a
# multi-reference element or not. If it returns None, the element has
# been handled and the caller can continue with subsequent elements.
# If it returns a string, the string should be included in the opening
# tag of the marshaled element.
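# For instance, with use_refs enabled the first occurrence of a shared object
# at depth 2 is marshaled with an ' id="i1"' attribute, while a later
# reference to the same object is emitted as a stand-alone element of the
# form <tag href="#i1"/>.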
def checkref(self, obj, tag, ns_map):
if self.depth < 2:
return ''
if not self.ids.has_key(id(obj)):
n = self.ids[id(obj)] = self.icounter
self.icounter = n + 1
if self.use_refs == 0:
return ''
if self.depth == 2:
return ' id="i%d"' % n
self.multirefs.append((obj, tag))
else:
if self.use_refs == 0:
raise RecursionError, "Cannot serialize recursive object"
n = self.ids[id(obj)]
if self.multis and self.depth == 2:
return ' id="i%d"' % n
self.out.append('<%s href="#i%d"%s/>\n' %
(tag, n, self.genroot(ns_map)))
return None
# dumpers
def dump(self, obj, tag = None, typed = 1, ns_map = {}):
if Config.debug: print "In dump.", "obj=", obj
ns_map = ns_map.copy()
self.depth += 1
if type(tag) not in (NoneType, StringType, UnicodeType):
raise KeyError, "tag must be a string or None"
try:
meth = getattr(self, "dump_" + type(obj).__name__)
except AttributeError:
if type(obj) == LongType:
obj_type = "integer"
elif pythonHasBooleanType and type(obj) == BooleanType:
obj_type = "boolean"
else:
obj_type = type(obj).__name__
self.out.append(self.dumper(None, obj_type, obj, tag, typed,
ns_map, self.genroot(ns_map)))
else:
meth(obj, tag, typed, ns_map)
self.depth -= 1
# generic dumper
def dumper(self, nsURI, obj_type, obj, tag, typed = 1, ns_map = {},
rootattr = '', id = '',
xml = '<%(tag)s%(type)s%(id)s%(attrs)s%(root)s>%(data)s</%(tag)s>\n'):
if Config.debug: print "In dumper."
if nsURI == None:
nsURI = self.config.typesNamespaceURI
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
a = n = t = ''
if typed and obj_type:
ns, n = self.genns(ns_map, nsURI)
ins = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
t = ' %stype="%s%s"%s' % (ins, ns, obj_type, n)
try: a = obj._marshalAttrs(ns_map, self)
except: pass
try: data = obj._marshalData()
except:
if (obj_type != "string"): # strings are already encoded
data = cgi.escape(str(obj))
else:
data = obj
return xml % {"tag": tag, "type": t, "data": data, "root": rootattr,
"id": id, "attrs": a}
def dump_float(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_float."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
if Config.strict_range:
doubleType(obj)
if fpconst.isPosInf(obj):
obj = "INF"
elif fpconst.isNegInf(obj):
obj = "-INF"
elif fpconst.isNaN(obj):
obj = "NaN"
else:
obj = repr(obj)
# Note: python 'float' is actually a SOAP 'double'.
self.out.append(self.dumper(None, "double", obj, tag, typed, ns_map,
self.genroot(ns_map)))
def dump_string(self, obj, tag, typed = 0, ns_map = {}):
if Config.debug: print "In dump_string."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try: data = obj._marshalData()
except: data = obj
self.out.append(self.dumper(None, "string", cgi.escape(data), tag,
typed, ns_map, self.genroot(ns_map), id))
dump_str = dump_string # For Python 2.2+
dump_unicode = dump_string
def dump_None(self, obj, tag, typed = 0, ns_map = {}):
if Config.debug: print "In dump_None."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
ns = self.genns(ns_map, self.config.schemaNamespaceURI)[0]
self.out.append('<%s %snull="1"%s/>\n' %
(tag, ns, self.genroot(ns_map)))
dump_NoneType = dump_None # For Python 2.2+
def dump_list(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_list.", "obj=", obj
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
if type(obj) == InstanceType:
data = obj.data
else:
data = obj
if typed:
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try:
sample = data[0]
empty = 0
except:
# preserve type if present
if getattr(obj,"_typed",None) and getattr(obj,"_type",None):
if getattr(obj, "_complexType", None):
sample = typedArrayType(typed=obj._type,
complexType = obj._complexType)
sample._typename = obj._type
if not getattr(obj,"_ns",None): obj._ns = NS.URN
else:
sample = typedArrayType(typed=obj._type)
else:
sample = structType()
empty = 1
# First scan list to see if all are the same type
same_type = 1
if not empty:
for i in data[1:]:
if type(sample) != type(i) or \
(type(sample) == InstanceType and \
sample.__class__ != i.__class__):
same_type = 0
break
ndecl = ''
if same_type:
if (isinstance(sample, structType)) or \
type(sample) == DictType or \
(isinstance(sample, anyType) and \
(getattr(sample, "_complexType", None) and \
sample._complexType)): # force to urn struct
try:
tns = obj._ns or NS.URN
except:
tns = NS.URN
ns, ndecl = self.genns(ns_map, tns)
try:
typename = sample._typename
except:
typename = "SOAPStruct"
t = ns + typename
elif isinstance(sample, anyType):
ns = sample._validNamespaceURI(self.config.typesNamespaceURI,
self.config.strictNamespaces)
if ns:
ns, ndecl = self.genns(ns_map, ns)
t = ns + str(sample._type)
else:
t = 'ur-type'
else:
typename = type(sample).__name__
# For Python 2.2+
if type(sample) == StringType: typename = 'string'
# HACK: unicode is a SOAP string
if type(sample) == UnicodeType: typename = 'string'
# HACK: python 'float' is actually a SOAP 'double'.
if typename=="float": typename="double"
t = self.genns(ns_map, self.config.typesNamespaceURI)[0] + \
typename
else:
t = self.genns(ns_map, self.config.typesNamespaceURI)[0] + \
"ur-type"
try: a = obj._marshalAttrs(ns_map, self)
except: a = ''
ens, edecl = self.genns(ns_map, NS.ENC)
ins, idecl = self.genns(ns_map, self.config.schemaNamespaceURI)
if typed:
self.out.append(
'<%s %sarrayType="%s[%d]" %stype="%sArray"%s%s%s%s%s%s>\n' %
(tag, ens, t, len(data), ins, ens, ndecl, edecl, idecl,
self.genroot(ns_map), id, a))
if typed:
try: elemsname = obj._elemsname
except: elemsname = "item"
else:
elemsname = tag
for i in data:
self.dump(i, elemsname, not same_type, ns_map)
if typed: self.out.append('</%s>\n' % tag)
dump_tuple = dump_list
def dump_dictionary(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_dictionary."
tag = tag or self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
id = self.checkref(obj, tag, ns_map)
if id == None:
return
try: a = obj._marshalAttrs(ns_map, self)
except: a = ''
self.out.append('<%s%s%s%s>\n' %
(tag, id, a, self.genroot(ns_map)))
for (k, v) in obj.items():
if k[0] != "_":
self.dump(v, k, 1, ns_map)
self.out.append('</%s>\n' % tag)
dump_dict = dump_dictionary # For Python 2.2+
def dump_instance(self, obj, tag, typed = 1, ns_map = {}):
if Config.debug: print "In dump_instance.", "obj=", obj, "tag=", tag
if not tag:
# If it has a name use it.
if isinstance(obj, anyType) and obj._name:
tag = obj._name
else:
tag = self.gentag()
tag = toXMLname(tag) # convert from SOAP 1.2 XML name encoding
if isinstance(obj, arrayType): # Array
self.dump_list(obj, tag, typed, ns_map)
return
if isinstance(obj, faultType): # Fault
cns, cdecl = self.genns(ns_map, NS.ENC)
vns, vdecl = self.genns(ns_map, NS.ENV)
self.out.append('''<%sFault %sroot="1"%s%s>
<faultcode>%s</faultcode>
<faultstring>%s</faultstring>
''' % (vns, cns, vdecl, cdecl, obj.faultcode, obj.faultstring))
if hasattr(obj, "detail"):
self.dump(obj.detail, "detail", typed, ns_map)
self.out.append("</%sFault>\n" % vns)
return
r = self.genroot(ns_map)
try: a = obj._marshalAttrs(ns_map, self)
except: a = ''
if isinstance(obj, voidType): # void
self.out.append("<%s%s%s></%s>\n" % (tag, a, r, tag))
return
id = self.checkref(obj, tag, ns_map)
if id == None:
return
if isinstance(obj, structType):
# Check for namespace
ndecl = ''
ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
self.config.strictNamespaces)
if ns:
ns, ndecl = self.genns(ns_map, ns)
tag = ns + tag
self.out.append("<%s%s%s%s%s>\n" % (tag, ndecl, id, a, r))
keylist = obj.__dict__.keys()
# first write out items with order information
if hasattr(obj, '_keyord'):
for i in range(len(obj._keyord)):
self.dump(obj._aslist(i), obj._keyord[i], 1, ns_map)
keylist.remove(obj._keyord[i])
# now write out the rest
for k in keylist:
if (k[0] != "_"):
self.dump(getattr(obj,k), k, 1, ns_map)
if isinstance(obj, bodyType):
self.multis = 1
for v, k in self.multirefs:
self.dump(v, k, typed = typed, ns_map = ns_map)
self.out.append('</%s>\n' % tag)
elif isinstance(obj, anyType):
t = ''
if typed:
ns = obj._validNamespaceURI(self.config.typesNamespaceURI,
self.config.strictNamespaces)
if ns:
ons, ondecl = self.genns(ns_map, ns)
ins, indecl = self.genns(ns_map,
self.config.schemaNamespaceURI)
t = ' %stype="%s%s"%s%s' % \
(ins, ons, obj._type, ondecl, indecl)
self.out.append('<%s%s%s%s%s>%s</%s>\n' %
(tag, t, id, a, r, obj._marshalData(), tag))
else: # Some Class
self.out.append('<%s%s%s>\n' % (tag, id, r))
for (k, v) in obj.__dict__.items():
if k[0] != "_":
self.dump(v, k, 1, ns_map)
self.out.append('</%s>\n' % tag)
################################################################################
# SOAPBuilder's more public interface
################################################################################
def buildSOAP(args=(), kw={}, method=None, namespace=None,
header=None, methodattrs=None, envelope=1, encoding='UTF-8',
config=Config, noroot = 0):
t = SOAPBuilder(args=args, kw=kw, method=method, namespace=namespace,
header=header, methodattrs=methodattrs,envelope=envelope,
encoding=encoding, config=config,noroot=noroot)
return t.build()
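# A minimal usage sketch of buildSOAP, kept as a comment so importing this
# module stays side-effect free. The method name, namespace and argument
# values are illustrative only and not part of SOAPpy itself:
#
# envelope = buildSOAP(kw={'symbol': 'IBM'}, method='getQuote',
#                      namespace='urn:example-quotes', encoding='UTF-8')
#
# The returned string is a complete SOAP envelope ready to be POSTed to a
# service endpoint; the higher-level SOAPProxy client normally drives
# buildSOAP for you, so direct calls are mostly useful for debugging.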
| bsd-3-clause |
lmazuel/ansible | lib/ansible/modules/source_control/git_config.py | 70 | 7530 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Marius Gedminas <[email protected]>
# (c) 2016, Matthew Gamble <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: git_config
author:
- "Matthew Gamble"
- "Marius Gedminas"
version_added: 2.1
requirements: ['git']
short_description: Read and write git configuration
description:
- The C(git_config) module changes git configuration by invoking 'git config'.
This is needed if you don't want to use M(template) for the entire git
config file (e.g. because you need to change just C(user.email) in
/etc/.git/config). Solutions involving M(command) are cumbersome or
don't work correctly in check mode.
options:
list_all:
description:
- List all settings (optionally limited to a given I(scope))
required: false
choices: [ "yes", "no" ]
default: no
name:
description:
- The name of the setting. If no value is supplied, the value will
be read from the config if it has been set.
required: false
default: null
repo:
description:
- Path to a git repository for reading and writing values from a
specific repo.
required: false
default: null
scope:
description:
- Specify which scope to read/set values from. This is required
when setting config values. If this is set to local, you must
also specify the repo parameter. It defaults to system only when
not using I(list_all)=yes.
required: false
choices: [ "local", "global", "system" ]
default: null
value:
description:
- When specifying the name of a single setting, supply a value to
set that setting to the given value.
required: false
default: null
'''
EXAMPLES = '''
# Set some settings in ~/.gitconfig
- git_config:
name: alias.ci
scope: global
value: commit
- git_config:
name: alias.st
scope: global
value: status
# Or system-wide:
- git_config:
name: alias.remotev
scope: system
value: remote -v
- git_config:
name: core.editor
scope: global
value: vim
# scope=system is the default
- git_config:
name: alias.diffc
value: diff --cached
- git_config:
name: color.ui
value: auto
# Make etckeeper not complain when invoked by cron
- git_config:
name: user.email
repo: /etc
scope: local
value: 'root@{{ ansible_fqdn }}'
# Read individual values from git config
- git_config:
name: alias.ci
scope: global
# scope: system is also assumed when reading values, unless list_all=yes
- git_config:
name: alias.diffc
# Read all values from git config
- git_config:
list_all: yes
scope: global
# When list_all=yes and no scope is specified, you get configuration from all scopes
- git_config:
list_all: yes
# Specify a repository to include local settings
- git_config:
list_all: yes
repo: /path/to/repo.git
'''
RETURN = '''
---
config_value:
description: When list_all=no and value is not set, a string containing the value of the setting in name
returned: success
type: string
sample: "vim"
config_values:
description: When list_all=yes, a dict containing key/value pairs of multiple configuration settings
returned: success
type: dictionary
sample:
core.editor: "vim"
color.ui: "auto"
alias.diffc: "diff --cached"
alias.remotev: "remote -v"
'''
def main():
module = AnsibleModule(
argument_spec=dict(
list_all=dict(required=False, type='bool', default=False),
name=dict(type='str'),
repo=dict(type='path'),
scope=dict(required=False, type='str', choices=['local', 'global', 'system']),
value=dict(required=False)
),
mutually_exclusive=[['list_all', 'name'], ['list_all', 'value']],
required_if=[('scope', 'local', ['repo'])],
required_one_of=[['list_all', 'name']],
supports_check_mode=True,
)
git_path = module.get_bin_path('git', True)
params = module.params
# We check error message for a pattern, so we need to make sure the messages appear in the form we're expecting.
# Set the locale to C to ensure consistent messages.
module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
if params['name']:
name = params['name']
else:
name = None
if params['scope']:
scope = params['scope']
elif params['list_all']:
scope = None
else:
scope = 'system'
if params['value']:
new_value = params['value']
else:
new_value = None
args = [git_path, "config", "--includes"]
if params['list_all']:
args.append('-l')
if scope:
args.append("--" + scope)
if name:
args.append(name)
if scope == 'local':
dir = params['repo']
elif params['list_all'] and params['repo']:
# Include local settings from a specific repo when listing all available settings
dir = params['repo']
else:
# Run from root directory to avoid accidentally picking up any local config settings
dir = "/"
(rc, out, err) = module.run_command(' '.join(args), cwd=dir)
if params['list_all'] and scope and rc == 128 and 'unable to read config file' in err:
# This just means nothing has been set at the given scope
module.exit_json(changed=False, msg='', config_values={})
elif rc >= 2:
# If the return code is 1, it just means the option hasn't been set yet, which is fine.
module.fail_json(rc=rc, msg=err, cmd=' '.join(args))
if params['list_all']:
values = out.rstrip().splitlines()
config_values = {}
for value in values:
k, v = value.split('=', 1)
config_values[k] = v
module.exit_json(changed=False, msg='', config_values=config_values)
elif not new_value:
module.exit_json(changed=False, msg='', config_value=out.rstrip())
else:
old_value = out.rstrip()
if old_value == new_value:
module.exit_json(changed=False, msg="")
if not module.check_mode:
new_value_quoted = "'" + new_value + "'"
(rc, out, err) = module.run_command(' '.join(args + [new_value_quoted]), cwd=dir)
if err:
module.fail_json(rc=rc, msg=err, cmd=' '.join(args + [new_value_quoted]))
module.exit_json(
msg='setting changed',
diff=dict(
before_header=' '.join(args),
before=old_value + "\n",
after_header=' '.join(args),
after=new_value + "\n"
),
changed=True
)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
alon/servo | components/script/dom/bindings/codegen/parser/tests/test_lenientSetter.py | 137 | 1867 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
def should_throw(parser, harness, message, code):
parser = parser.reset();
threw = False
try:
parser.parse(code)
parser.finish()
except:
threw = True
harness.ok(threw, "Should have thrown: %s" % message)
def WebIDLTest(parser, harness):
# The [LenientSetter] extended attribute MUST take no arguments.
should_throw(parser, harness, "no arguments", """
interface I {
[LenientSetter=X] readonly attribute long A;
};
""")
# An attribute with the [LenientSetter] extended attribute MUST NOT
# also be declared with the [PutForwards] extended attribute.
should_throw(parser, harness, "PutForwards", """
interface I {
[PutForwards=B, LenientSetter] readonly attribute J A;
};
interface J {
attribute long B;
};
""")
# An attribute with the [LenientSetter] extended attribute MUST NOT
# also be declared with the [Replaceable] extended attribute.
should_throw(parser, harness, "Replaceable", """
interface I {
[Replaceable, LenientSetter] readonly attribute J A;
};
""")
# The [LenientSetter] extended attribute MUST NOT be used on an
# attribute that is not read only.
should_throw(parser, harness, "writable attribute", """
interface I {
[LenientSetter] attribute long A;
};
""")
# The [LenientSetter] extended attribute MUST NOT be used on a
# static attribute.
should_throw(parser, harness, "static attribute", """
interface I {
[LenientSetter] static readonly attribute long A;
};
""")
| mpl-2.0 |
aperigault/ansible | lib/ansible/modules/cloud/softlayer/sl_vm.py | 34 | 11435 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: sl_vm
short_description: create or cancel a virtual instance in SoftLayer
description:
- Creates or cancels SoftLayer instances.
- When created, optionally waits for it to be 'running'.
version_added: "2.1"
options:
instance_id:
description:
- Instance Id of the virtual instance to perform action option.
hostname:
description:
- Hostname to be provided to a virtual instance.
domain:
description:
- Domain name to be provided to a virtual instance.
datacenter:
description:
- Datacenter for the virtual instance to be deployed.
tags:
description:
- Tag or list of tags to be provided to a virtual instance.
hourly:
description:
- Flag to determine if the instance should be hourly billed.
type: bool
default: 'yes'
private:
description:
- Flag to determine if the instance should be private only.
type: bool
default: 'no'
dedicated:
description:
- Flag to determine if the instance should be deployed in dedicated space.
type: bool
default: 'no'
local_disk:
description:
- Flag to determine if local disk should be used for the new instance.
type: bool
default: 'yes'
cpus:
description:
- Count of cpus to be assigned to new virtual instance.
required: true
memory:
description:
- Amount of memory to be assigned to new virtual instance.
required: true
disks:
description:
- List of disk sizes to be assigned to new virtual instance.
required: true
default: [ 25 ]
os_code:
description:
- OS Code to be used for new virtual instance.
image_id:
description:
- Image Template to be used for new virtual instance.
nic_speed:
description:
- NIC Speed to be assigned to new virtual instance.
default: 10
public_vlan:
description:
- VLAN by its Id to be assigned to the public NIC.
private_vlan:
description:
- VLAN by its Id to be assigned to the private NIC.
ssh_keys:
description:
- List of ssh keys by their Id to be assigned to a virtual instance.
post_uri:
description:
- URL of a post provisioning script to be loaded and executed on virtual instance.
state:
description:
- Create, or cancel a virtual instance.
- Specify C(present) for create, C(absent) to cancel.
choices: [ absent, present ]
default: present
wait:
description:
- Flag used to wait for active status before returning.
type: bool
default: 'yes'
wait_time:
description:
- Time in seconds before wait returns.
default: 600
requirements:
- python >= 2.6
- softlayer >= 4.1.1
author:
- Matt Colton (@mcltn)
'''
EXAMPLES = '''
- name: Build instance
hosts: localhost
gather_facts: no
tasks:
- name: Build instance request
sl_vm:
hostname: instance-1
domain: anydomain.com
datacenter: dal09
tags: ansible-module-test
hourly: yes
private: no
dedicated: no
local_disk: yes
cpus: 1
memory: 1024
disks: [25]
os_code: UBUNTU_LATEST
wait: no
- name: Build additional instances
hosts: localhost
gather_facts: no
tasks:
- name: Build instances request
sl_vm:
hostname: "{{ item.hostname }}"
domain: "{{ item.domain }}"
datacenter: "{{ item.datacenter }}"
tags: "{{ item.tags }}"
hourly: "{{ item.hourly }}"
private: "{{ item.private }}"
dedicated: "{{ item.dedicated }}"
local_disk: "{{ item.local_disk }}"
cpus: "{{ item.cpus }}"
memory: "{{ item.memory }}"
disks: "{{ item.disks }}"
os_code: "{{ item.os_code }}"
ssh_keys: "{{ item.ssh_keys }}"
wait: "{{ item.wait }}"
with_items:
- hostname: instance-2
domain: anydomain.com
datacenter: dal09
tags:
- ansible-module-test
- ansible-module-test-slaves
hourly: yes
private: no
dedicated: no
local_disk: yes
cpus: 1
memory: 1024
disks:
- 25
- 100
os_code: UBUNTU_LATEST
ssh_keys: []
wait: True
- hostname: instance-3
domain: anydomain.com
datacenter: dal09
tags:
- ansible-module-test
- ansible-module-test-slaves
hourly: yes
private: no
dedicated: no
local_disk: yes
cpus: 1
memory: 1024
disks:
- 25
- 100
os_code: UBUNTU_LATEST
ssh_keys: []
wait: yes
- name: Cancel instances
hosts: localhost
gather_facts: no
tasks:
- name: Cancel by tag
sl_vm:
state: absent
tags: ansible-module-test
'''
# TODO: Disabled RETURN as it is breaking the build for docs. Needs to be fixed.
RETURN = '''# '''
import json
import time
try:
import SoftLayer
from SoftLayer import VSManager
HAS_SL = True
vsManager = VSManager(SoftLayer.create_client_from_env())
except ImportError:
HAS_SL = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import string_types
# TODO: get this info from API
STATES = ['present', 'absent']
DATACENTERS = ['ams01', 'ams03', 'che01', 'dal01', 'dal05', 'dal06', 'dal09', 'dal10', 'dal12', 'dal13', 'fra02',
'fra04', 'fra05', 'hkg02', 'hou02', 'lon02', 'lon04', 'lon06', 'mel01', 'mex01', 'mil01', 'mon01',
'osl01', 'par01', 'sao01', 'sea01', 'seo01', 'sjc01', 'sjc03', 'sjc04', 'sng01', 'syd01', 'syd04',
'tok02', 'tor01', 'wdc01', 'wdc04', 'wdc06', 'wdc07']
CPU_SIZES = [1, 2, 4, 8, 16, 32, 56]
MEMORY_SIZES = [1024, 2048, 4096, 6144, 8192, 12288, 16384, 32768, 49152, 65536, 131072, 247808]
INITIALDISK_SIZES = [25, 100]
LOCALDISK_SIZES = [25, 100, 150, 200, 300]
SANDISK_SIZES = [10, 20, 25, 30, 40, 50, 75, 100, 125, 150, 175, 200, 250, 300, 350, 400, 500, 750, 1000, 1500, 2000]
NIC_SPEEDS = [10, 100, 1000]
def create_virtual_instance(module):
instances = vsManager.list_instances(
hostname=module.params.get('hostname'),
domain=module.params.get('domain'),
datacenter=module.params.get('datacenter')
)
if instances:
return False, None
# Check if OS or Image Template is provided (Can't be both, defaults to OS)
if (module.params.get('os_code') is not None and module.params.get('os_code') != ''):
module.params['image_id'] = ''
elif (module.params.get('image_id') is not None and module.params.get('image_id') != ''):
module.params['os_code'] = ''
module.params['disks'] = [] # Blank out disks since it will use the template
else:
return False, None
tags = module.params.get('tags')
if isinstance(tags, list):
tags = ','.join(map(str, module.params.get('tags')))
instance = vsManager.create_instance(
hostname=module.params.get('hostname'),
domain=module.params.get('domain'),
cpus=module.params.get('cpus'),
memory=module.params.get('memory'),
hourly=module.params.get('hourly'),
datacenter=module.params.get('datacenter'),
os_code=module.params.get('os_code'),
image_id=module.params.get('image_id'),
local_disk=module.params.get('local_disk'),
disks=module.params.get('disks'),
ssh_keys=module.params.get('ssh_keys'),
nic_speed=module.params.get('nic_speed'),
private=module.params.get('private'),
public_vlan=module.params.get('public_vlan'),
private_vlan=module.params.get('private_vlan'),
dedicated=module.params.get('dedicated'),
post_uri=module.params.get('post_uri'),
tags=tags,
)
if instance is not None and instance['id'] > 0:
return True, instance
else:
return False, None
def wait_for_instance(module, id):
instance = None
completed = False
wait_timeout = time.time() + module.params.get('wait_time')
while not completed and wait_timeout > time.time():
try:
completed = vsManager.wait_for_ready(id, 10, 2)
if completed:
instance = vsManager.get_instance(id)
except Exception:
completed = False
return completed, instance
def cancel_instance(module):
canceled = True
if module.params.get('instance_id') is None and (module.params.get('tags') or module.params.get('hostname') or module.params.get('domain')):
tags = module.params.get('tags')
if isinstance(tags, string_types):
tags = [module.params.get('tags')]
instances = vsManager.list_instances(tags=tags, hostname=module.params.get('hostname'), domain=module.params.get('domain'))
for instance in instances:
try:
vsManager.cancel_instance(instance['id'])
except Exception:
canceled = False
elif module.params.get('instance_id') and module.params.get('instance_id') != 0:
try:
vsManager.cancel_instance(module.params.get('instance_id'))
except Exception:
canceled = False
else:
return False, None
return canceled, None
def main():
module = AnsibleModule(
argument_spec=dict(
instance_id=dict(type='str'),
hostname=dict(type='str'),
domain=dict(type='str'),
datacenter=dict(type='str', choices=DATACENTERS),
tags=dict(type='str'),
hourly=dict(type='bool', default=True),
private=dict(type='bool', default=False),
dedicated=dict(type='bool', default=False),
local_disk=dict(type='bool', default=True),
cpus=dict(type='int', choices=CPU_SIZES),
memory=dict(type='int', choices=MEMORY_SIZES),
disks=dict(type='list', default=[25]),
os_code=dict(type='str'),
image_id=dict(type='str'),
nic_speed=dict(type='int', choices=NIC_SPEEDS),
public_vlan=dict(type='str'),
private_vlan=dict(type='str'),
ssh_keys=dict(type='list', default=[]),
post_uri=dict(type='str'),
state=dict(type='str', default='present', choices=STATES),
wait=dict(type='bool', default=True),
wait_time=dict(type='int', default=600),
)
)
if not HAS_SL:
module.fail_json(msg='softlayer python library required for this module')
if module.params.get('state') == 'absent':
(changed, instance) = cancel_instance(module)
elif module.params.get('state') == 'present':
(changed, instance) = create_virtual_instance(module)
if module.params.get('wait') is True and instance:
(changed, instance) = wait_for_instance(module, instance['id'])
module.exit_json(changed=changed, instance=json.loads(json.dumps(instance, default=lambda o: o.__dict__)))
if __name__ == '__main__':
main()
| gpl-3.0 |
Serag8/Bachelor | google_appengine/google/appengine/ext/remote_api/remote_api_stub.py | 4 | 31174 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""An apiproxy stub that calls a remote handler via HTTP.
This allows easy remote access to the App Engine datastore, and potentially any
of the other App Engine APIs, using the same interface you use when accessing
the service locally.
An example Python script:
---
from google.appengine.ext import db
from google.appengine.ext.remote_api import remote_api_stub
from myapp import models
import getpass
def auth_func():
return (raw_input('Username:'), getpass.getpass('Password:'))
remote_api_stub.ConfigureRemoteApi(None, '/_ah/remote_api', auth_func,
'my-app.appspot.com')
# Now you can access the remote datastore just as if your code was running on
# App Engine!
houses = models.House.all().fetch(100)
for a_house in q:
a_house.doors += 1
db.put(houses)
---
A few caveats:
- Where possible, avoid iterating over queries. Fetching as many results as you
will need is faster and more efficient. If you don't know how many results
you need, or you need 'all of them', iterating is fine.
- Likewise, it's a good idea to put entities in batches. Instead of calling put
for each individual entity, accumulate them and put them in batches using
db.put(), if you can (a short sketch of this follows below).
- Requests and responses are still limited to 1MB each, so if you have large
entities or try and fetch or put many of them at once, your requests may fail.
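A rough sketch of the batching advice above, reusing the hypothetical
models.House kind from the example (the batch size of 100 is arbitrary):
---
houses = models.House.all().fetch(1000)
batch = []
for a_house in houses:
  a_house.doors += 1
  batch.append(a_house)
  if len(batch) >= 100:
    db.put(batch)  # one RPC for up to 100 entities instead of 100 RPCs
    batch = []
if batch:
  db.put(batch)
---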
"""
import google
import os
import pickle
import random
import sys
import thread
import threading
import yaml
import hashlib
if os.environ.get('APPENGINE_RUNTIME') == 'python27':
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub_map
from google.appengine.datastore import datastore_pb
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.ext.remote_api import remote_api_services
from google.appengine.runtime import apiproxy_errors
else:
from google.appengine.api import apiproxy_rpc
from google.appengine.api import apiproxy_stub_map
from google.appengine.datastore import datastore_pb
from google.appengine.ext.remote_api import remote_api_pb
from google.appengine.ext.remote_api import remote_api_services
from google.appengine.runtime import apiproxy_errors
from google.appengine.tools import appengine_rpc
_REQUEST_ID_HEADER = 'HTTP_X_APPENGINE_REQUEST_ID'
class Error(Exception):
"""Base class for exceptions in this module."""
class ConfigurationError(Error):
"""Exception for configuration errors."""
class UnknownJavaServerError(Error):
"""Exception for exceptions returned from a Java remote_api handler."""
def GetUserAgent():
"""Determines the value of the 'User-agent' header to use for HTTP requests.
Returns:
String containing the 'user-agent' header value, which includes the SDK
version, the platform information, and the version of Python;
e.g., "remote_api/1.0.1 Darwin/9.2.0 Python/2.5.2".
"""
product_tokens = []
product_tokens.append("Google-remote_api/1.0")
product_tokens.append(appengine_rpc.GetPlatformToken())
python_version = ".".join(str(i) for i in sys.version_info)
product_tokens.append("Python/%s" % python_version)
return " ".join(product_tokens)
def GetSourceName():
return "Google-remote_api-1.0"
def HashEntity(entity):
"""Return a very-likely-unique hash of an entity."""
return hashlib.sha1(entity.Encode()).digest()
class TransactionData(object):
"""Encapsulates data about an individual transaction."""
def __init__(self, thread_id, is_xg):
self.thread_id = thread_id
self.preconditions = {}
self.entities = {}
self.is_xg = is_xg
class RemoteStub(object):
"""A stub for calling services on a remote server over HTTP.
You can use this to stub out any service that the remote server supports.
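A rough sketch of wiring one up by hand (auth_func is a credentials callback
as in the module docstring; most callers should simply use
ConfigureRemoteApi() from this module instead):

  server = appengine_rpc.HttpRpcServer('my-app.appspot.com', auth_func,
                                       GetUserAgent(), GetSourceName())
  stub = RemoteStub(server, '/_ah/remote_api')
  apiproxy_stub_map.apiproxy.RegisterStub('memcache', stub)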
"""
_local = threading.local()
def __init__(self, server, path, _test_stub_map=None):
"""Constructs a new RemoteStub that communicates with the specified server.
Args:
server: An instance of a subclass of
google.appengine.tools.appengine_rpc.AbstractRpcServer.
path: The path to the handler this stub should send requests to.
"""
self._server = server
self._path = path
self._test_stub_map = _test_stub_map
def _PreHookHandler(self, service, call, request, response):
pass
def _PostHookHandler(self, service, call, request, response):
pass
def MakeSyncCall(self, service, call, request, response):
self._PreHookHandler(service, call, request, response)
try:
test_stub = self._test_stub_map and self._test_stub_map.GetStub(service)
if test_stub:
test_stub.MakeSyncCall(service, call, request, response)
else:
self._MakeRealSyncCall(service, call, request, response)
finally:
self._PostHookHandler(service, call, request, response)
@classmethod
def _GetRequestId(cls):
"""Returns the id of the request associated with the current thread."""
return cls._local.request_id
@classmethod
def _SetRequestId(cls, request_id):
"""Set the id of the request associated with the current thread."""
cls._local.request_id = request_id
def _MakeRealSyncCall(self, service, call, request, response):
request_pb = remote_api_pb.Request()
request_pb.set_service_name(service)
request_pb.set_method(call)
request_pb.set_request(request.Encode())
if hasattr(self._local, 'request_id'):
request_pb.set_request_id(self._local.request_id)
response_pb = remote_api_pb.Response()
encoded_request = request_pb.Encode()
encoded_response = self._server.Send(self._path, encoded_request)
response_pb.ParseFromString(encoded_response)
if response_pb.has_application_error():
error_pb = response_pb.application_error()
raise apiproxy_errors.ApplicationError(error_pb.code(),
error_pb.detail())
elif response_pb.has_exception():
raise pickle.loads(response_pb.exception())
elif response_pb.has_java_exception():
raise UnknownJavaServerError("An unknown error has occurred in the "
"Java remote_api handler for this call.")
else:
response.ParseFromString(response_pb.response())
def CreateRPC(self):
return apiproxy_rpc.RPC(stub=self)
class RemoteDatastoreStub(RemoteStub):
"""A specialised stub for accessing the App Engine datastore remotely.
A specialised stub is required because there are some datastore operations
that preserve state between calls. This stub makes queries possible.
Transactions on the remote datastore are unfortunately still impossible.
"""
def __init__(self, server, path, default_result_count=20,
_test_stub_map=None):
"""Constructor.
Args:
server: The server name to connect to.
path: The URI path on the server.
default_result_count: The number of items to fetch, by default, in a
datastore Query or Next operation. This affects the batch size of
query iterators.
"""
super(RemoteDatastoreStub, self).__init__(server, path, _test_stub_map)
self.default_result_count = default_result_count
self.__queries = {}
self.__transactions = {}
self.__next_local_cursor = 1
self.__local_cursor_lock = threading.Lock()
self.__next_local_tx = 1
self.__local_tx_lock = threading.Lock()
def MakeSyncCall(self, service, call, request, response):
assert service == 'datastore_v3'
explanation = []
assert request.IsInitialized(explanation), explanation
handler = getattr(self, '_Dynamic_' + call, None)
if handler:
handler(request, response)
else:
super(RemoteDatastoreStub, self).MakeSyncCall(service, call, request,
response)
assert response.IsInitialized(explanation), explanation
def _Dynamic_RunQuery(self, query, query_result, cursor_id = None):
if query.has_transaction():
txdata = self.__transactions[query.transaction().handle()]
tx_result = remote_api_pb.TransactionQueryResult()
super(RemoteDatastoreStub, self).MakeSyncCall(
'remote_datastore', 'TransactionQuery', query, tx_result)
query_result.CopyFrom(tx_result.result())
eg_key = tx_result.entity_group_key()
encoded_eg_key = eg_key.Encode()
eg_hash = None
if tx_result.has_entity_group():
eg_hash = HashEntity(tx_result.entity_group())
old_key, old_hash = txdata.preconditions.get(encoded_eg_key, (None, None))
if old_key is None:
txdata.preconditions[encoded_eg_key] = (eg_key, eg_hash)
elif old_hash != eg_hash:
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.CONCURRENT_TRANSACTION,
'Transaction precondition failed.')
else:
super(RemoteDatastoreStub, self).MakeSyncCall(
'datastore_v3', 'RunQuery', query, query_result)
if cursor_id is None:
self.__local_cursor_lock.acquire()
try:
cursor_id = self.__next_local_cursor
self.__next_local_cursor += 1
finally:
self.__local_cursor_lock.release()
if query_result.more_results():
query.set_offset(query.offset() + query_result.result_size())
if query.has_limit():
query.set_limit(query.limit() - query_result.result_size())
self.__queries[cursor_id] = query
else:
self.__queries[cursor_id] = None
query_result.mutable_cursor().set_cursor(cursor_id)
def _Dynamic_Next(self, next_request, query_result):
assert next_request.offset() == 0
cursor_id = next_request.cursor().cursor()
if cursor_id not in self.__queries:
raise apiproxy_errors.ApplicationError(datastore_pb.Error.BAD_REQUEST,
'Cursor %d not found' % cursor_id)
query = self.__queries[cursor_id]
if query is None:
query_result.set_more_results(False)
return
else:
if next_request.has_count():
query.set_count(next_request.count())
else:
query.clear_count()
self._Dynamic_RunQuery(query, query_result, cursor_id)
query_result.set_skipped_results(0)
def _Dynamic_Get(self, get_request, get_response):
txid = None
if get_request.has_transaction():
txid = get_request.transaction().handle()
txdata = self.__transactions[txid]
assert (txdata.thread_id ==
thread.get_ident()), "Transactions are single-threaded."
keys = [(k, k.Encode()) for k in get_request.key_list()]
new_request = datastore_pb.GetRequest()
for key, enckey in keys:
if enckey not in txdata.entities:
new_request.add_key().CopyFrom(key)
else:
new_request = get_request
if new_request.key_size() > 0:
super(RemoteDatastoreStub, self).MakeSyncCall(
'datastore_v3', 'Get', new_request, get_response)
if txid is not None:
newkeys = new_request.key_list()
entities = get_response.entity_list()
for key, entity in zip(newkeys, entities):
entity_hash = None
if entity.has_entity():
entity_hash = HashEntity(entity.entity())
txdata.preconditions[key.Encode()] = (key, entity_hash)
new_response = datastore_pb.GetResponse()
it = iter(get_response.entity_list())
for key, enckey in keys:
if enckey in txdata.entities:
cached_entity = txdata.entities[enckey][1]
if cached_entity:
new_response.add_entity().mutable_entity().CopyFrom(cached_entity)
else:
new_response.add_entity()
else:
new_entity = it.next()
if new_entity.has_entity():
assert new_entity.entity().key() == key
new_response.add_entity().CopyFrom(new_entity)
else:
new_response.add_entity()
get_response.CopyFrom(new_response)
def _Dynamic_Put(self, put_request, put_response):
if put_request.has_transaction():
entities = put_request.entity_list()
requires_id = lambda x: x.id() == 0 and not x.has_name()
new_ents = [e for e in entities
if requires_id(e.key().path().element_list()[-1])]
id_request = datastore_pb.PutRequest()
txid = put_request.transaction().handle()
txdata = self.__transactions[txid]
assert (txdata.thread_id ==
thread.get_ident()), "Transactions are single-threaded."
if new_ents:
for ent in new_ents:
e = id_request.add_entity()
e.mutable_key().CopyFrom(ent.key())
e.mutable_entity_group()
id_response = datastore_pb.PutResponse()
if txdata.is_xg:
rpc_name = 'GetIDsXG'
else:
rpc_name = 'GetIDs'
super(RemoteDatastoreStub, self).MakeSyncCall(
'remote_datastore', rpc_name, id_request, id_response)
assert id_request.entity_size() == id_response.key_size()
for key, ent in zip(id_response.key_list(), new_ents):
ent.mutable_key().CopyFrom(key)
ent.mutable_entity_group().add_element().CopyFrom(
key.path().element(0))
for entity in entities:
txdata.entities[entity.key().Encode()] = (entity.key(), entity)
put_response.add_key().CopyFrom(entity.key())
else:
super(RemoteDatastoreStub, self).MakeSyncCall(
'datastore_v3', 'Put', put_request, put_response)
def _Dynamic_Delete(self, delete_request, response):
if delete_request.has_transaction():
txid = delete_request.transaction().handle()
txdata = self.__transactions[txid]
assert (txdata.thread_id ==
thread.get_ident()), "Transactions are single-threaded."
for key in delete_request.key_list():
txdata.entities[key.Encode()] = (key, None)
else:
super(RemoteDatastoreStub, self).MakeSyncCall(
'datastore_v3', 'Delete', delete_request, response)
def _Dynamic_BeginTransaction(self, request, transaction):
self.__local_tx_lock.acquire()
try:
txid = self.__next_local_tx
self.__transactions[txid] = TransactionData(thread.get_ident(),
request.allow_multiple_eg())
self.__next_local_tx += 1
finally:
self.__local_tx_lock.release()
transaction.set_handle(txid)
transaction.set_app(request.app())
def _Dynamic_Commit(self, transaction, transaction_response):
txid = transaction.handle()
if txid not in self.__transactions:
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.BAD_REQUEST,
'Transaction %d not found.' % (txid,))
txdata = self.__transactions[txid]
assert (txdata.thread_id ==
thread.get_ident()), "Transactions are single-threaded."
del self.__transactions[txid]
tx = remote_api_pb.TransactionRequest()
tx.set_allow_multiple_eg(txdata.is_xg)
for key, hash in txdata.preconditions.values():
precond = tx.add_precondition()
precond.mutable_key().CopyFrom(key)
if hash:
precond.set_hash(hash)
puts = tx.mutable_puts()
deletes = tx.mutable_deletes()
for key, entity in txdata.entities.values():
if entity:
puts.add_entity().CopyFrom(entity)
else:
deletes.add_key().CopyFrom(key)
super(RemoteDatastoreStub, self).MakeSyncCall(
'remote_datastore', 'Transaction',
tx, datastore_pb.PutResponse())
def _Dynamic_Rollback(self, transaction, transaction_response):
txid = transaction.handle()
self.__local_tx_lock.acquire()
try:
if txid not in self.__transactions:
raise apiproxy_errors.ApplicationError(
datastore_pb.Error.BAD_REQUEST,
'Transaction %d not found.' % (txid,))
txdata = self.__transactions[txid]
assert (txdata.thread_id ==
thread.get_ident()), "Transactions are single-threaded."
del self.__transactions[txid]
finally:
self.__local_tx_lock.release()
def _Dynamic_CreateIndex(self, index, id_response):
raise apiproxy_errors.CapabilityDisabledError(
'The remote datastore does not support index manipulation.')
def _Dynamic_UpdateIndex(self, index, void):
raise apiproxy_errors.CapabilityDisabledError(
'The remote datastore does not support index manipulation.')
def _Dynamic_DeleteIndex(self, index, void):
raise apiproxy_errors.CapabilityDisabledError(
'The remote datastore does not support index manipulation.')
ALL_SERVICES = set(remote_api_services.SERVICE_PB_MAP)
def GetRemoteAppIdFromServer(server, path, remote_token=None):
"""Return the app id from a connection to an existing server.
Args:
server: An appengine_rpc.AbstractRpcServer
path: The path to the remote_api handler for your app
(for example, '/_ah/remote_api').
remote_token: Token to validate that the response was to this request.
Returns:
App ID as reported by the remote server.
Raises:
ConfigurationError: The server returned an invalid response.
"""
if not remote_token:
random.seed()
remote_token = str(random.random())[2:]
remote_token = str(remote_token)
urlargs = {'rtok': remote_token}
response = server.Send(path, payload=None, **urlargs)
if not response.startswith('{'):
raise ConfigurationError(
'Invalid response received from server: %s' % response)
app_info = yaml.load(response)
if not app_info or 'rtok' not in app_info or 'app_id' not in app_info:
raise ConfigurationError('Error parsing app_id lookup response')
if str(app_info['rtok']) != remote_token:
raise ConfigurationError('Token validation failed during app_id lookup. '
'(sent %s, got %s)' % (repr(remote_token),
repr(app_info['rtok'])))
return app_info['app_id']
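# Hedged illustration (values are invented): a successful app_id lookup returns
# a small YAML mapping that echoes the validation token, for example
#   {app_id: s~myapp, rtok: '1234567890'}
# which is parsed with yaml.load() above and checked against the token we sent.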
def ConfigureRemoteApiFromServer(server, path, app_id, services=None,
default_auth_domain=None,
use_remote_datastore=True):
"""Does necessary setup to allow easy remote access to App Engine APIs.
Args:
server: An AbstractRpcServer
path: The path to the remote_api handler for your app
(for example, '/_ah/remote_api').
app_id: The app_id of your app, as declared in app.yaml.
services: A list of services to set up stubs for. If specified, only those
services are configured; by default all supported services are configured.
default_auth_domain: The authentication domain to use by default.
use_remote_datastore: Whether to use RemoteDatastoreStub instead of passing
through datastore requests. RemoteDatastoreStub batches transactional
datastore requests since, in production, datastore requires are scoped to
a single request.
Raises:
urllib2.HTTPError: if app_id is not provided and there is an error while
retrieving it.
    ConfigurationError: if there is an error configuring the Remote API.
"""
if services is None:
services = set(ALL_SERVICES)
else:
services = set(services)
unsupported = services.difference(ALL_SERVICES)
if unsupported:
raise ConfigurationError('Unsupported service(s): %s'
% (', '.join(unsupported),))
os.environ['APPLICATION_ID'] = app_id
os.environ.setdefault('AUTH_DOMAIN', default_auth_domain or 'gmail.com')
apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap()
if 'datastore_v3' in services and use_remote_datastore:
services.remove('datastore_v3')
datastore_stub = RemoteDatastoreStub(server, path)
apiproxy_stub_map.apiproxy.RegisterStub('datastore_v3', datastore_stub)
stub = RemoteStub(server, path)
for service in services:
apiproxy_stub_map.apiproxy.RegisterStub(service, stub)
def GetRemoteAppId(servername,
path,
auth_func,
rpc_server_factory=appengine_rpc.HttpRpcServer,
rtok=None,
secure=False,
save_cookies=False):
"""Get the remote appid as reported at servername/path.
This will also return an AbstractRpcServer server, which can be used with
ConfigureRemoteApiFromServer.
Args:
servername: The hostname your app is deployed on.
path: The path to the remote_api handler for your app
(for example, '/_ah/remote_api').
auth_func: A function that takes no arguments and returns a
(username, password) tuple. This will be called if your application
requires authentication to access the remote_api handler (it should!)
and you do not already have a valid auth cookie.
rpc_server_factory: A factory to construct the rpc server for the datastore.
    rtok: The validation token to send with app_id lookups. If None, a random
token is used.
secure: Use SSL when communicating with the server.
save_cookies: Forwarded to rpc_server_factory function.
Returns:
(app_id, server): The application ID and an AbstractRpcServer.
"""
server = rpc_server_factory(servername, auth_func, GetUserAgent(),
GetSourceName(), save_cookies=save_cookies,
debug_data=False, secure=secure)
app_id = GetRemoteAppIdFromServer(server, path, rtok)
return app_id, server
_OAUTH_SCOPES = [
'https://www.googleapis.com/auth/appengine.apis',
'https://www.googleapis.com/auth/userinfo.email',
]
def _ConfigureRemoteApiWithKeyFile(servername,
path,
service_account,
key_file_path):
"""Does necessary setup to allow easy remote access to App Engine APIs.
This function uses OAuth2 with a credential derived from service_account and
key_file_path to communicate with App Engine APIs.
Use of this method requires an encryption library to be installed.
Args:
servername: The hostname your app is deployed on (typically,
<app_id>.appspot.com).
path: The path to the remote_api handler for your app
(for example, '/_ah/remote_api').
service_account: The email address of the service account to use for
making OAuth requests.
key_file_path: The path to a .p12 file containing the private key for
service_account.
Returns:
server, a server which may be useful for calling the application directly.
Raises:
urllib2.HTTPError: if app_id is not provided and there is an error while
retrieving it.
    ConfigurationError: if there is an error configuring the DatastoreFileStub.
    ImportError: if the oauth2client module is not available or an appropriate
      encryption library cannot be found.
IOError: if key_file_path does not exist or cannot be read.
"""
try:
import oauth2client.client
except ImportError, e:
raise ImportError('Use of a key file to access the Remote API '
'requires the oauth2client module: %s' % e)
if not oauth2client.client.HAS_CRYPTO:
raise ImportError('Use of a key file to access the Remote API '
'requires an encryption library. Please install '
'either PyOpenSSL or PyCrypto 2.6 or later.')
with open(key_file_path, 'rb') as key_file:
key = key_file.read()
credentials = oauth2client.client.SignedJwtAssertionCredentials(
service_account,
key,
_OAUTH_SCOPES)
return _ConfigureRemoteApiWithOAuthCredentials(servername,
path,
credentials)
def _ConfigureRemoteApiWithComputeEngineCredential(servername,
path):
"""Does necessary setup to allow easy remote access to App Engine APIs.
This function uses OAuth2 with a credential from the Compute Engine metadata
server to communicate with App Engine APIs.
Args:
servername: The hostname your app is deployed on (typically,
<app_id>.appspot.com).
path: The path to the remote_api handler for your app
(for example, '/_ah/remote_api').
Returns:
server, a server which may be useful for calling the application directly.
Raises:
urllib2.HTTPError: if app_id is not provided and there is an error while
retrieving it.
    ConfigurationError: if there is an error configuring the DatastoreFileStub.
ImportError: if the oauth2client or httplib2 module is not available.
"""
try:
import httplib2
import oauth2client
except ImportError, e:
raise ImportError('Use of Compute Engine credentials requires the '
'oauth2client and httplib2 modules: %s' % e)
credentials = oauth2client.gce.AppAssertionCredentials(_OAUTH_SCOPES)
http = httplib2.Http()
credentials.authorize(http)
credentials.refresh(http)
return _ConfigureRemoteApiWithOAuthCredentials(servername,
path,
credentials)
def _ConfigureRemoteApiWithOAuthCredentials(servername,
path,
credentials):
"""Does necessary setup to allow easy remote access to App Engine APIs.
Args:
servername: The hostname your app is deployed on (typically,
<app_id>.appspot.com).
path: The path to the remote_api handler for your app
(for example, '/_ah/remote_api').
credentials: An oauth2client.OAuth2Credentials object.
Returns:
server, a server which may be useful for calling the application directly.
Raises:
urllib2.HTTPError: if app_id is not provided and there is an error while
retrieving it.
    ConfigurationError: if there is an error configuring the DatastoreFileStub.
ImportError: if the appengine_rpc_httplib2 module is not available.
"""
try:
from google.appengine.tools import appengine_rpc_httplib2
except ImportError, e:
raise ImportError('Use of OAuth credentials requires the '
'appengine_rpc_httplib2 module. %s' % e)
if not servername:
raise ConfigurationError('servername required')
oauth2_parameters = (
appengine_rpc_httplib2.HttpRpcServerOAuth2.OAuth2Parameters(
access_token=None,
client_id=None,
client_secret=None,
scope=None,
refresh_token=None,
credential_file=None,
credentials=credentials))
return ConfigureRemoteApi(
app_id=None,
path=path,
auth_func=oauth2_parameters,
servername=servername,
rpc_server_factory=appengine_rpc_httplib2.HttpRpcServerOAuth2)
def ConfigureRemoteApi(app_id,
path,
auth_func,
servername=None,
rpc_server_factory=appengine_rpc.HttpRpcServer,
rtok=None,
secure=False,
services=None,
default_auth_domain=None,
save_cookies=False,
use_remote_datastore=True):
"""Does necessary setup to allow easy remote access to App Engine APIs.
Either servername must be provided or app_id must not be None. If app_id
is None and a servername is provided, this function will send a request
to the server to retrieve the app_id.
Note that if the app_id is specified, the internal appid must be used;
this may include a partition and a domain. It is often easier to let
remote_api_stub retrieve the app_id automatically.
Args:
app_id: The app_id of your app, as declared in app.yaml, or None.
path: The path to the remote_api handler for your app
(for example, '/_ah/remote_api').
auth_func: A function that takes no arguments and returns a
(username, password) tuple. This will be called if your application
requires authentication to access the remote_api handler (it should!)
and you do not already have a valid auth cookie.
servername: The hostname your app is deployed on. Defaults to
<app_id>.appspot.com.
rpc_server_factory: A factory to construct the rpc server for the datastore.
    rtok: The validation token to send with app_id lookups. If None, a random
token is used.
secure: Use SSL when communicating with the server.
services: A list of services to set up stubs for. If specified, only those
services are configured; by default all supported services are configured.
default_auth_domain: The authentication domain to use by default.
save_cookies: Forwarded to rpc_server_factory function.
use_remote_datastore: Whether to use RemoteDatastoreStub instead of passing
through datastore requests. RemoteDatastoreStub batches transactional
      datastore requests since, in production, transactions are scoped to
a single request.
Returns:
server, the server created by rpc_server_factory, which may be useful for
calling the application directly.
Raises:
urllib2.HTTPError: if app_id is not provided and there is an error while
retrieving it.
    ConfigurationError: if there is an error configuring the DatastoreFileStub.
"""
if not servername and not app_id:
raise ConfigurationError('app_id or servername required')
if not servername:
servername = '%s.appspot.com' % (app_id,)
server = rpc_server_factory(servername, auth_func, GetUserAgent(),
GetSourceName(), save_cookies=save_cookies,
debug_data=False, secure=secure)
if not app_id:
app_id = GetRemoteAppIdFromServer(server, path, rtok)
ConfigureRemoteApiFromServer(server, path, app_id, services,
default_auth_domain, use_remote_datastore)
return server
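# Hedged usage sketch (not part of the original module): the hostname, handler
# path and auth_func below are placeholders, and the import path assumes this
# module is available as the SDK's remote_api_stub.
#
#   from google.appengine.ext.remote_api import remote_api_stub
#   import getpass
#
#   def auth_func():
#     return (raw_input('Email: '), getpass.getpass('Password: '))
#
#   remote_api_stub.ConfigureRemoteApi(
#       None, '/_ah/remote_api', auth_func, servername='myapp.appspot.com')
#   # From here on, datastore_v3 and the other registered services are proxied
#   # to the deployed application.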
def MaybeInvokeAuthentication():
"""Sends an empty request through to the configured end-point.
If authentication is necessary, this will cause the rpc_server to invoke
interactive authentication.
"""
datastore_stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
if isinstance(datastore_stub, RemoteStub):
datastore_stub._server.Send(datastore_stub._path, payload=None)
else:
raise ConfigurationError('remote_api is not configured.')
ConfigureRemoteDatastore = ConfigureRemoteApi
| mit |
chrismeyersfsu/webapp-bills | form.app.engine.py | 1 | 19113 | import cgi
import urllib, urllib2, Cookie
import cookielib
import re
import logging
from urlparse import urlparse
from django.utils import simplejson
from google.appengine.api import users
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.api import urlfetch
################################################################################
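# NOTE (editorial assumption): handleError() is referenced throughout this
# module but no definition appears in the original source, so any error path
# would raise NameError. The minimal stand-in below only logs the exception;
# the original author's intent is unknown.
def handleError(e):
    # Called from except blocks, so logging.exception() captures the traceback.
    logging.exception("Request failed: %s", e)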
class URLOpener:
def __init__(self):
self.cookie = Cookie.SimpleCookie()
self.jSessionId = ""
def open(self, url, data = None):
if data is None:
method = urlfetch.GET
else:
method = urlfetch.POST
while url is not None:
try:
o = urlparse(url)
path = o.path
str = "Getting url ["+url+"] cookie ["+self._makeCookieHeader(path)+"]"
if data != None:
str += " data ["+data+"]"
logging.debug(str)
response = urlfetch.fetch(url=url,
payload=data,
method=method,
headers=self._getHeaders(path),
allow_truncated=False,
follow_redirects=False,
deadline=10
)
data = None # Next request will be a get, so no need to send the data again.
method = urlfetch.GET
cookieStr = response.headers.get('Set-cookie', '')
if self.jSessionId == "":
if cookieStr.find("JSESSIONID") != -1:
pattern = re.compile('JSESSIONID=(.*?);')
match = pattern.search(cookieStr)
if match != None:
self.jSessionId = match.group(1)
logging.debug("Received cookies: ["+cookieStr + "]\n")
self.cookie.load(response.headers.get('Set-cookie', '')) # Load the cookies from the response
# Change cookie to the gathered JSESSIONID
url = response.headers.get('location')
except urllib2.URLError, e:
logging.error("Generic error")
self.response.out.write("Error")
handleError(e)
#except DownloadError:
# logging.error("Download error")
#except:
# logging.error("Other error")
return response
def _getHeaders(self, path):
headers = {
# 'Content-Type': 'text/html',
'User-agent': "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12",
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Keep-Alive': '300',
'Connection': 'keep-alive',
'Accept-Language': 'en-us,en;q=0.5',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Cookie' : self._makeCookieHeader(path)
}
return headers
def _makeCookieHeader(self, path):
cookieHeader = ""
logStr = ""
for k,v in self.cookie.items():
# if v.key == "JSESSIONID":
# if self.jSessionId != "":
# logging.debug("\n==== Replaced jsession ====\n")
# v.value = self.jSessionId
if 'path' in v:
if path.find(v['path'], 0, len(v['path'])) != -1:
logging.debug("\n==== "+v['path']+" ====\n")
cookieHeader += "%s=%s; " % (v.key, v.value)
elif v["path"] == "/,":
logging.debug("\n==== "+v['path']+" ====\n")
cookieHeader += "%s=%s; " % (v.key, v.value)
else:
logging.debug("\n==== Not Including "+v['path']+" ====\n")
else:
cookieHeader += "%s=%s; " % (v.key, v.value)
# return self.cookie.output("")
return cookieHeader
################################################################################
class BooksParser:
result = ""
results = ""
posList = ""
posEndList = ""
data = ""
count = 0
regex = {
'requirement': "<tr class=\"book course-(?P<requirement>[\w ]+)\">",
'image': "<td class=\"book-cover\"><a href=\"(?P<image>.*?)\"",
'title': '<span class=\"book-title\">(?P<title>.*?)</span>',
'author': '<span class=\"book-meta book-author\">(?P<author>.*?)</span>',
'isbn': '<span class=\"isbn\">(?P<isbn>\d+)</span>',
'copyright': '<span class=\"book-meta book-copyright\">(?P<copyright>.*?)</span>',
'publisher': '<span class=\"book-meta book-publisher\">(?P<publisher>.*?)</span>',
'edition': '<span class=\"book-meta book-edition\">(?P<edition>.*?)</span>',
'binding': '<span class=\"book-meta book-binding\">(?P<binding>.*?)</span>',
'priceNew': "<input type=\"hidden\" name=\"product-new-price-\d+\" id=\"product-new-price-\d+\" value=\"(?P<priceNew>\d+)\" />",
'priceUsed': "<input type=\"hidden\" name=\"product-used-price-\d+\" id=\"product-used-price-\d+\" value=\"(?P<priceUsed>\d+)\" />",
'priceNewRent': "<input type=\"hidden\" name=\"product-new-rental-price-\d+\" id=\"product-new-rental-price-\d+\" value=\"(?P<priceNewRent>\d+)\" />",
'priceUsedRent': "<input type=\"hidden\" name=\"product-used-rental-price-\d+\" id=\"product-used-rental-price-\d+\" value=\"(?P<priceUsedRent>\d+)\" />",
'availNew': "<td class=\"price\"><label for=\"radio-sku-new_\d+\">(?P<availNew>.*?)</label>",
'availUsed': "<td class=\"price\"><label for=\"radio-sku-used_\d+\">(?P<availUsed>.*?)</label>",
'availNewRent': "<td class=\"price\"><label for=\"radio-radio-sku-new-rental_\d+\">(?P<availNewRent>.*?)</label>",
'availUsedRent': "<td class=\"price\"><label for=\"radio-radio-sku-used-rental_\d+\">(?P<availUsedRent>.*?)</label>"
}
regexKeys = [ 'requirement', 'image', 'title', 'author', 'isbn', 'copyright', 'publisher', 'edition', 'binding', 'priceNew', 'priceUsed', 'priceNewRent', 'priceUsedRent', 'priceUsed', 'availNew', 'availUsed', 'availNewRent', 'availUsedRent'];
def __init__(self):
self.results = list()
self.posList = list()
self.posEndList = list()
self.data = ""
self.count = 0
self.result = {
'requirement': "",
'image': "",
'priceNew': "",
'priceUsed': "",
'priceNewRent': "",
'priceUsedRent': "",
'availNew': "",
'availUsed': "",
'availNewRent': "",
'availUsedRent': "",
'isbn': ""
}
def setData(self, data):
self.data = data;
def setup(self):
# Cut down the size of data
# Remove the recommended products
# TO-DO: support Recommended Products
endOffset = self.data.find("Recommended Products")
if endOffset != -1:
self.data = self.data[0:endOffset]
count = 0
        # For each item (book), not every regex is guaranteed to match, so we
        # take care to associate metadata with the correct book.
        # Assume that the first regex always matches: search the whole data for
        # all occurrences of the first regex and note the offsets, then search
        # for the remaining fields only between consecutive offsets.
k = self.regexKeys[0]
matchIter = re.finditer(self.regex[k], self.data)
startPrev = 0
endPrev = 0
flagFirst = True
for match in matchIter:
start = match.start(0)
end = match.end(0)
if flagFirst == True:
flagFirst = False
else:
self.posList.append(startPrev)
self.posEndList.append(start)
startPrev = start
endPrev = end
count += 1
# Add the final entry
self.posList.append(start)
self.posEndList.append(len(self.data))
self.posList.reverse()
self.posEndList.reverse()
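        # Illustration with invented offsets: if the 'requirement' regex matches
        # at offsets [120, 940, 1800] in self.data, the remaining field regexes
        # are searched only inside the windows (120, 940), (940, 1800) and
        # (1800, len(data)), so each book's metadata stays with the right book.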
def next(self):
if len(self.posList) == 0:
return False
pos = self.posList.pop()
posEnd = self.posEndList.pop()
for k in self.regexKeys:
pattern = re.compile(self.regex[k])
match = pattern.search(self.data, pos, posEnd)
if match == None:
self.result[k] = "NOT_FOUND" # fill result with nothing
else:
self.result[k] = match.group(1)
# self.results.append(self.result)
self.results.append(dict(self.result))
# self.results.append("%d %s" % (len(self.posList), "World"))
return True
class CampusTerm(webapp.RequestHandler):
url = "http://www.billsbookstore.com/"
regexSelect = '<select name=\"selTerm\" id=\"fTerm\" class=\"box\" title=\"Select a campus term\">(?P<select>.*?)</select>'
regexOption = '<option value=\"(\d+)\|(\d+)\">(.*?)</option>'
def get(self):
urlFull = self.url
try:
result = urllib2.urlopen(urlFull)
data = result.read()
pattern = re.compile(self.regexSelect)
match = pattern.search(data)
datatmp = match.group(1)
pattern = re.compile(self.regexOption)
pos = 0;
results = list()
while True:
match = pattern.search(datatmp, pos)
if match == None:
break;
results.append({ 'campustermId' : match.group(1)+'|'+match.group(2), 'campusId' : match.group(1), 'termId' : match.group(2), 'campusName' : match.group(3) })
pos = match.end(0)
self.response.out.write(simplejson.dumps(results))
except urllib2.URLError, e:
handleError(e)
class Department(webapp.RequestHandler):
url = "http://www.billsbookstore.com/textbooks_xml.asp?control=campus"
regexDept = '<department id=\"(\d+)\" abrev=\"(.*?)\" name=\"(.*?)\" />'
def get(self):
dept = self.request.get('campus')
term = self.request.get('term')
urlFull = self.url+'&campus='+dept+'&term='+term
try:
result = urllib2.urlopen(urlFull)
data = result.read()
pattern = re.compile(self.regexDept)
pos = 0;
results = list()
while True:
match = pattern.search(data, pos)
if match == None:
break;
results.append({ 'deptId' : match.group(1), 'deptAbrev': match.group(2), 'deptName' : match.group(3) })
pos = match.end(0)
self.response.out.write(simplejson.dumps(results))
except urllib2.URLError, e:
handleError(e)
class Course(webapp.RequestHandler):
url = "http://www.billsbookstore.com/textbooks_xml.asp?control=department"
regexCourse = '<course id=\"(\d+)\" name=\"(\d+)\s+\" />'
def get(self):
dept = self.request.get('dept')
term = self.request.get('term')
urlFull = self.url+'&dept='+dept+'&term='+term
try:
result = urllib2.urlopen(urlFull)
data = result.read()
pattern = re.compile(self.regexCourse)
pos = 0;
results = list()
while True:
match = pattern.search(data, pos)
if match == None:
break;
results.append({ 'courseId' : match.group(1), 'courseNumber' : match.group(2) })
pos = match.end(0)
self.response.out.write(simplejson.dumps(results))
except urllib2.URLError, e:
handleError(e)
class Section(webapp.RequestHandler):
url = "http://www.billsbookstore.com/textbooks_xml.asp?control=course"
regexSection = '<section id=\"(\d+)\" name=\"(.*?)\" instructor=\"(.*?)\" />'
def get(self):
dept = self.request.get('course')
term = self.request.get('term')
urlFull = self.url+'&course='+dept+'&term='+term
try:
result = urllib2.urlopen(urlFull)
data = result.read()
pattern = re.compile(self.regexSection)
pos = 0;
results = list()
while True:
match = pattern.search(data, pos)
if match == None:
break;
results.append({ 'sectionId' : match.group(1), 'sectionName' : match.group(2), 'instructor' : match.group(3) })
pos = match.end(0)
self.response.out.write(simplejson.dumps(results))
except urllib2.URLError, e:
handleError(e)
class Books(webapp.RequestHandler):
url = "http://www.billsbookstore.com/textbooks_xml.asp?control=section§ion="
def get(self):
section = self.request.get('id');
# section = "53867" # Single book
# section = "53857" # Many books, and a PRS clicker
# section = "55512" # Multiple books, single section
urlFull = self.url+section
try:
sp = BooksParser()
result = urllib2.urlopen(urlFull)
data = result.read()
sp.setData(data)
sp.setup()
while sp.next():
True
# for k in sp.regexKeys:
# self.response.out.write(k + "=" + sp.result[k] + "<br>\n")
self.response.out.write(simplejson.dumps(sp.results))
except urllib2.URLError, e:
handleError(e)
class BlackBoard(webapp.RequestHandler):
urlLogin = 'https://bb5.fsu.edu/cas/'
urlSession = 'https://bb5.fsu.edu/cas/login?loginurl=https%3A%2F%2Fapps.oti.fsu.edu%2FSecureLogin%2FLogin&service=https%3A%2F%2Fapps.oti.fsu.edu%2FSecureLogin%2FAuthenticator%3Fnoheader%3Dtrue%26nofooter%3Dtrue'
urlSecureApps = 'https://apps.oti.fsu.edu/SecureLogin/servlet/PortalHandler'
urlSchedule = 'https://apps.oti.fsu.edu/StudentClassSchedule/Schedule'
def get(self):
username = self.request.get('username');
password = self.request.get('password');
urlhandler = URLOpener()
urlhandler.cookie.clear()
# Login
try:
post_data = urllib.urlencode({
'username':username,
'password': password,
'service': 'https://campus.fsu.edu/webapps/login/',
'loginurl': 'https://campus.fsu.edu/webapps/login/bb_bb60/logincas.jsp',
'x': 0,
'y': 0
})
result = urlhandler.open(self.urlSession, post_data)
data = result.content
logging.debug(data)
pattern = re.compile('window.location.href="(.*?)"')
match = pattern.search(data)
if match != None:
urlRedirect = match.group(1)
# Complete login process
# by following window.location.href
result = urlhandler.open(urlRedirect)
data = result.content
# logging.debug(data)
except urllib2.URLError, e:
self.response.out.write("Error")
handleError(e)
# Session
# try:
# result = urlhandler.open(self.urlSession)
# data = result.content
# self.response.out.write(data)
# except urllib2.URLError, e:
# self.response.out.write("Error")
# handleError(e)
# Secure apps
try:
# Setup the session
result = urlhandler.open(self.urlSecureApps)
data = result.content
# logging.debug(data)
# Submit accept
post_data = urllib.urlencode({'submit' : 'Accept'})
result = urlhandler.open(self.urlSecureApps, post_data)
data = result.content
# logging.debug(data)
except urllib2.URLError, e:
self.response.out.write("Error")
handleError(e)
# Get schedule
try:
# Grab the list of "secure apps" links
result = urlhandler.open(self.urlSchedule)
data = result.content
# logging.debug(data)
# pattern = re.compile('<a target="" href=".*?SESSIONID=(.*?)\&.*?">My Class Schedule</a>')
# match = pattern.search(data)
# sessionId = ""
# if match != None:
# sessionId = match.group(1)
# urlhandler.cookie.load("JSESSIONID="+sessionId)
# logging.debug("\n\n Using session ["+sessionId+"]\n\n")
# else:
# logging.debug("\n\n\n\n=============================Session not found\n\n\n\n")
# pattern = re.compile('<a target="" href="(.*?)">My Class Schedule</a>')
# match = pattern.search(data)
# if match != None:
# urlRedirect = match.group(1)
# result = urlhandler.open('https://apps.oti.fsu.edu/'+urlRedirect)
# data = result.content
# logging.debug(data)
post_data = urllib.urlencode({
'CALLINGURI' : '/jsp/schedule_index.jsp',
'refer' : 'null',
'YEAR' : 2011,
'TERM' : 9,
'genSched' : 'Generate A Schedule'
})
result = urlhandler.open(self.urlSchedule, post_data)
data = result.content
logging.debug(data)
# self.response.out.write(urlhandler._makeCookieHeader())
# self.response.out.write(data)
# result = urlhandler.open('https://campus.fsu.edu/webapps/login?action=logout')
matchIter = re.finditer('<td class="only" align=".*?">(.*?)</td>', data)
count=0
matchKeys = [ 'alert', 'num', 'course', 'section', 'session', 'sectionId', 'title', 'hours', 'building', 'room', 'days', 'begin', 'end' ]
countMax = len(matchKeys)
schedule = []
klass = {}
# TODO: alert element will have some extra html in it (<span></span>)
for match in matchIter:
klass[matchKeys[count]] = match.group(1)
count += 1
if count == countMax:
schedule.append(klass)
count = 0
klass = {}
self.response.out.write(simplejson.dumps(schedule))
except urllib2.URLError, e:
self.response.out.write("Error")
handleError(e)
application = webapp.WSGIApplication([('/campusterm', CampusTerm),
('/dept', Department),
('/course', Course),
('/section', Section),
('/books', Books),
('/bb', BlackBoard)],
debug=True)
def main():
run_wsgi_app(application)
if __name__ == "__main__":
main()
| gpl-3.0 |
barykaed/Pelican-Test | fsp_env/Lib/site-packages/pip/_vendor/distlib/index.py | 571 | 20976 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Vinay Sajip.
# Licensed to the Python Software Foundation under a contributor agreement.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import hashlib
import logging
import os
import shutil
import subprocess
import tempfile
try:
from threading import Thread
except ImportError:
from dummy_threading import Thread
from . import DistlibException
from .compat import (HTTPBasicAuthHandler, Request, HTTPPasswordMgr,
urlparse, build_opener, string_types)
from .util import cached_property, zip_dir, ServerProxy
logger = logging.getLogger(__name__)
DEFAULT_INDEX = 'https://pypi.python.org/pypi'
DEFAULT_REALM = 'pypi'
class PackageIndex(object):
"""
This class represents a package index compatible with PyPI, the Python
Package Index.
"""
boundary = b'----------ThIs_Is_tHe_distlib_index_bouNdaRY_$'
def __init__(self, url=None):
"""
Initialise an instance.
:param url: The URL of the index. If not specified, the URL for PyPI is
used.
"""
self.url = url or DEFAULT_INDEX
self.read_configuration()
scheme, netloc, path, params, query, frag = urlparse(self.url)
if params or query or frag or scheme not in ('http', 'https'):
raise DistlibException('invalid repository: %s' % self.url)
self.password_handler = None
self.ssl_verifier = None
self.gpg = None
self.gpg_home = None
self.rpc_proxy = None
with open(os.devnull, 'w') as sink:
for s in ('gpg2', 'gpg'):
try:
rc = subprocess.check_call([s, '--version'], stdout=sink,
stderr=sink)
if rc == 0:
self.gpg = s
break
except OSError:
pass
def _get_pypirc_command(self):
"""
Get the distutils command for interacting with PyPI configurations.
:return: the command.
"""
from distutils.core import Distribution
from distutils.config import PyPIRCCommand
d = Distribution()
return PyPIRCCommand(d)
def read_configuration(self):
"""
Read the PyPI access configuration as supported by distutils, getting
        PyPI to do the actual work. This populates ``username``, ``password``,
``realm`` and ``url`` attributes from the configuration.
"""
# get distutils to do the work
c = self._get_pypirc_command()
c.repository = self.url
cfg = c._read_pypirc()
self.username = cfg.get('username')
self.password = cfg.get('password')
self.realm = cfg.get('realm', 'pypi')
self.url = cfg.get('repository', self.url)
def save_configuration(self):
"""
Save the PyPI access configuration. You must have set ``username`` and
``password`` attributes before calling this method.
Again, distutils is used to do the actual work.
"""
self.check_credentials()
# get distutils to do the work
c = self._get_pypirc_command()
c._store_pypirc(self.username, self.password)
def check_credentials(self):
"""
Check that ``username`` and ``password`` have been set, and raise an
exception if not.
"""
if self.username is None or self.password is None:
raise DistlibException('username and password must be set')
pm = HTTPPasswordMgr()
_, netloc, _, _, _, _ = urlparse(self.url)
pm.add_password(self.realm, netloc, self.username, self.password)
self.password_handler = HTTPBasicAuthHandler(pm)
def register(self, metadata):
"""
Register a distribution on PyPI, using the provided metadata.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the distribution to be
registered.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
metadata.validate()
d = metadata.todict()
d[':action'] = 'verify'
request = self.encode_request(d.items(), [])
response = self.send_request(request)
d[':action'] = 'submit'
request = self.encode_request(d.items(), [])
return self.send_request(request)
def _reader(self, name, stream, outbuf):
"""
Thread runner for reading lines of from a subprocess into a buffer.
:param name: The logical name of the stream (used for logging only).
:param stream: The stream to read from. This will typically a pipe
connected to the output stream of a subprocess.
:param outbuf: The list to append the read lines to.
"""
while True:
s = stream.readline()
if not s:
break
s = s.decode('utf-8').rstrip()
outbuf.append(s)
logger.debug('%s: %s' % (name, s))
stream.close()
def get_sign_command(self, filename, signer, sign_password,
keystore=None):
"""
Return a suitable command for signing a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The signing command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
if sign_password is not None:
cmd.extend(['--batch', '--passphrase-fd', '0'])
td = tempfile.mkdtemp()
sf = os.path.join(td, os.path.basename(filename) + '.asc')
cmd.extend(['--detach-sign', '--armor', '--local-user',
signer, '--output', sf, filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd, sf
def run_command(self, cmd, input_data=None):
"""
        Run a command in a child process, passing it any input data specified.
:param cmd: The command to run.
:param input_data: If specified, this must be a byte string containing
data to be sent to the child process.
:return: A tuple consisting of the subprocess' exit code, a list of
lines read from the subprocess' ``stdout``, and a list of
lines read from the subprocess' ``stderr``.
"""
kwargs = {
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE,
}
if input_data is not None:
kwargs['stdin'] = subprocess.PIPE
stdout = []
stderr = []
p = subprocess.Popen(cmd, **kwargs)
# We don't use communicate() here because we may need to
# get clever with interacting with the command
t1 = Thread(target=self._reader, args=('stdout', p.stdout, stdout))
t1.start()
t2 = Thread(target=self._reader, args=('stderr', p.stderr, stderr))
t2.start()
if input_data is not None:
p.stdin.write(input_data)
p.stdin.close()
p.wait()
t1.join()
t2.join()
return p.returncode, stdout, stderr
def sign_file(self, filename, signer, sign_password, keystore=None):
"""
Sign a file.
:param filename: The pathname to the file to be signed.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The absolute pathname of the file where the signature is
stored.
"""
cmd, sig_file = self.get_sign_command(filename, signer, sign_password,
keystore)
rc, stdout, stderr = self.run_command(cmd,
sign_password.encode('utf-8'))
if rc != 0:
raise DistlibException('sign command failed with error '
'code %s' % rc)
return sig_file
def upload_file(self, metadata, filename, signer=None, sign_password=None,
filetype='sdist', pyversion='source', keystore=None):
"""
Upload a release file to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the file to be uploaded.
:param filename: The pathname of the file to be uploaded.
:param signer: The identifier of the signer of the file.
:param sign_password: The passphrase for the signer's
private key used for signing.
:param filetype: The type of the file being uploaded. This is the
distutils command which produced that file, e.g.
``sdist`` or ``bdist_wheel``.
:param pyversion: The version of Python which the release relates
to. For code compatible with any Python, this would
be ``source``, otherwise it would be e.g. ``3.2``.
:param keystore: The path to a directory which contains the keys
used in signing. If not specified, the instance's
``gpg_home`` attribute is used instead.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.exists(filename):
raise DistlibException('not found: %s' % filename)
metadata.validate()
d = metadata.todict()
sig_file = None
if signer:
if not self.gpg:
logger.warning('no signing program available - not signed')
else:
sig_file = self.sign_file(filename, signer, sign_password,
keystore)
with open(filename, 'rb') as f:
file_data = f.read()
md5_digest = hashlib.md5(file_data).hexdigest()
sha256_digest = hashlib.sha256(file_data).hexdigest()
d.update({
':action': 'file_upload',
'protcol_version': '1',
'filetype': filetype,
'pyversion': pyversion,
'md5_digest': md5_digest,
'sha256_digest': sha256_digest,
})
files = [('content', os.path.basename(filename), file_data)]
if sig_file:
with open(sig_file, 'rb') as f:
sig_data = f.read()
files.append(('gpg_signature', os.path.basename(sig_file),
sig_data))
shutil.rmtree(os.path.dirname(sig_file))
request = self.encode_request(d.items(), files)
return self.send_request(request)
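    # Hedged usage sketch (not part of distlib): `metadata` is assumed to be a
    # distlib Metadata instance describing the release; URL, credentials and
    # file paths are placeholders.
    #
    #   index = PackageIndex('https://pypi.example.org/pypi')
    #   index.username = 'me'
    #   index.password = 'secret'
    #   index.register(metadata)
    #   index.upload_file(metadata, 'dist/pkg-1.0.tar.gz',
    #                     filetype='sdist', pyversion='source')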
def upload_documentation(self, metadata, doc_dir):
"""
Upload documentation to the index.
:param metadata: A :class:`Metadata` instance defining at least a name
and version number for the documentation to be
uploaded.
:param doc_dir: The pathname of the directory which contains the
documentation. This should be the directory that
contains the ``index.html`` for the documentation.
:return: The HTTP response received from PyPI upon submission of the
request.
"""
self.check_credentials()
if not os.path.isdir(doc_dir):
raise DistlibException('not a directory: %r' % doc_dir)
fn = os.path.join(doc_dir, 'index.html')
if not os.path.exists(fn):
raise DistlibException('not found: %r' % fn)
metadata.validate()
name, version = metadata.name, metadata.version
zip_data = zip_dir(doc_dir).getvalue()
fields = [(':action', 'doc_upload'),
('name', name), ('version', version)]
files = [('content', name, zip_data)]
request = self.encode_request(fields, files)
return self.send_request(request)
def get_verify_command(self, signature_filename, data_filename,
keystore=None):
"""
Return a suitable command for verifying a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: The verifying command as a list suitable to be
passed to :class:`subprocess.Popen`.
"""
cmd = [self.gpg, '--status-fd', '2', '--no-tty']
if keystore is None:
keystore = self.gpg_home
if keystore:
cmd.extend(['--homedir', keystore])
cmd.extend(['--verify', signature_filename, data_filename])
logger.debug('invoking: %s', ' '.join(cmd))
return cmd
def verify_signature(self, signature_filename, data_filename,
keystore=None):
"""
Verify a signature for a file.
:param signature_filename: The pathname to the file containing the
signature.
:param data_filename: The pathname to the file containing the
signed data.
:param keystore: The path to a directory which contains the keys
used in verification. If not specified, the
instance's ``gpg_home`` attribute is used instead.
:return: True if the signature was verified, else False.
"""
if not self.gpg:
raise DistlibException('verification unavailable because gpg '
'unavailable')
cmd = self.get_verify_command(signature_filename, data_filename,
keystore)
rc, stdout, stderr = self.run_command(cmd)
if rc not in (0, 1):
raise DistlibException('verify command failed with error '
'code %s' % rc)
return rc == 0
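    # Hedged usage sketch: key id, passphrase and paths are placeholders, and a
    # gpg/gpg2 binary plus a populated keystore are assumed to be available.
    #
    #   sig = index.sign_file('dist/pkg-1.0.tar.gz', 'signer-key-id',
    #                         'passphrase', keystore='/path/to/gnupg-home')
    #   ok = index.verify_signature(sig, 'dist/pkg-1.0.tar.gz',
    #                               keystore='/path/to/gnupg-home')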
def download_file(self, url, destfile, digest=None, reporthook=None):
"""
This is a convenience method for downloading a file from an URL.
Normally, this will be a file from the index, though currently
no check is made for this (i.e. a file can be downloaded from
anywhere).
The method is just like the :func:`urlretrieve` function in the
        standard library, except that it allows digest computation to be
        done during download and checks that the downloaded data
        matches any expected value.
:param url: The URL of the file to be downloaded (assumed to be
available via an HTTP GET request).
:param destfile: The pathname where the downloaded file is to be
saved.
:param digest: If specified, this must be a (hasher, value)
tuple, where hasher is the algorithm used (e.g.
``'md5'``) and ``value`` is the expected value.
:param reporthook: The same as for :func:`urlretrieve` in the
standard library.
"""
if digest is None:
digester = None
logger.debug('No digest specified')
else:
if isinstance(digest, (list, tuple)):
hasher, digest = digest
else:
hasher = 'md5'
digester = getattr(hashlib, hasher)()
logger.debug('Digest specified: %s' % digest)
# The following code is equivalent to urlretrieve.
# We need to do it this way so that we can compute the
# digest of the file as we go.
with open(destfile, 'wb') as dfp:
# addinfourl is not a context manager on 2.x
# so we have to use try/finally
sfp = self.send_request(Request(url))
try:
headers = sfp.info()
blocksize = 8192
size = -1
read = 0
blocknum = 0
if "content-length" in headers:
size = int(headers["Content-Length"])
if reporthook:
reporthook(blocknum, blocksize, size)
while True:
block = sfp.read(blocksize)
if not block:
break
read += len(block)
dfp.write(block)
if digester:
digester.update(block)
blocknum += 1
if reporthook:
reporthook(blocknum, blocksize, size)
finally:
sfp.close()
# check that we got the whole file, if we can
if size >= 0 and read < size:
raise DistlibException(
'retrieval incomplete: got only %d out of %d bytes'
% (read, size))
# if we have a digest, it must match.
if digester:
actual = digester.hexdigest()
if digest != actual:
raise DistlibException('%s digest mismatch for %s: expected '
'%s, got %s' % (hasher, destfile,
digest, actual))
logger.debug('Digest verified: %s', digest)
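    # Hedged usage sketch: URL, destination and digest are placeholders.
    #
    #   index.download_file('https://files.example.org/pkg-1.0.tar.gz',
    #                       '/tmp/pkg-1.0.tar.gz',
    #                       digest=('sha256', '<expected hex digest>'))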
def send_request(self, req):
"""
Send a standard library :class:`Request` to PyPI and return its
response.
:param req: The request to send.
:return: The HTTP response from PyPI (a standard library HTTPResponse).
"""
handlers = []
if self.password_handler:
handlers.append(self.password_handler)
if self.ssl_verifier:
handlers.append(self.ssl_verifier)
opener = build_opener(*handlers)
return opener.open(req)
def encode_request(self, fields, files):
"""
Encode fields and files for posting to an HTTP server.
:param fields: The fields to send as a list of (fieldname, value)
tuples.
:param files: The files to send as a list of (fieldname, filename,
file_bytes) tuple.
"""
# Adapted from packaging, which in turn was adapted from
# http://code.activestate.com/recipes/146306
parts = []
boundary = self.boundary
for k, values in fields:
if not isinstance(values, (list, tuple)):
values = [values]
for v in values:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"' %
k).encode('utf-8'),
b'',
v.encode('utf-8')))
for key, filename, value in files:
parts.extend((
b'--' + boundary,
('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, filename)).encode('utf-8'),
b'',
value))
parts.extend((b'--' + boundary + b'--', b''))
body = b'\r\n'.join(parts)
ct = b'multipart/form-data; boundary=' + boundary
headers = {
'Content-type': ct,
'Content-length': str(len(body))
}
return Request(self.url, body, headers)
def search(self, terms, operator=None):
if isinstance(terms, string_types):
terms = {'name': terms}
if self.rpc_proxy is None:
self.rpc_proxy = ServerProxy(self.url, timeout=3.0)
return self.rpc_proxy.search(terms, operator or 'and')
| mit |
coders4help/volunteer_planner | shiftmailer/excelexport.py | 5 | 2641 | # coding: utf-8
import logging
import os
import tempfile
from django.conf import settings
from django.core.mail.message import EmailMessage
from excel_renderer import ExcelRenderer
log = logging.getLogger(__name__)
class GenerateExcelSheet:
def __init__(self, shifts, mailer):
if not shifts:
raise AssertionError(u'No shifts given. Cannot generate Excel file for {}.'.format(mailer.facility))
self.shifts = shifts
self.mailer = mailer
self.tmpdir = tempfile.mkdtemp()
self.tmpfiles = []
def __del__(self):
for filename in self.tmpfiles:
if filename and os.path.exists(filename):
os.remove(filename)
if self.tmpdir is not None and os.path.exists(self.tmpdir):
log.debug(u'Removing tmpdir %s', self.tmpdir)
os.removedirs(self.tmpdir)
def generate_excel(self):
if self.shifts is None or self.shifts.count() == 0:
            log.warn(u'No shifts, no shift schedule.')
return None
log.debug(u'About to generate XLS for facility "%s"', self.shifts[0].facility)
log.debug(u'Shifts query: %s', self.shifts.query)
filename = os.path.join(self.tmpdir, u'Dienstplan_{}_{}.xls'.format(self.mailer.organization,
self.shifts[0].starting_time
.strftime('%Y%m%d')))
self.tmpfiles.append(filename)
renderer = ExcelRenderer()
renderer.generate_shift_overview(self.mailer.organization, self.mailer.facility, self.shifts, filename)
return filename
def send_file(self):
attachment = self.generate_excel()
if not self.mailer:
log.error(u'Cannot create and send email without mailer information')
return
mail = EmailMessage()
mail.body = "Hallo " + self.mailer.first_name + " " + self.mailer.last_name + "\n\n"\
"Anbei die Liste zum Dienstplan der Freiwilligen.\nDies ist ein Service von volunteer-planner.org"
mail.subject = "Dienstplan fuer den " + self.shifts[0].starting_time.strftime("%d.%m.%Y") + \
" der Freiwilligen in der Unterkunft " + self.shifts[0].facility.name
mail.from_email = settings.DEFAULT_FROM_EMAIL
mail.to = [str(self.mailer.email)]
if attachment is not None:
mail.attach_file(path=attachment, mimetype='application/vnd.ms-excel')
mail.send()
else:
            log.warn(u'No attachment, no mail sent.')
| agpl-3.0 |
ryanjmccall/nupic | nupic/frameworks/opf/expdescriptionhelpers.py | 7 | 15073 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
import imp
from nupic.data.dictutils import rUpdate
# This file contains utility functions that are used
# internally by the prediction framework and may be imported
# by description files. Functions that are used only by
# the prediction framework should be in utils.py
#
# This file provides support for the following experiment description features:
#
# 1. Sub-experiment support
# 2. Lazy evaluators (e.g., DeferredDictLookup, applyValueGettersToContainer)
###############################################################################
# Sub-experiment support
###############################################################################
#########################################################
# Description files are organized as a base description and an experiment based
# on that base description. The base description calls getConfig to get the
# configuration from the specific experiment, and the specific experiment calls
# importBaseDescription.
# An empty initial config allows the base experiment to run by itself.
_config = dict()
# Save the path to the current sub-experiment here during importBaseDescription()
subExpDir = None
# We will load the description file as a module, which allows us to
# use the debugger and see source code. But description files are frequently
# modified and we want to be able to easily reload them. To facilitate this,
# we reload with a unique module name ("pf_description%d") each time.
baseDescriptionImportCount = 0
#########################################################
def importBaseDescription(path, config):
global baseDescriptionImportCount, _config, subExpDir
if not os.path.isabs(path):
# grab the path to the file doing the import
import inspect
callingFrame = inspect.stack()[1][0]
callingFile = callingFrame.f_globals['__file__']
subExpDir = os.path.dirname(callingFile)
path = os.path.join(subExpDir, path)
#print "Importing from: %s" % path
# stash the config in a place where the loading module can find it.
_config = config
mod = imp.load_source("pf_base_description%d" % baseDescriptionImportCount,
path)
# don't want to override __file__ in our caller
mod.__base_file__ = mod.__file__
del mod.__file__
baseDescriptionImportCount += 1
return mod
#########################################################
# Newer method just updates from sub-experiment
def updateConfigFromSubConfig(config):
# _config is the configuration provided by the sub-experiment
global _config
rUpdate(config, _config)
_config = dict()
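# Hedged illustration of the intended calling pattern (paths and config keys
# are examples only). In a sub-experiment's description.py:
#
#   config = dict(_dsEncoderFieldName2_N=100)          # overrides for the base
#   mod = importBaseDescription('../base/description.py', config)
#   locals().update(mod.__dict__)
#
# and in the base description.py, after defining its own config dict:
#
#   updateConfigFromSubConfig(config)   # merge the sub-experiment's overrides
#   applyValueGettersToContainer(config)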
#########################################################
def getSubExpDir():
global subExpDir
return subExpDir
###############################################################################
# Lazy evaluators (DeferredDictLookup, applyValueGettersToContainer, and friends)
###############################################################################
###############################################################################
class ValueGetterBase(object):
""" Base class for "value getters" (e.g., class DictValueGetter) that are used
to resolve values of sub-fields after the experiment's config dictionary (in
description.py) is defined and possibly updated from a sub-experiment.
This solves the problem of referencing the config dictionary's field from within
  the definition of the dictionary itself (before the dictionary's own definition
is complete).
NOTE: its possible that the referenced value does not yet exist at the
time of instantiation of a given value-getter future. It will be
resolved when the base description.py calls
applyValueGettersToContainer().
NOTE: The constructor of the derived classes MUST call our constructor.
NOTE: The derived classes MUST override handleGetValue(self).
NOTE: may be used by base and sub-experiments to derive their own custom value
         getters; however, their use is applicable only where permitted, as
described in comments within descriptionTemplate.tpl. See class
DictValueGetter for implementation example.
"""
class __NoResult(object):
""" A private class that we use as a special unique value to indicate that
our result cache instance variable does not hold a valid result.
"""
pass
def __init__(self):
#print("NOTE: ValueGetterBase INITIALIZING")
self.__inLookup = False
self.__cachedResult = self.__NoResult
def __call__(self, topContainer):
""" Resolves the referenced value. If the result is already cached,
returns it to caller. Otherwise, invokes the pure virtual method
handleGetValue. If handleGetValue() returns another value-getter, calls
that value-getter to resolve the value. This may result in a chain of calls
that terminates once the value is fully resolved to a non-value-getter value.
Upon return, the value is fully resolved and cached, so subsequent calls will
always return the cached value reference.
topContainer: The top-level container (dict, tuple, or list [sub-]instance)
within whose context the value-getter is applied.
Returns: The fully-resolved value that was referenced by the value-getter
instance
"""
#print("IN ValueGetterBase.__CAll__()")
assert(not self.__inLookup)
if self.__cachedResult is not self.__NoResult:
return self.__cachedResult
self.__cachedResult = self.handleGetValue(topContainer)
if isinstance(self.__cachedResult, ValueGetterBase):
valueGetter = self.__cachedResult
self.__inLookup = True
self.__cachedResult = valueGetter(topContainer)
self.__inLookup = False
    # The value should be fully resolved at this point
assert(self.__cachedResult is not self.__NoResult)
assert(not isinstance(self.__cachedResult, ValueGetterBase))
return self.__cachedResult
def handleGetValue(self, topContainer):
""" A "pure virtual" method. The derived class MUST override this method
and return the referenced value. The derived class is NOT responsible for
    fully resolving the referenced value in the event the value resolves to
another ValueGetterBase-based instance -- this is handled automatically
within ValueGetterBase implementation.
topContainer: The top-level container (dict, tuple, or list [sub-]instance)
within whose context the value-getter is applied.
Returns: The value referenced by this instance (which may be another
value-getter instance)
"""
raise NotImplementedError("ERROR: ValueGetterBase is an abstract " + \
"class; base class MUST override handleGetValue()")
###############################################################################
class DictValueGetter(ValueGetterBase):
"""
Creates a "future" reference to a value within a top-level or a nested
dictionary. See also class DeferredDictLookup.
"""
def __init__(self, referenceDict, *dictKeyChain):
"""
referenceDict: Explicit reference dictionary that contains the field
                     corresponding to the first key name in dictKeyChain. This may
be the result returned by the built-in globals() function,
when we desire to look up a dictionary value from a dictionary
referenced by a global variable within the calling module.
If None is passed for referenceDict, then the topContainer
parameter supplied to handleGetValue() will be used as the
reference dictionary instead (this allows the desired module
to designate the appropriate reference dictionary for the
value-getters when it calls applyValueGettersToContainer())
dictKeyChain: One or more strings; the first string is a key (that will
eventually be defined) in the reference dictionary. If
additional strings are supplied, then the values
                     corresponding to prior key strings must be dictionaries, and
                     each additional string references a sub-dictionary of the
former. The final string is the key of the field whose value
will be returned by handleGetValue().
    NOTE: It's possible that the referenced value does not yet exist at the
time of instantiation of this class. It will be resolved when the
base description.py calls applyValueGettersToContainer().
Example:
config = dict(
_dsEncoderFieldName2_N = 70,
_dsEncoderFieldName2_W = 5,
dsEncoderSchema = [
dict(
base=dict(
fieldname='Name2', type='ScalarEncoder',
name='Name2', minval=0, maxval=270, clipInput=True,
n=DictValueGetter(None, '_dsEncoderFieldName2_N'),
w=DictValueGetter(None, '_dsEncoderFieldName2_W')),
),
],
)
updateConfigFromSubConfig(config)
applyValueGettersToContainer(config)
"""
# First, invoke base constructor
ValueGetterBase.__init__(self)
assert(referenceDict is None or isinstance(referenceDict, dict))
assert(len(dictKeyChain) >= 1)
self.__referenceDict = referenceDict
self.__dictKeyChain = dictKeyChain
def handleGetValue(self, topContainer):
""" This method overrides ValueGetterBase's "pure virtual" method. It
returns the referenced value. The derived class is NOT responsible for
    fully resolving the referenced value in the event the value resolves to
another ValueGetterBase-based instance -- this is handled automatically
within ValueGetterBase implementation.
topContainer: The top-level container (dict, tuple, or list [sub-]instance)
within whose context the value-getter is applied. If
self.__referenceDict is None, then topContainer will be used
as the reference dictionary for resolving our dictionary key
chain.
Returns: The value referenced by this instance (which may be another
value-getter instance)
"""
value = self.__referenceDict if self.__referenceDict is not None else topContainer
for key in self.__dictKeyChain:
value = value[key]
return value
###############################################################################
class DeferredDictLookup(DictValueGetter):
"""
Creates a "future" reference to a value within an implicit dictionary that
will be passed to applyValueGettersToContainer() in the future (typically
called by description.py after its config dictionary has been updated from
the sub-experiment). The reference is relative to the dictionary that will
be passed to applyValueGettersToContainer()
"""
def __init__(self, *dictKeyChain):
"""
dictKeyChain: One or more strings; the first string is a key (that will
eventually be defined) in the dictionary that will be passed
to applyValueGettersToContainer(). If additional strings are
                    supplied, then the values corresponding to prior key strings
                    must be dictionaries, and each additional string references a
sub-dictionary of the former. The final string is the key of
the field whose value will be returned by this value-getter
    NOTE: it's possible that the referenced value does not yet exist at the
time of instantiation of this class. It will be resolved when the
base description.py calls applyValueGettersToContainer().
Example:
config = dict(
_dsEncoderFieldName2_N = 70,
_dsEncoderFieldName2_W = 5,
dsEncoderSchema = [
dict(
base=dict(
fieldname='Name2', type='ScalarEncoder',
name='Name2', minval=0, maxval=270, clipInput=True,
n=DeferredDictLookup('_dsEncoderFieldName2_N'),
w=DeferredDictLookup('_dsEncoderFieldName2_W')),
),
],
)
updateConfigFromSubConfig(config)
applyValueGettersToContainer(config)
"""
# Invoke base (DictValueGetter constructor), passing None for referenceDict,
    # which will force it to use the dictionary passed via
# applyValueGettersToContainer(), instead.
DictValueGetter.__init__(self, None, *dictKeyChain)
###############################################################################
def applyValueGettersToContainer(container):
"""
"""
_applyValueGettersImpl(container=container, currentObj=container,
recursionStack=[])
###############################################################################
def _applyValueGettersImpl(container, currentObj, recursionStack):
"""
"""
# Detect cycles
if currentObj in recursionStack:
return
# Sanity-check of our cycle-detection logic
assert(len(recursionStack) < 1000)
# Push the current object on our cycle-detection stack
recursionStack.append(currentObj)
# Resolve value-getters within dictionaries, tuples and lists
if isinstance(currentObj, dict):
for (key, value) in currentObj.items():
if isinstance(value, ValueGetterBase):
currentObj[key] = value(container)
_applyValueGettersImpl(container, currentObj[key], recursionStack)
elif isinstance(currentObj, tuple) or isinstance(currentObj, list):
for (i, value) in enumerate(currentObj):
      # NOTE: values within a tuple should never be value-getters, since
      # the top-level elements within a tuple are immutable. However,
      # nested sub-elements may be mutable, so we still recurse into them.
if isinstance(value, ValueGetterBase):
currentObj[i] = value(container)
_applyValueGettersImpl(container, currentObj[i], recursionStack)
else:
pass
recursionStack.pop()
return
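###############################################################################
# Illustrative sketch only (not part of the original module): assuming the
# helpers above are used as-is, this shows how a DeferredDictLookup placed
# inside a config dictionary is resolved once applyValueGettersToContainer()
# is applied. The '_windowSize'/'swarm' names are made up for the example.
def _exampleDeferredLookupUsage():
  config = dict(
    _windowSize=10,
    swarm=dict(
      # Resolved lazily, after the dictionary is fully defined/updated.
      windowSize=DeferredDictLookup('_windowSize'),
    ),
  )
  applyValueGettersToContainer(config)
  assert config['swarm']['windowSize'] == 10
  return config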
| gpl-3.0 |
SeattleTestbed/repy_v2 | safe_check.py | 1 | 1136 | """
Author: Armon Dadgar
Date: June 11th, 2009
Description:
This simple script reads "code" from stdin, and runs safe.safe_check() on it.
The resulting return value or exception is serialized and written to stdout.
The purpose of this script is to be called from the main repy.py script so that the
memory used by the safe.safe_check() will be reclaimed when this process quits.
"""
import safe
import sys
import encoding_header
if __name__ == "__main__":
# Get the user "code"
usercode = sys.stdin.read()
# Output buffer
output = ""
# Check the code
try:
value = safe.safe_check(usercode)
output += str(value)
except Exception, e:
# Adjust traceback line numbers, see SeattleTestbed/repy_v2#95.
try:
e.lineno = e.lineno - \
len(encoding_header.ENCODING_DECLARATION.splitlines())
except (TypeError, AttributeError):
# Ignore exceptions with line numbers that are non-numeric (i.e.
# `None`), or have no `lineno` attribute altogether.
pass
output += str(type(e)) + " " + str(e)
# Write out
sys.stdout.write(output)
sys.stdout.flush()
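
# Illustrative sketch only (not part of the original script; the exact call
# made by repy.py may differ): it shows how a parent process could feed user
# code to this script over stdin and read the serialized result (the str() of
# safe_check()'s return value, or the serialized exception text) from stdout.
def _example_parent_invocation(usercode):
  import subprocess
  proc = subprocess.Popen([sys.executable, "safe_check.py"],
                          stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  output, _ = proc.communicate(usercode)
  return output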
| mit |
ViaQ/watches-cli | tests/commands/test_just_indices_stats.py | 1 | 1384 | """Tests for our `watches just_indices_stats` subcommand."""
import json
from subprocess import PIPE, Popen as popen
from secure_support import TestSecureSupport
class TestJustIndicesStats(TestSecureSupport):
def test_returns_index_per_line(self):
cmd = self.appendSecurityCommands(['watches', 'just_indices_stats', '-l', '--level=indices', '--timestamp'])
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
self.assertTrue(len(output) > 0)
lines = 0
for line in output.splitlines():
lines += 1
o = json.loads(line)
self.assertTrue('index' in o)
self.assertTrue('timestamp' in o)
self.assertTrue('total' in o)
self.assertTrue('primaries' in o)
self.assertTrue(lines > 0)
def test_returns_index_per_line_just__all(self):
cmd = self.appendSecurityCommands(['watches', 'just_indices_stats', '-l'])
output = popen(cmd, stdout=PIPE).communicate()[0].decode('ascii')
self.assertTrue(len(output) > 0)
lines = 0
for line in output.splitlines():
lines += 1
o = json.loads(line)
self.assertTrue('index' in o)
self.assertTrue(o['index'] == '_all')
# Without specifying --level=indices we get only _all index stats
self.assertTrue(lines == 1)
| apache-2.0 |
tomekwojcik/Team-O-Matic | teamomatic/app.py | 1 | 2057 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2011 by Tomasz Wójcik <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Application factory."""
import tornado.web
import tornado.locale
import teamomatic.handlers
import logging
import os
# Routing table.
routes = [
(r'/', teamomatic.handlers.IndexHandler)
]
def create_app(config=None):
"""Instantiates and initializes the app according to config dict."""
if config == None:
raise RuntimeError('No configuration given.')
config['static_path'] = os.path.join(os.path.dirname(__file__), 'static')
tornado.locale.load_translations(os.path.join(os.path.dirname(__file__), 'translations'))
logging_config = {
'format': "%(asctime)s %(name)s <%(levelname)s>: %(message)s",
'level': logging.INFO
}
if config.get('debug', False):
logging_config['level'] = logging.DEBUG
logging.basicConfig(**logging_config)
app = tornado.web.Application(routes, **config)
return app | mit |
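# Illustrative usage sketch for the create_app() factory above (not part of the
# original module): it assumes the standard Tornado HTTPServer/IOLoop serving
# pattern; the port number and the 'debug' setting are arbitrary examples.
def example_serve():
    import tornado.httpserver
    import tornado.ioloop
    app = create_app({'debug': True})
    server = tornado.httpserver.HTTPServer(app)
    server.listen(8888)
    tornado.ioloop.IOLoop.instance().start()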
zhulin2609/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/tool/bot/commitqueuetask_unittest.py | 119 | 29255 | # Copyright (c) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime
import logging
import unittest2 as unittest
from webkitpy.common.net import bugzilla
from webkitpy.common.net.layouttestresults import LayoutTestResults
from webkitpy.common.system.executive import ScriptError
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.layout_tests.models import test_results
from webkitpy.layout_tests.models import test_failures
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.bot.commitqueuetask import *
from webkitpy.tool.bot.expectedfailures import ExpectedFailures
from webkitpy.tool.mocktool import MockTool
_log = logging.getLogger(__name__)
class MockCommitQueue(CommitQueueTaskDelegate):
def __init__(self, error_plan):
self._error_plan = error_plan
self._failure_status_id = 0
def run_command(self, command):
_log.info("run_webkit_patch: %s" % command)
if self._error_plan:
error = self._error_plan.pop(0)
if error:
raise error
def command_passed(self, success_message, patch):
_log.info("command_passed: success_message='%s' patch='%s'" % (
success_message, patch.id()))
def command_failed(self, failure_message, script_error, patch):
_log.info("command_failed: failure_message='%s' script_error='%s' patch='%s'" % (
failure_message, script_error, patch.id()))
self._failure_status_id += 1
return self._failure_status_id
def refetch_patch(self, patch):
return patch
def expected_failures(self):
return ExpectedFailures()
def test_results(self):
return None
def report_flaky_tests(self, patch, flaky_results, results_archive):
flaky_tests = [result.filename for result in flaky_results]
_log.info("report_flaky_tests: patch='%s' flaky_tests='%s' archive='%s'" % (patch.id(), flaky_tests, results_archive.filename))
def archive_last_test_results(self, patch):
_log.info("archive_last_test_results: patch='%s'" % patch.id())
archive = Mock()
archive.filename = "mock-archive-%s.zip" % patch.id()
return archive
def build_style(self):
return "both"
def did_pass_testing_ews(self, patch):
return False
class FailingTestCommitQueue(MockCommitQueue):
def __init__(self, error_plan, test_failure_plan):
MockCommitQueue.__init__(self, error_plan)
self._test_run_counter = -1 # Special value to indicate tests have never been run.
self._test_failure_plan = test_failure_plan
def run_command(self, command):
if command[0] == "build-and-test":
self._test_run_counter += 1
MockCommitQueue.run_command(self, command)
def _mock_test_result(self, testname):
return test_results.TestResult(testname, [test_failures.FailureTextMismatch()])
def test_results(self):
# Doesn't make sense to ask for the test_results until the tests have run at least once.
assert(self._test_run_counter >= 0)
failures_for_run = self._test_failure_plan[self._test_run_counter]
results = LayoutTestResults(map(self._mock_test_result, failures_for_run))
# This makes the results trustable by ExpectedFailures.
results.set_failure_limit_count(10)
return results
# We use GoldenScriptError to make sure that the code under test throws the
# correct (i.e., golden) exception.
class GoldenScriptError(ScriptError):
pass
class CommitQueueTaskTest(unittest.TestCase):
def _run_through_task(self, commit_queue, expected_logs, expected_exception=None, expect_retry=False):
self.maxDiff = None
tool = MockTool(log_executive=True)
patch = tool.bugs.fetch_attachment(10000)
task = CommitQueueTask(commit_queue, patch)
success = OutputCapture().assert_outputs(self, task.run, expected_logs=expected_logs, expected_exception=expected_exception)
if not expected_exception:
self.assertEqual(success, not expect_retry)
return task
def test_success_case(self):
commit_queue = MockCommitQueue([])
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_passed: success_message='Passed tests' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_passed: success_message='Landed patch' patch='10000'
"""
self._run_through_task(commit_queue, expected_logs)
def test_fast_success_case(self):
commit_queue = MockCommitQueue([])
commit_queue.did_pass_testing_ews = lambda patch: True
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_passed: success_message='Landed patch' patch='10000'
"""
self._run_through_task(commit_queue, expected_logs)
def test_clean_failure(self):
commit_queue = MockCommitQueue([
ScriptError("MOCK clean failure"),
])
expected_logs = """run_webkit_patch: ['clean']
command_failed: failure_message='Unable to clean working directory' script_error='MOCK clean failure' patch='10000'
"""
self._run_through_task(commit_queue, expected_logs, expect_retry=True)
def test_update_failure(self):
commit_queue = MockCommitQueue([
None,
ScriptError("MOCK update failure"),
])
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_failed: failure_message='Unable to update working directory' script_error='MOCK update failure' patch='10000'
"""
self._run_through_task(commit_queue, expected_logs, expect_retry=True)
def test_apply_failure(self):
commit_queue = MockCommitQueue([
None,
None,
GoldenScriptError("MOCK apply failure"),
])
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_failed: failure_message='Patch does not apply' script_error='MOCK apply failure' patch='10000'
"""
self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
def test_validate_changelog_failure(self):
commit_queue = MockCommitQueue([
None,
None,
None,
GoldenScriptError("MOCK validate failure"),
])
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_failed: failure_message='ChangeLog did not pass validation' script_error='MOCK validate failure' patch='10000'
"""
self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
def test_build_failure(self):
commit_queue = MockCommitQueue([
None,
None,
None,
None,
GoldenScriptError("MOCK build failure"),
])
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_failed: failure_message='Patch does not build' script_error='MOCK build failure' patch='10000'
run_webkit_patch: ['build', '--force-clean', '--no-update', '--build-style=both']
command_passed: success_message='Able to build without patch' patch='10000'
"""
self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
def test_red_build_failure(self):
commit_queue = MockCommitQueue([
None,
None,
None,
None,
ScriptError("MOCK build failure"),
ScriptError("MOCK clean build failure"),
])
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_failed: failure_message='Patch does not build' script_error='MOCK build failure' patch='10000'
run_webkit_patch: ['build', '--force-clean', '--no-update', '--build-style=both']
command_failed: failure_message='Unable to build without patch' script_error='MOCK clean build failure' patch='10000'
"""
self._run_through_task(commit_queue, expected_logs, expect_retry=True)
def test_flaky_test_failure(self):
commit_queue = MockCommitQueue([
None,
None,
None,
None,
None,
ScriptError("MOCK tests failure"),
])
# CommitQueueTask will only report flaky tests if we successfully parsed
# results.json and returned a LayoutTestResults object, so we fake one.
commit_queue.test_results = lambda: LayoutTestResults([])
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK tests failure' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_passed: success_message='Passed tests' patch='10000'
report_flaky_tests: patch='10000' flaky_tests='[]' archive='mock-archive-10000.zip'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_passed: success_message='Landed patch' patch='10000'
"""
self._run_through_task(commit_queue, expected_logs)
def test_failed_archive(self):
commit_queue = MockCommitQueue([
None,
None,
None,
None,
None,
ScriptError("MOCK tests failure"),
])
commit_queue.test_results = lambda: LayoutTestResults([])
        # It's possible for the delegate to fail to archive layout tests; don't try
        # to report flaky tests when that happens.
commit_queue.archive_last_test_results = lambda patch: None
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK tests failure' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_passed: success_message='Passed tests' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_passed: success_message='Landed patch' patch='10000'
"""
self._run_through_task(commit_queue, expected_logs)
def test_double_flaky_test_failure(self):
commit_queue = FailingTestCommitQueue([
None,
None,
None,
None,
None,
ScriptError("MOCK test failure"),
ScriptError("MOCK test failure again"),
], [
"foo.html",
"bar.html",
"foo.html",
])
# The (subtle) point of this test is that report_flaky_tests does not appear
# in the expected_logs for this run.
# Note also that there is no attempt to run the tests w/o the patch.
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
"""
tool = MockTool(log_executive=True)
patch = tool.bugs.fetch_attachment(10000)
task = CommitQueueTask(commit_queue, patch)
success = OutputCapture().assert_outputs(self, task.run, expected_logs=expected_logs)
self.assertFalse(success)
def test_test_failure(self):
commit_queue = MockCommitQueue([
None,
None,
None,
None,
None,
GoldenScriptError("MOCK test failure"),
ScriptError("MOCK test failure again"),
])
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
command_passed: success_message='Able to pass tests without patch' patch='10000'
"""
self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
def test_red_test_failure(self):
commit_queue = FailingTestCommitQueue([
None,
None,
None,
None,
None,
ScriptError("MOCK test failure"),
ScriptError("MOCK test failure again"),
ScriptError("MOCK clean test failure"),
], [
"foo.html",
"foo.html",
"foo.html",
])
# Tests always fail, and always return the same results, but we
# should still be able to land in this case!
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_passed: success_message='Landed patch' patch='10000'
"""
self._run_through_task(commit_queue, expected_logs)
def test_very_red_tree_retry(self):
lots_of_failing_tests = map(lambda num: "test-%s.html" % num, range(0, 100))
commit_queue = FailingTestCommitQueue([
None,
None,
None,
None,
None,
ScriptError("MOCK test failure"),
ScriptError("MOCK test failure again"),
ScriptError("MOCK clean test failure"),
], [
lots_of_failing_tests,
lots_of_failing_tests,
lots_of_failing_tests,
])
# Tests always fail, and return so many failures that we do not
# trust the results (see ExpectedFailures._can_trust_results) so we
# just give up and retry the patch.
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
"""
self._run_through_task(commit_queue, expected_logs, expect_retry=True)
def test_red_tree_patch_rejection(self):
commit_queue = FailingTestCommitQueue([
None,
None,
None,
None,
None,
GoldenScriptError("MOCK test failure"),
ScriptError("MOCK test failure again"),
ScriptError("MOCK clean test failure"),
], [
["foo.html", "bar.html"],
["foo.html", "bar.html"],
["foo.html"],
])
# Tests always fail, but the clean tree only fails one test
# while the patch fails two. So we should reject the patch!
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_failed: failure_message='Patch does not pass tests' script_error='MOCK test failure again' patch='10000'
archive_last_test_results: patch='10000'
run_webkit_patch: ['build-and-test', '--force-clean', '--no-update', '--build', '--test', '--non-interactive']
command_failed: failure_message='Unable to pass tests without patch (tree is red?)' script_error='MOCK clean test failure' patch='10000'
"""
task = self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
self.assertEqual(task.results_from_patch_test_run(task._patch).failing_tests(), ["foo.html", "bar.html"])
# failure_status_id should be of the test with patch (1), not the test without patch (2).
self.assertEqual(task.failure_status_id, 1)
def test_land_failure(self):
commit_queue = MockCommitQueue([
None,
None,
None,
None,
None,
None,
GoldenScriptError("MOCK land failure"),
])
expected_logs = """run_webkit_patch: ['clean']
command_passed: success_message='Cleaned working directory' patch='10000'
run_webkit_patch: ['update']
command_passed: success_message='Updated working directory' patch='10000'
run_webkit_patch: ['apply-attachment', '--no-update', '--non-interactive', 10000]
command_passed: success_message='Applied patch' patch='10000'
run_webkit_patch: ['validate-changelog', '--check-oops', '--non-interactive', 10000]
command_passed: success_message='ChangeLog validated' patch='10000'
run_webkit_patch: ['build', '--no-clean', '--no-update', '--build-style=both']
command_passed: success_message='Built patch' patch='10000'
run_webkit_patch: ['build-and-test', '--no-clean', '--no-update', '--test', '--non-interactive']
command_passed: success_message='Passed tests' patch='10000'
run_webkit_patch: ['land-attachment', '--force-clean', '--non-interactive', '--parent-command=commit-queue', 10000]
command_failed: failure_message='Unable to land patch' script_error='MOCK land failure' patch='10000'
"""
        # FIXME: This should really be expect_retry=True for a better user experience.
self._run_through_task(commit_queue, expected_logs, GoldenScriptError)
def _expect_validate(self, patch, is_valid):
class MockDelegate(object):
def refetch_patch(self, patch):
return patch
def expected_failures(self):
return ExpectedFailures()
task = CommitQueueTask(MockDelegate(), patch)
self.assertEqual(task.validate(), is_valid)
def _mock_patch(self, attachment_dict={}, bug_dict={'bug_status': 'NEW'}, committer="fake"):
bug = bugzilla.Bug(bug_dict, None)
patch = bugzilla.Attachment(attachment_dict, bug)
patch._committer = committer
return patch
def test_validate(self):
self._expect_validate(self._mock_patch(), True)
self._expect_validate(self._mock_patch({'is_obsolete': True}), False)
self._expect_validate(self._mock_patch(bug_dict={'bug_status': 'CLOSED'}), False)
self._expect_validate(self._mock_patch(committer=None), False)
self._expect_validate(self._mock_patch({'review': '-'}), False)
| bsd-3-clause |
blacklin/kbengine | kbe/res/scripts/common/Lib/encodings/cp864.py | 272 | 33663 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP864.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp864',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0025: 0x066a, # ARABIC PERCENT SIGN
0x0080: 0x00b0, # DEGREE SIGN
0x0081: 0x00b7, # MIDDLE DOT
0x0082: 0x2219, # BULLET OPERATOR
0x0083: 0x221a, # SQUARE ROOT
0x0084: 0x2592, # MEDIUM SHADE
0x0085: 0x2500, # FORMS LIGHT HORIZONTAL
0x0086: 0x2502, # FORMS LIGHT VERTICAL
0x0087: 0x253c, # FORMS LIGHT VERTICAL AND HORIZONTAL
0x0088: 0x2524, # FORMS LIGHT VERTICAL AND LEFT
0x0089: 0x252c, # FORMS LIGHT DOWN AND HORIZONTAL
0x008a: 0x251c, # FORMS LIGHT VERTICAL AND RIGHT
0x008b: 0x2534, # FORMS LIGHT UP AND HORIZONTAL
0x008c: 0x2510, # FORMS LIGHT DOWN AND LEFT
0x008d: 0x250c, # FORMS LIGHT DOWN AND RIGHT
0x008e: 0x2514, # FORMS LIGHT UP AND RIGHT
0x008f: 0x2518, # FORMS LIGHT UP AND LEFT
0x0090: 0x03b2, # GREEK SMALL BETA
0x0091: 0x221e, # INFINITY
0x0092: 0x03c6, # GREEK SMALL PHI
0x0093: 0x00b1, # PLUS-OR-MINUS SIGN
0x0094: 0x00bd, # FRACTION 1/2
0x0095: 0x00bc, # FRACTION 1/4
0x0096: 0x2248, # ALMOST EQUAL TO
0x0097: 0x00ab, # LEFT POINTING GUILLEMET
0x0098: 0x00bb, # RIGHT POINTING GUILLEMET
0x0099: 0xfef7, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
0x009a: 0xfef8, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
0x009b: None, # UNDEFINED
0x009c: None, # UNDEFINED
0x009d: 0xfefb, # ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
0x009e: 0xfefc, # ARABIC LIGATURE LAM WITH ALEF FINAL FORM
0x009f: None, # UNDEFINED
0x00a1: 0x00ad, # SOFT HYPHEN
0x00a2: 0xfe82, # ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
0x00a5: 0xfe84, # ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
0x00a6: None, # UNDEFINED
0x00a7: None, # UNDEFINED
0x00a8: 0xfe8e, # ARABIC LETTER ALEF FINAL FORM
0x00a9: 0xfe8f, # ARABIC LETTER BEH ISOLATED FORM
0x00aa: 0xfe95, # ARABIC LETTER TEH ISOLATED FORM
0x00ab: 0xfe99, # ARABIC LETTER THEH ISOLATED FORM
0x00ac: 0x060c, # ARABIC COMMA
0x00ad: 0xfe9d, # ARABIC LETTER JEEM ISOLATED FORM
0x00ae: 0xfea1, # ARABIC LETTER HAH ISOLATED FORM
0x00af: 0xfea5, # ARABIC LETTER KHAH ISOLATED FORM
0x00b0: 0x0660, # ARABIC-INDIC DIGIT ZERO
0x00b1: 0x0661, # ARABIC-INDIC DIGIT ONE
0x00b2: 0x0662, # ARABIC-INDIC DIGIT TWO
0x00b3: 0x0663, # ARABIC-INDIC DIGIT THREE
0x00b4: 0x0664, # ARABIC-INDIC DIGIT FOUR
0x00b5: 0x0665, # ARABIC-INDIC DIGIT FIVE
0x00b6: 0x0666, # ARABIC-INDIC DIGIT SIX
0x00b7: 0x0667, # ARABIC-INDIC DIGIT SEVEN
0x00b8: 0x0668, # ARABIC-INDIC DIGIT EIGHT
0x00b9: 0x0669, # ARABIC-INDIC DIGIT NINE
0x00ba: 0xfed1, # ARABIC LETTER FEH ISOLATED FORM
0x00bb: 0x061b, # ARABIC SEMICOLON
0x00bc: 0xfeb1, # ARABIC LETTER SEEN ISOLATED FORM
0x00bd: 0xfeb5, # ARABIC LETTER SHEEN ISOLATED FORM
0x00be: 0xfeb9, # ARABIC LETTER SAD ISOLATED FORM
0x00bf: 0x061f, # ARABIC QUESTION MARK
0x00c0: 0x00a2, # CENT SIGN
0x00c1: 0xfe80, # ARABIC LETTER HAMZA ISOLATED FORM
0x00c2: 0xfe81, # ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
0x00c3: 0xfe83, # ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
0x00c4: 0xfe85, # ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
0x00c5: 0xfeca, # ARABIC LETTER AIN FINAL FORM
0x00c6: 0xfe8b, # ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
0x00c7: 0xfe8d, # ARABIC LETTER ALEF ISOLATED FORM
0x00c8: 0xfe91, # ARABIC LETTER BEH INITIAL FORM
0x00c9: 0xfe93, # ARABIC LETTER TEH MARBUTA ISOLATED FORM
0x00ca: 0xfe97, # ARABIC LETTER TEH INITIAL FORM
0x00cb: 0xfe9b, # ARABIC LETTER THEH INITIAL FORM
0x00cc: 0xfe9f, # ARABIC LETTER JEEM INITIAL FORM
0x00cd: 0xfea3, # ARABIC LETTER HAH INITIAL FORM
0x00ce: 0xfea7, # ARABIC LETTER KHAH INITIAL FORM
0x00cf: 0xfea9, # ARABIC LETTER DAL ISOLATED FORM
0x00d0: 0xfeab, # ARABIC LETTER THAL ISOLATED FORM
0x00d1: 0xfead, # ARABIC LETTER REH ISOLATED FORM
0x00d2: 0xfeaf, # ARABIC LETTER ZAIN ISOLATED FORM
0x00d3: 0xfeb3, # ARABIC LETTER SEEN INITIAL FORM
0x00d4: 0xfeb7, # ARABIC LETTER SHEEN INITIAL FORM
0x00d5: 0xfebb, # ARABIC LETTER SAD INITIAL FORM
0x00d6: 0xfebf, # ARABIC LETTER DAD INITIAL FORM
0x00d7: 0xfec1, # ARABIC LETTER TAH ISOLATED FORM
0x00d8: 0xfec5, # ARABIC LETTER ZAH ISOLATED FORM
0x00d9: 0xfecb, # ARABIC LETTER AIN INITIAL FORM
0x00da: 0xfecf, # ARABIC LETTER GHAIN INITIAL FORM
0x00db: 0x00a6, # BROKEN VERTICAL BAR
0x00dc: 0x00ac, # NOT SIGN
0x00dd: 0x00f7, # DIVISION SIGN
0x00de: 0x00d7, # MULTIPLICATION SIGN
0x00df: 0xfec9, # ARABIC LETTER AIN ISOLATED FORM
0x00e0: 0x0640, # ARABIC TATWEEL
0x00e1: 0xfed3, # ARABIC LETTER FEH INITIAL FORM
0x00e2: 0xfed7, # ARABIC LETTER QAF INITIAL FORM
0x00e3: 0xfedb, # ARABIC LETTER KAF INITIAL FORM
0x00e4: 0xfedf, # ARABIC LETTER LAM INITIAL FORM
0x00e5: 0xfee3, # ARABIC LETTER MEEM INITIAL FORM
0x00e6: 0xfee7, # ARABIC LETTER NOON INITIAL FORM
0x00e7: 0xfeeb, # ARABIC LETTER HEH INITIAL FORM
0x00e8: 0xfeed, # ARABIC LETTER WAW ISOLATED FORM
0x00e9: 0xfeef, # ARABIC LETTER ALEF MAKSURA ISOLATED FORM
0x00ea: 0xfef3, # ARABIC LETTER YEH INITIAL FORM
0x00eb: 0xfebd, # ARABIC LETTER DAD ISOLATED FORM
0x00ec: 0xfecc, # ARABIC LETTER AIN MEDIAL FORM
0x00ed: 0xfece, # ARABIC LETTER GHAIN FINAL FORM
0x00ee: 0xfecd, # ARABIC LETTER GHAIN ISOLATED FORM
0x00ef: 0xfee1, # ARABIC LETTER MEEM ISOLATED FORM
0x00f0: 0xfe7d, # ARABIC SHADDA MEDIAL FORM
0x00f1: 0x0651, # ARABIC SHADDAH
0x00f2: 0xfee5, # ARABIC LETTER NOON ISOLATED FORM
0x00f3: 0xfee9, # ARABIC LETTER HEH ISOLATED FORM
0x00f4: 0xfeec, # ARABIC LETTER HEH MEDIAL FORM
0x00f5: 0xfef0, # ARABIC LETTER ALEF MAKSURA FINAL FORM
0x00f6: 0xfef2, # ARABIC LETTER YEH FINAL FORM
0x00f7: 0xfed0, # ARABIC LETTER GHAIN MEDIAL FORM
0x00f8: 0xfed5, # ARABIC LETTER QAF ISOLATED FORM
0x00f9: 0xfef5, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
0x00fa: 0xfef6, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
0x00fb: 0xfedd, # ARABIC LETTER LAM ISOLATED FORM
0x00fc: 0xfed9, # ARABIC LETTER KAF ISOLATED FORM
0x00fd: 0xfef1, # ARABIC LETTER YEH ISOLATED FORM
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: None, # UNDEFINED
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'\u066a' # 0x0025 -> ARABIC PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\xb0' # 0x0080 -> DEGREE SIGN
'\xb7' # 0x0081 -> MIDDLE DOT
'\u2219' # 0x0082 -> BULLET OPERATOR
'\u221a' # 0x0083 -> SQUARE ROOT
'\u2592' # 0x0084 -> MEDIUM SHADE
'\u2500' # 0x0085 -> FORMS LIGHT HORIZONTAL
'\u2502' # 0x0086 -> FORMS LIGHT VERTICAL
'\u253c' # 0x0087 -> FORMS LIGHT VERTICAL AND HORIZONTAL
'\u2524' # 0x0088 -> FORMS LIGHT VERTICAL AND LEFT
'\u252c' # 0x0089 -> FORMS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x008a -> FORMS LIGHT VERTICAL AND RIGHT
'\u2534' # 0x008b -> FORMS LIGHT UP AND HORIZONTAL
'\u2510' # 0x008c -> FORMS LIGHT DOWN AND LEFT
'\u250c' # 0x008d -> FORMS LIGHT DOWN AND RIGHT
'\u2514' # 0x008e -> FORMS LIGHT UP AND RIGHT
'\u2518' # 0x008f -> FORMS LIGHT UP AND LEFT
'\u03b2' # 0x0090 -> GREEK SMALL BETA
'\u221e' # 0x0091 -> INFINITY
'\u03c6' # 0x0092 -> GREEK SMALL PHI
'\xb1' # 0x0093 -> PLUS-OR-MINUS SIGN
'\xbd' # 0x0094 -> FRACTION 1/2
'\xbc' # 0x0095 -> FRACTION 1/4
'\u2248' # 0x0096 -> ALMOST EQUAL TO
'\xab' # 0x0097 -> LEFT POINTING GUILLEMET
'\xbb' # 0x0098 -> RIGHT POINTING GUILLEMET
'\ufef7' # 0x0099 -> ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
'\ufef8' # 0x009a -> ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
'\ufffe' # 0x009b -> UNDEFINED
'\ufffe' # 0x009c -> UNDEFINED
'\ufefb' # 0x009d -> ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
'\ufefc' # 0x009e -> ARABIC LIGATURE LAM WITH ALEF FINAL FORM
'\ufffe' # 0x009f -> UNDEFINED
'\xa0' # 0x00a0 -> NON-BREAKING SPACE
'\xad' # 0x00a1 -> SOFT HYPHEN
'\ufe82' # 0x00a2 -> ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
'\xa3' # 0x00a3 -> POUND SIGN
'\xa4' # 0x00a4 -> CURRENCY SIGN
'\ufe84' # 0x00a5 -> ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
'\ufffe' # 0x00a6 -> UNDEFINED
'\ufffe' # 0x00a7 -> UNDEFINED
'\ufe8e' # 0x00a8 -> ARABIC LETTER ALEF FINAL FORM
'\ufe8f' # 0x00a9 -> ARABIC LETTER BEH ISOLATED FORM
'\ufe95' # 0x00aa -> ARABIC LETTER TEH ISOLATED FORM
'\ufe99' # 0x00ab -> ARABIC LETTER THEH ISOLATED FORM
'\u060c' # 0x00ac -> ARABIC COMMA
'\ufe9d' # 0x00ad -> ARABIC LETTER JEEM ISOLATED FORM
'\ufea1' # 0x00ae -> ARABIC LETTER HAH ISOLATED FORM
'\ufea5' # 0x00af -> ARABIC LETTER KHAH ISOLATED FORM
'\u0660' # 0x00b0 -> ARABIC-INDIC DIGIT ZERO
'\u0661' # 0x00b1 -> ARABIC-INDIC DIGIT ONE
'\u0662' # 0x00b2 -> ARABIC-INDIC DIGIT TWO
'\u0663' # 0x00b3 -> ARABIC-INDIC DIGIT THREE
'\u0664' # 0x00b4 -> ARABIC-INDIC DIGIT FOUR
'\u0665' # 0x00b5 -> ARABIC-INDIC DIGIT FIVE
'\u0666' # 0x00b6 -> ARABIC-INDIC DIGIT SIX
'\u0667' # 0x00b7 -> ARABIC-INDIC DIGIT SEVEN
'\u0668' # 0x00b8 -> ARABIC-INDIC DIGIT EIGHT
'\u0669' # 0x00b9 -> ARABIC-INDIC DIGIT NINE
'\ufed1' # 0x00ba -> ARABIC LETTER FEH ISOLATED FORM
'\u061b' # 0x00bb -> ARABIC SEMICOLON
'\ufeb1' # 0x00bc -> ARABIC LETTER SEEN ISOLATED FORM
'\ufeb5' # 0x00bd -> ARABIC LETTER SHEEN ISOLATED FORM
'\ufeb9' # 0x00be -> ARABIC LETTER SAD ISOLATED FORM
'\u061f' # 0x00bf -> ARABIC QUESTION MARK
'\xa2' # 0x00c0 -> CENT SIGN
'\ufe80' # 0x00c1 -> ARABIC LETTER HAMZA ISOLATED FORM
'\ufe81' # 0x00c2 -> ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
'\ufe83' # 0x00c3 -> ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
'\ufe85' # 0x00c4 -> ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
'\ufeca' # 0x00c5 -> ARABIC LETTER AIN FINAL FORM
'\ufe8b' # 0x00c6 -> ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
'\ufe8d' # 0x00c7 -> ARABIC LETTER ALEF ISOLATED FORM
'\ufe91' # 0x00c8 -> ARABIC LETTER BEH INITIAL FORM
'\ufe93' # 0x00c9 -> ARABIC LETTER TEH MARBUTA ISOLATED FORM
'\ufe97' # 0x00ca -> ARABIC LETTER TEH INITIAL FORM
'\ufe9b' # 0x00cb -> ARABIC LETTER THEH INITIAL FORM
'\ufe9f' # 0x00cc -> ARABIC LETTER JEEM INITIAL FORM
'\ufea3' # 0x00cd -> ARABIC LETTER HAH INITIAL FORM
'\ufea7' # 0x00ce -> ARABIC LETTER KHAH INITIAL FORM
'\ufea9' # 0x00cf -> ARABIC LETTER DAL ISOLATED FORM
'\ufeab' # 0x00d0 -> ARABIC LETTER THAL ISOLATED FORM
'\ufead' # 0x00d1 -> ARABIC LETTER REH ISOLATED FORM
'\ufeaf' # 0x00d2 -> ARABIC LETTER ZAIN ISOLATED FORM
'\ufeb3' # 0x00d3 -> ARABIC LETTER SEEN INITIAL FORM
'\ufeb7' # 0x00d4 -> ARABIC LETTER SHEEN INITIAL FORM
'\ufebb' # 0x00d5 -> ARABIC LETTER SAD INITIAL FORM
'\ufebf' # 0x00d6 -> ARABIC LETTER DAD INITIAL FORM
'\ufec1' # 0x00d7 -> ARABIC LETTER TAH ISOLATED FORM
'\ufec5' # 0x00d8 -> ARABIC LETTER ZAH ISOLATED FORM
'\ufecb' # 0x00d9 -> ARABIC LETTER AIN INITIAL FORM
'\ufecf' # 0x00da -> ARABIC LETTER GHAIN INITIAL FORM
'\xa6' # 0x00db -> BROKEN VERTICAL BAR
'\xac' # 0x00dc -> NOT SIGN
'\xf7' # 0x00dd -> DIVISION SIGN
'\xd7' # 0x00de -> MULTIPLICATION SIGN
'\ufec9' # 0x00df -> ARABIC LETTER AIN ISOLATED FORM
'\u0640' # 0x00e0 -> ARABIC TATWEEL
'\ufed3' # 0x00e1 -> ARABIC LETTER FEH INITIAL FORM
'\ufed7' # 0x00e2 -> ARABIC LETTER QAF INITIAL FORM
'\ufedb' # 0x00e3 -> ARABIC LETTER KAF INITIAL FORM
'\ufedf' # 0x00e4 -> ARABIC LETTER LAM INITIAL FORM
'\ufee3' # 0x00e5 -> ARABIC LETTER MEEM INITIAL FORM
'\ufee7' # 0x00e6 -> ARABIC LETTER NOON INITIAL FORM
'\ufeeb' # 0x00e7 -> ARABIC LETTER HEH INITIAL FORM
'\ufeed' # 0x00e8 -> ARABIC LETTER WAW ISOLATED FORM
'\ufeef' # 0x00e9 -> ARABIC LETTER ALEF MAKSURA ISOLATED FORM
'\ufef3' # 0x00ea -> ARABIC LETTER YEH INITIAL FORM
'\ufebd' # 0x00eb -> ARABIC LETTER DAD ISOLATED FORM
'\ufecc' # 0x00ec -> ARABIC LETTER AIN MEDIAL FORM
'\ufece' # 0x00ed -> ARABIC LETTER GHAIN FINAL FORM
'\ufecd' # 0x00ee -> ARABIC LETTER GHAIN ISOLATED FORM
'\ufee1' # 0x00ef -> ARABIC LETTER MEEM ISOLATED FORM
'\ufe7d' # 0x00f0 -> ARABIC SHADDA MEDIAL FORM
'\u0651' # 0x00f1 -> ARABIC SHADDAH
'\ufee5' # 0x00f2 -> ARABIC LETTER NOON ISOLATED FORM
'\ufee9' # 0x00f3 -> ARABIC LETTER HEH ISOLATED FORM
'\ufeec' # 0x00f4 -> ARABIC LETTER HEH MEDIAL FORM
'\ufef0' # 0x00f5 -> ARABIC LETTER ALEF MAKSURA FINAL FORM
'\ufef2' # 0x00f6 -> ARABIC LETTER YEH FINAL FORM
'\ufed0' # 0x00f7 -> ARABIC LETTER GHAIN MEDIAL FORM
'\ufed5' # 0x00f8 -> ARABIC LETTER QAF ISOLATED FORM
'\ufef5' # 0x00f9 -> ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
'\ufef6' # 0x00fa -> ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
'\ufedd' # 0x00fb -> ARABIC LETTER LAM ISOLATED FORM
'\ufed9' # 0x00fc -> ARABIC LETTER KAF ISOLATED FORM
'\ufef1' # 0x00fd -> ARABIC LETTER YEH ISOLATED FORM
'\u25a0' # 0x00fe -> BLACK SQUARE
'\ufffe' # 0x00ff -> UNDEFINED
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00a0, # NON-BREAKING SPACE
0x00a2: 0x00c0, # CENT SIGN
0x00a3: 0x00a3, # POUND SIGN
0x00a4: 0x00a4, # CURRENCY SIGN
0x00a6: 0x00db, # BROKEN VERTICAL BAR
0x00ab: 0x0097, # LEFT POINTING GUILLEMET
0x00ac: 0x00dc, # NOT SIGN
0x00ad: 0x00a1, # SOFT HYPHEN
0x00b0: 0x0080, # DEGREE SIGN
0x00b1: 0x0093, # PLUS-OR-MINUS SIGN
0x00b7: 0x0081, # MIDDLE DOT
0x00bb: 0x0098, # RIGHT POINTING GUILLEMET
0x00bc: 0x0095, # FRACTION 1/4
0x00bd: 0x0094, # FRACTION 1/2
0x00d7: 0x00de, # MULTIPLICATION SIGN
0x00f7: 0x00dd, # DIVISION SIGN
0x03b2: 0x0090, # GREEK SMALL BETA
0x03c6: 0x0092, # GREEK SMALL PHI
0x060c: 0x00ac, # ARABIC COMMA
0x061b: 0x00bb, # ARABIC SEMICOLON
0x061f: 0x00bf, # ARABIC QUESTION MARK
0x0640: 0x00e0, # ARABIC TATWEEL
0x0651: 0x00f1, # ARABIC SHADDAH
0x0660: 0x00b0, # ARABIC-INDIC DIGIT ZERO
0x0661: 0x00b1, # ARABIC-INDIC DIGIT ONE
0x0662: 0x00b2, # ARABIC-INDIC DIGIT TWO
0x0663: 0x00b3, # ARABIC-INDIC DIGIT THREE
0x0664: 0x00b4, # ARABIC-INDIC DIGIT FOUR
0x0665: 0x00b5, # ARABIC-INDIC DIGIT FIVE
0x0666: 0x00b6, # ARABIC-INDIC DIGIT SIX
0x0667: 0x00b7, # ARABIC-INDIC DIGIT SEVEN
0x0668: 0x00b8, # ARABIC-INDIC DIGIT EIGHT
0x0669: 0x00b9, # ARABIC-INDIC DIGIT NINE
0x066a: 0x0025, # ARABIC PERCENT SIGN
0x2219: 0x0082, # BULLET OPERATOR
0x221a: 0x0083, # SQUARE ROOT
0x221e: 0x0091, # INFINITY
0x2248: 0x0096, # ALMOST EQUAL TO
0x2500: 0x0085, # FORMS LIGHT HORIZONTAL
0x2502: 0x0086, # FORMS LIGHT VERTICAL
0x250c: 0x008d, # FORMS LIGHT DOWN AND RIGHT
0x2510: 0x008c, # FORMS LIGHT DOWN AND LEFT
0x2514: 0x008e, # FORMS LIGHT UP AND RIGHT
0x2518: 0x008f, # FORMS LIGHT UP AND LEFT
0x251c: 0x008a, # FORMS LIGHT VERTICAL AND RIGHT
0x2524: 0x0088, # FORMS LIGHT VERTICAL AND LEFT
0x252c: 0x0089, # FORMS LIGHT DOWN AND HORIZONTAL
0x2534: 0x008b, # FORMS LIGHT UP AND HORIZONTAL
0x253c: 0x0087, # FORMS LIGHT VERTICAL AND HORIZONTAL
0x2592: 0x0084, # MEDIUM SHADE
0x25a0: 0x00fe, # BLACK SQUARE
0xfe7d: 0x00f0, # ARABIC SHADDA MEDIAL FORM
0xfe80: 0x00c1, # ARABIC LETTER HAMZA ISOLATED FORM
0xfe81: 0x00c2, # ARABIC LETTER ALEF WITH MADDA ABOVE ISOLATED FORM
0xfe82: 0x00a2, # ARABIC LETTER ALEF WITH MADDA ABOVE FINAL FORM
0xfe83: 0x00c3, # ARABIC LETTER ALEF WITH HAMZA ABOVE ISOLATED FORM
0xfe84: 0x00a5, # ARABIC LETTER ALEF WITH HAMZA ABOVE FINAL FORM
0xfe85: 0x00c4, # ARABIC LETTER WAW WITH HAMZA ABOVE ISOLATED FORM
0xfe8b: 0x00c6, # ARABIC LETTER YEH WITH HAMZA ABOVE INITIAL FORM
0xfe8d: 0x00c7, # ARABIC LETTER ALEF ISOLATED FORM
0xfe8e: 0x00a8, # ARABIC LETTER ALEF FINAL FORM
0xfe8f: 0x00a9, # ARABIC LETTER BEH ISOLATED FORM
0xfe91: 0x00c8, # ARABIC LETTER BEH INITIAL FORM
0xfe93: 0x00c9, # ARABIC LETTER TEH MARBUTA ISOLATED FORM
0xfe95: 0x00aa, # ARABIC LETTER TEH ISOLATED FORM
0xfe97: 0x00ca, # ARABIC LETTER TEH INITIAL FORM
0xfe99: 0x00ab, # ARABIC LETTER THEH ISOLATED FORM
0xfe9b: 0x00cb, # ARABIC LETTER THEH INITIAL FORM
0xfe9d: 0x00ad, # ARABIC LETTER JEEM ISOLATED FORM
0xfe9f: 0x00cc, # ARABIC LETTER JEEM INITIAL FORM
0xfea1: 0x00ae, # ARABIC LETTER HAH ISOLATED FORM
0xfea3: 0x00cd, # ARABIC LETTER HAH INITIAL FORM
0xfea5: 0x00af, # ARABIC LETTER KHAH ISOLATED FORM
0xfea7: 0x00ce, # ARABIC LETTER KHAH INITIAL FORM
0xfea9: 0x00cf, # ARABIC LETTER DAL ISOLATED FORM
0xfeab: 0x00d0, # ARABIC LETTER THAL ISOLATED FORM
0xfead: 0x00d1, # ARABIC LETTER REH ISOLATED FORM
0xfeaf: 0x00d2, # ARABIC LETTER ZAIN ISOLATED FORM
0xfeb1: 0x00bc, # ARABIC LETTER SEEN ISOLATED FORM
0xfeb3: 0x00d3, # ARABIC LETTER SEEN INITIAL FORM
0xfeb5: 0x00bd, # ARABIC LETTER SHEEN ISOLATED FORM
0xfeb7: 0x00d4, # ARABIC LETTER SHEEN INITIAL FORM
0xfeb9: 0x00be, # ARABIC LETTER SAD ISOLATED FORM
0xfebb: 0x00d5, # ARABIC LETTER SAD INITIAL FORM
0xfebd: 0x00eb, # ARABIC LETTER DAD ISOLATED FORM
0xfebf: 0x00d6, # ARABIC LETTER DAD INITIAL FORM
0xfec1: 0x00d7, # ARABIC LETTER TAH ISOLATED FORM
0xfec5: 0x00d8, # ARABIC LETTER ZAH ISOLATED FORM
0xfec9: 0x00df, # ARABIC LETTER AIN ISOLATED FORM
0xfeca: 0x00c5, # ARABIC LETTER AIN FINAL FORM
0xfecb: 0x00d9, # ARABIC LETTER AIN INITIAL FORM
0xfecc: 0x00ec, # ARABIC LETTER AIN MEDIAL FORM
0xfecd: 0x00ee, # ARABIC LETTER GHAIN ISOLATED FORM
0xfece: 0x00ed, # ARABIC LETTER GHAIN FINAL FORM
0xfecf: 0x00da, # ARABIC LETTER GHAIN INITIAL FORM
0xfed0: 0x00f7, # ARABIC LETTER GHAIN MEDIAL FORM
0xfed1: 0x00ba, # ARABIC LETTER FEH ISOLATED FORM
0xfed3: 0x00e1, # ARABIC LETTER FEH INITIAL FORM
0xfed5: 0x00f8, # ARABIC LETTER QAF ISOLATED FORM
0xfed7: 0x00e2, # ARABIC LETTER QAF INITIAL FORM
0xfed9: 0x00fc, # ARABIC LETTER KAF ISOLATED FORM
0xfedb: 0x00e3, # ARABIC LETTER KAF INITIAL FORM
0xfedd: 0x00fb, # ARABIC LETTER LAM ISOLATED FORM
0xfedf: 0x00e4, # ARABIC LETTER LAM INITIAL FORM
0xfee1: 0x00ef, # ARABIC LETTER MEEM ISOLATED FORM
0xfee3: 0x00e5, # ARABIC LETTER MEEM INITIAL FORM
0xfee5: 0x00f2, # ARABIC LETTER NOON ISOLATED FORM
0xfee7: 0x00e6, # ARABIC LETTER NOON INITIAL FORM
0xfee9: 0x00f3, # ARABIC LETTER HEH ISOLATED FORM
0xfeeb: 0x00e7, # ARABIC LETTER HEH INITIAL FORM
0xfeec: 0x00f4, # ARABIC LETTER HEH MEDIAL FORM
0xfeed: 0x00e8, # ARABIC LETTER WAW ISOLATED FORM
0xfeef: 0x00e9, # ARABIC LETTER ALEF MAKSURA ISOLATED FORM
0xfef0: 0x00f5, # ARABIC LETTER ALEF MAKSURA FINAL FORM
0xfef1: 0x00fd, # ARABIC LETTER YEH ISOLATED FORM
0xfef2: 0x00f6, # ARABIC LETTER YEH FINAL FORM
0xfef3: 0x00ea, # ARABIC LETTER YEH INITIAL FORM
0xfef5: 0x00f9, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE ISOLATED FORM
0xfef6: 0x00fa, # ARABIC LIGATURE LAM WITH ALEF WITH MADDA ABOVE FINAL FORM
0xfef7: 0x0099, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE ISOLATED FORM
0xfef8: 0x009a, # ARABIC LIGATURE LAM WITH ALEF WITH HAMZA ABOVE FINAL FORM
0xfefb: 0x009d, # ARABIC LIGATURE LAM WITH ALEF ISOLATED FORM
0xfefc: 0x009e, # ARABIC LIGATURE LAM WITH ALEF FINAL FORM
}
| lgpl-3.0 |
vrv/tensorflow | tensorflow/contrib/tensor_forest/hybrid/python/kernel_tests/k_feature_routing_function_op_test.py | 103 | 3383 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the routing function op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.tensor_forest.hybrid.ops import gen_training_ops
from tensorflow.contrib.tensor_forest.hybrid.python.ops import training_ops
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class KFeatureRoutingFunctionTest(test_util.TensorFlowTestCase):
def setUp(self):
self.input_data = [[-1., 0.], [-1., 2.],
[1., 0.], [1., -2.]]
self.input_labels = [0., 1., 2., 3.]
self.tree = [[1, 0], [-1, 0], [-1, 0]]
self.tree_weights = [[1.0, 0.0], [1.0, 0.0], [1.0, 0.0]]
self.tree_thresholds = [0., 0., 0.]
self.ops = training_ops.Load()
self.params = tensor_forest.ForestHParams(
num_features=2,
hybrid_tree_depth=2,
base_random_seed=10,
feature_bagging_fraction=1.0,
regularization_strength=0.01,
regularization="",
weight_init_mean=0.0,
weight_init_std=0.1)
self.params.num_nodes = 2**self.params.hybrid_tree_depth - 1
self.params.num_leaves = 2**(self.params.hybrid_tree_depth - 1)
self.params.num_features_per_node = (
self.params.feature_bagging_fraction * self.params.num_features)
self.params.regression = False
def testParams(self):
self.assertEquals(self.params.num_nodes, 3)
self.assertEquals(self.params.num_features, 2)
self.assertEquals(self.params.num_features_per_node, 2)
def testRoutingFunction(self):
with self.test_session():
route_tensor = gen_training_ops.k_feature_routing_function(
self.input_data,
self.tree_weights,
self.tree_thresholds,
max_nodes=self.params.num_nodes,
num_features_per_node=self.params.num_features_per_node,
layer_num=0,
random_seed=self.params.base_random_seed)
route_tensor_shape = route_tensor.get_shape()
self.assertEquals(len(route_tensor_shape), 2)
self.assertEquals(route_tensor_shape[0], 4)
self.assertEquals(route_tensor_shape[1], 3)
routes = route_tensor.eval()
print(routes)
# Point 1
# Node 1 is a decision node => probability = 1.0
self.assertAlmostEquals(1.0, routes[0, 0])
# Probability left output = 1.0 / (1.0 + exp(1.0)) = 0.26894142
self.assertAlmostEquals(0.26894142, routes[0, 1])
# Probability right = 1 - 0.2689414 = 0.73105858
self.assertAlmostEquals(0.73105858, routes[0, 2])
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
Trixter69/BookMarka | lib/cherrypy/_cptools.py | 55 | 19093 | """CherryPy tools. A "tool" is any helper, adapted to CP.
Tools are usually designed to be used in a variety of ways (although some
may only offer one if they choose):
Library calls
All tools are callables that can be used wherever needed.
The arguments are straightforward and should be detailed within the
docstring.
Function decorators
All tools, when called, may be used as decorators which configure
individual CherryPy page handlers (methods on the CherryPy tree).
That is, "@tools.anytool()" should "turn on" the tool via the
decorated function's _cp_config attribute.
CherryPy config
If a tool exposes a "_setup" callable, it will be called
once per Request (if the feature is "turned on" via config).
Tools may be implemented as any object with a namespace. The builtins
are generally either modules or instances of the tools.Tool class.
"""
import sys
import warnings
import cherrypy
def _getargs(func):
"""Return the names of all static arguments to the given function."""
# Use this instead of importing inspect for less mem overhead.
import types
if sys.version_info >= (3, 0):
if isinstance(func, types.MethodType):
func = func.__func__
co = func.__code__
else:
if isinstance(func, types.MethodType):
func = func.im_func
co = func.func_code
return co.co_varnames[:co.co_argcount]
_attr_error = (
"CherryPy Tools cannot be turned on directly. Instead, turn them "
"on via config, or use them as decorators on your page handlers."
)
class Tool(object):
"""A registered function for use with CherryPy request-processing hooks.
help(tool.callable) should give you more information about this Tool.
"""
namespace = "tools"
def __init__(self, point, callable, name=None, priority=50):
self._point = point
self.callable = callable
self._name = name
self._priority = priority
self.__doc__ = self.callable.__doc__
self._setargs()
def _get_on(self):
raise AttributeError(_attr_error)
def _set_on(self, value):
raise AttributeError(_attr_error)
on = property(_get_on, _set_on)
def _setargs(self):
"""Copy func parameter names to obj attributes."""
try:
for arg in _getargs(self.callable):
setattr(self, arg, None)
except (TypeError, AttributeError):
if hasattr(self.callable, "__call__"):
for arg in _getargs(self.callable.__call__):
setattr(self, arg, None)
# IronPython 1.0 raises NotImplementedError because
# inspect.getargspec tries to access Python bytecode
# in co_code attribute.
except NotImplementedError:
pass
# IronPython 1B1 may raise IndexError in some cases,
# but if we trap it here it doesn't prevent CP from working.
except IndexError:
pass
def _merged_args(self, d=None):
"""Return a dict of configuration entries for this Tool."""
if d:
conf = d.copy()
else:
conf = {}
tm = cherrypy.serving.request.toolmaps[self.namespace]
if self._name in tm:
conf.update(tm[self._name])
if "on" in conf:
del conf["on"]
return conf
def __call__(self, *args, **kwargs):
"""Compile-time decorator (turn on the tool in config).
For example::
@tools.proxy()
def whats_my_base(self):
return cherrypy.request.base
whats_my_base.exposed = True
"""
if args:
raise TypeError("The %r Tool does not accept positional "
"arguments; you must use keyword arguments."
% self._name)
def tool_decorator(f):
if not hasattr(f, "_cp_config"):
f._cp_config = {}
subspace = self.namespace + "." + self._name + "."
f._cp_config[subspace + "on"] = True
for k, v in kwargs.items():
f._cp_config[subspace + k] = v
return f
return tool_decorator
def _setup(self):
"""Hook this tool into cherrypy.request.
The standard CherryPy request object will automatically call this
method when the tool is "turned on" in config.
"""
conf = self._merged_args()
p = conf.pop("priority", None)
if p is None:
p = getattr(self.callable, "priority", self._priority)
cherrypy.serving.request.hooks.attach(self._point, self.callable,
priority=p, **conf)
class HandlerTool(Tool):
"""Tool which is called 'before main', that may skip normal handlers.
If the tool successfully handles the request (by setting response.body),
    it should return True. This will cause CherryPy to skip any 'normal' page
handler. If the tool did not handle the request, it should return False
to tell CherryPy to continue on and call the normal page handler. If the
tool is declared AS a page handler (see the 'handler' method), returning
False will raise NotFound.
"""
def __init__(self, callable, name=None):
Tool.__init__(self, 'before_handler', callable, name)
def handler(self, *args, **kwargs):
"""Use this tool as a CherryPy page handler.
For example::
class Root:
nav = tools.staticdir.handler(section="/nav", dir="nav",
root=absDir)
"""
def handle_func(*a, **kw):
handled = self.callable(*args, **self._merged_args(kwargs))
if not handled:
raise cherrypy.NotFound()
return cherrypy.serving.response.body
handle_func.exposed = True
return handle_func
def _wrapper(self, **kwargs):
if self.callable(**kwargs):
cherrypy.serving.request.handler = None
def _setup(self):
"""Hook this tool into cherrypy.request.
The standard CherryPy request object will automatically call this
method when the tool is "turned on" in config.
"""
conf = self._merged_args()
p = conf.pop("priority", None)
if p is None:
p = getattr(self.callable, "priority", self._priority)
cherrypy.serving.request.hooks.attach(self._point, self._wrapper,
priority=p, **conf)
class HandlerWrapperTool(Tool):
"""Tool which wraps request.handler in a provided wrapper function.
The 'newhandler' arg must be a handler wrapper function that takes a
'next_handler' argument, plus ``*args`` and ``**kwargs``. Like all
page handler
functions, it must return an iterable for use as cherrypy.response.body.
For example, to allow your 'inner' page handlers to return dicts
which then get interpolated into a template::
def interpolator(next_handler, *args, **kwargs):
filename = cherrypy.request.config.get('template')
cherrypy.response.template = env.get_template(filename)
response_dict = next_handler(*args, **kwargs)
return cherrypy.response.template.render(**response_dict)
cherrypy.tools.jinja = HandlerWrapperTool(interpolator)
"""
def __init__(self, newhandler, point='before_handler', name=None,
priority=50):
self.newhandler = newhandler
self._point = point
self._name = name
self._priority = priority
def callable(self, *args, **kwargs):
innerfunc = cherrypy.serving.request.handler
def wrap(*args, **kwargs):
return self.newhandler(innerfunc, *args, **kwargs)
cherrypy.serving.request.handler = wrap
class ErrorTool(Tool):
"""Tool which is used to replace the default request.error_response."""
def __init__(self, callable, name=None):
Tool.__init__(self, None, callable, name)
def _wrapper(self):
self.callable(**self._merged_args())
def _setup(self):
"""Hook this tool into cherrypy.request.
The standard CherryPy request object will automatically call this
method when the tool is "turned on" in config.
"""
cherrypy.serving.request.error_response = self._wrapper
# Builtin tools #
from cherrypy.lib import cptools, encoding, auth, static, jsontools
from cherrypy.lib import sessions as _sessions, xmlrpcutil as _xmlrpc
from cherrypy.lib import caching as _caching
from cherrypy.lib import auth_basic, auth_digest
class SessionTool(Tool):
"""Session Tool for CherryPy.
sessions.locking
When 'implicit' (the default), the session will be locked for you,
just before running the page handler.
When 'early', the session will be locked before reading the request
body. This is off by default for safety reasons; for example,
a large upload would block the session, denying an AJAX
progress meter
(`issue <https://bitbucket.org/cherrypy/cherrypy/issue/630>`_).
When 'explicit' (or any other value), you need to call
cherrypy.session.acquire_lock() yourself before using
session data.
"""
def __init__(self):
# _sessions.init must be bound after headers are read
Tool.__init__(self, 'before_request_body', _sessions.init)
def _lock_session(self):
cherrypy.serving.session.acquire_lock()
def _setup(self):
"""Hook this tool into cherrypy.request.
The standard CherryPy request object will automatically call this
method when the tool is "turned on" in config.
"""
hooks = cherrypy.serving.request.hooks
conf = self._merged_args()
p = conf.pop("priority", None)
if p is None:
p = getattr(self.callable, "priority", self._priority)
hooks.attach(self._point, self.callable, priority=p, **conf)
locking = conf.pop('locking', 'implicit')
if locking == 'implicit':
hooks.attach('before_handler', self._lock_session)
elif locking == 'early':
# Lock before the request body (but after _sessions.init runs!)
hooks.attach('before_request_body', self._lock_session,
priority=60)
else:
# Don't lock
pass
hooks.attach('before_finalize', _sessions.save)
hooks.attach('on_end_request', _sessions.close)
def regenerate(self):
"""Drop the current session and make a new one (with a new id)."""
sess = cherrypy.serving.session
sess.regenerate()
# Grab cookie-relevant tool args
conf = dict([(k, v) for k, v in self._merged_args().items()
if k in ('path', 'path_header', 'name', 'timeout',
'domain', 'secure')])
_sessions.set_response_cookie(**conf)
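# Editorial addition, not part of upstream CherryPy: a minimal sketch showing
# how the SessionTool 'locking' modes described above are selected via config.
# The timeout value is an illustrative assumption.
def _session_tool_config_example():
    cherrypy.config.update({
        'tools.sessions.on': True,             # turn the tool on
        'tools.sessions.locking': 'explicit',  # caller must acquire_lock()
        'tools.sessions.timeout': 60,
    })
    # With 'explicit' locking, a handler must call
    # cherrypy.session.acquire_lock() before touching session data.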
class XMLRPCController(object):
"""A Controller (page handler collection) for XML-RPC.
To use it, have your controllers subclass this base class (it will
turn on the tool for you).
You can also supply the following optional config entries::
tools.xmlrpc.encoding: 'utf-8'
tools.xmlrpc.allow_none: 0
XML-RPC is a rather discontinuous layer over HTTP; dispatching to the
appropriate handler must first be performed according to the URL, and
then a second dispatch step must take place according to the RPC method
specified in the request body. It also allows a superfluous "/RPC2"
prefix in the URL, supplies its own handler args in the body, and
requires a 200 OK "Fault" response instead of 404 when the desired
method is not found.
Therefore, XML-RPC cannot be implemented for CherryPy via a Tool alone.
This Controller acts as the dispatch target for the first half (based
on the URL); it then reads the RPC method from the request body and
does its own second dispatch step based on that method. It also reads
body params, and returns a Fault on error.
The XMLRPCDispatcher strips any /RPC2 prefix; if you aren't using /RPC2
    in your URLs, you can safely skip turning on the XMLRPCDispatcher.
    Otherwise, you need to declare it in config::
request.dispatch: cherrypy.dispatch.XMLRPCDispatcher()
"""
# Note we're hard-coding this into the 'tools' namespace. We could do
# a huge amount of work to make it relocatable, but the only reason why
# would be if someone actually disabled the default_toolbox. Meh.
_cp_config = {'tools.xmlrpc.on': True}
def default(self, *vpath, **params):
rpcparams, rpcmethod = _xmlrpc.process_body()
subhandler = self
for attr in str(rpcmethod).split('.'):
subhandler = getattr(subhandler, attr, None)
if subhandler and getattr(subhandler, "exposed", False):
body = subhandler(*(vpath + rpcparams), **params)
else:
# https://bitbucket.org/cherrypy/cherrypy/issue/533
# if a method is not found, an xmlrpclib.Fault should be returned
# raising an exception here will do that; see
# cherrypy.lib.xmlrpcutil.on_error
raise Exception('method "%s" is not supported' % attr)
conf = cherrypy.serving.request.toolmaps['tools'].get("xmlrpc", {})
_xmlrpc.respond(body,
conf.get('encoding', 'utf-8'),
conf.get('allow_none', 0))
return cherrypy.serving.response.body
default.exposed = True
class SessionAuthTool(HandlerTool):
def _setargs(self):
for name in dir(cptools.SessionAuth):
if not name.startswith("__"):
setattr(self, name, None)
class CachingTool(Tool):
"""Caching Tool for CherryPy."""
def _wrapper(self, **kwargs):
request = cherrypy.serving.request
if _caching.get(**kwargs):
request.handler = None
else:
if request.cacheable:
# Note the devious technique here of adding hooks on the fly
request.hooks.attach('before_finalize', _caching.tee_output,
priority=90)
_wrapper.priority = 20
def _setup(self):
"""Hook caching into cherrypy.request."""
conf = self._merged_args()
p = conf.pop("priority", None)
cherrypy.serving.request.hooks.attach('before_handler', self._wrapper,
priority=p, **conf)
class Toolbox(object):
"""A collection of Tools.
This object also functions as a config namespace handler for itself.
Custom toolboxes should be added to each Application's toolboxes dict.
"""
def __init__(self, namespace):
self.namespace = namespace
def __setattr__(self, name, value):
# If the Tool._name is None, supply it from the attribute name.
if isinstance(value, Tool):
if value._name is None:
value._name = name
value.namespace = self.namespace
object.__setattr__(self, name, value)
def __enter__(self):
"""Populate request.toolmaps from tools specified in config."""
cherrypy.serving.request.toolmaps[self.namespace] = map = {}
def populate(k, v):
toolname, arg = k.split(".", 1)
bucket = map.setdefault(toolname, {})
bucket[arg] = v
return populate
def __exit__(self, exc_type, exc_val, exc_tb):
"""Run tool._setup() for each tool in our toolmap."""
map = cherrypy.serving.request.toolmaps.get(self.namespace)
if map:
for name, settings in map.items():
if settings.get("on", False):
tool = getattr(self, name)
tool._setup()
class DeprecatedTool(Tool):
_name = None
warnmsg = "This Tool is deprecated."
def __init__(self, point, warnmsg=None):
self.point = point
if warnmsg is not None:
self.warnmsg = warnmsg
def __call__(self, *args, **kwargs):
warnings.warn(self.warnmsg)
def tool_decorator(f):
return f
return tool_decorator
def _setup(self):
warnings.warn(self.warnmsg)
default_toolbox = _d = Toolbox("tools")
_d.session_auth = SessionAuthTool(cptools.session_auth)
_d.allow = Tool('on_start_resource', cptools.allow)
_d.proxy = Tool('before_request_body', cptools.proxy, priority=30)
_d.response_headers = Tool('on_start_resource', cptools.response_headers)
_d.log_tracebacks = Tool('before_error_response', cptools.log_traceback)
_d.log_headers = Tool('before_error_response', cptools.log_request_headers)
_d.log_hooks = Tool('on_end_request', cptools.log_hooks, priority=100)
_d.err_redirect = ErrorTool(cptools.redirect)
_d.etags = Tool('before_finalize', cptools.validate_etags, priority=75)
_d.decode = Tool('before_request_body', encoding.decode)
# the order of encoding, gzip, caching is important
_d.encode = Tool('before_handler', encoding.ResponseEncoder, priority=70)
_d.gzip = Tool('before_finalize', encoding.gzip, priority=80)
_d.staticdir = HandlerTool(static.staticdir)
_d.staticfile = HandlerTool(static.staticfile)
_d.sessions = SessionTool()
_d.xmlrpc = ErrorTool(_xmlrpc.on_error)
_d.caching = CachingTool('before_handler', _caching.get, 'caching')
_d.expires = Tool('before_finalize', _caching.expires)
_d.tidy = DeprecatedTool(
'before_finalize',
"The tidy tool has been removed from the standard distribution of "
"CherryPy. The most recent version can be found at "
"http://tools.cherrypy.org/browser.")
_d.nsgmls = DeprecatedTool(
'before_finalize',
"The nsgmls tool has been removed from the standard distribution of "
"CherryPy. The most recent version can be found at "
"http://tools.cherrypy.org/browser.")
_d.ignore_headers = Tool('before_request_body', cptools.ignore_headers)
_d.referer = Tool('before_request_body', cptools.referer)
_d.basic_auth = Tool('on_start_resource', auth.basic_auth)
_d.digest_auth = Tool('on_start_resource', auth.digest_auth)
_d.trailing_slash = Tool('before_handler', cptools.trailing_slash, priority=60)
_d.flatten = Tool('before_finalize', cptools.flatten)
_d.accept = Tool('on_start_resource', cptools.accept)
_d.redirect = Tool('on_start_resource', cptools.redirect)
_d.autovary = Tool('on_start_resource', cptools.autovary, priority=0)
_d.json_in = Tool('before_request_body', jsontools.json_in, priority=30)
_d.json_out = Tool('before_handler', jsontools.json_out, priority=30)
_d.auth_basic = Tool('before_handler', auth_basic.basic_auth, priority=1)
_d.auth_digest = Tool('before_handler', auth_digest.digest_auth, priority=1)
del _d, cptools, encoding, auth, static
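# Editorial addition, not part of upstream CherryPy: a hedged sketch of how a
# user-defined Tool is attached to the default toolbox; the hook point and
# callable are illustrative assumptions.
def _register_custom_tool_example():
    def log_user_agent():
        cherrypy.log(cherrypy.serving.request.headers.get('User-Agent', ''))
    # Assigning on the toolbox names the tool and sets its namespace.
    default_toolbox.log_ua = Tool('before_handler', log_user_agent)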
| gpl-3.0 |
DARKPOP/external_chromium_org_third_party_skia | gm/rebaseline_server/download_actuals_test.py | 20 | 1491 | #!/usr/bin/python
"""
Copyright 2014 Google Inc.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
Test download.py
TODO(epoger): Create a command to update the expected results (in
self._output_dir_expected) when appropriate. For now, you should:
1. examine the results in self.output_dir_actual and make sure they are ok
2. rm -rf self._output_dir_expected
3. mv self.output_dir_actual self._output_dir_expected
Although, if you're using an SVN checkout, this will blow away .svn directories
within self._output_dir_expected, which wouldn't be good...
"""
# System-level imports
import os
# Must fix up PYTHONPATH before importing from within Skia
import rs_fixpypath # pylint: disable=W0611
# Imports from within Skia
from py.utils import url_utils
import base_unittest
import download_actuals
class DownloadTest(base_unittest.TestCase):
def test_fetch(self):
"""Tests fetch() of GM results from actual-results.json ."""
downloader = download_actuals.Download(
actuals_base_url=url_utils.create_filepath_url(
os.path.join(self.input_dir, 'gm-actuals')),
gm_actuals_root_url=url_utils.create_filepath_url(
os.path.join(self.input_dir, 'fake-gm-imagefiles')))
downloader.fetch(
builder_name='Test-Android-GalaxyNexus-SGX540-Arm7-Release',
dest_dir=self.output_dir_actual)
def main():
base_unittest.main(DownloadTest)
if __name__ == '__main__':
main()
| bsd-3-clause |
piiswrong/mxnet | python/mxnet/gluon/model_zoo/vision/densenet.py | 10 | 7848 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable= arguments-differ
"""DenseNet, implemented in Gluon."""
__all__ = ['DenseNet', 'densenet121', 'densenet161', 'densenet169', 'densenet201']
from ....context import cpu
from ...block import HybridBlock
from ... import nn
from ..custom_layers import HybridConcurrent, Identity
# Helpers
def _make_dense_block(num_layers, bn_size, growth_rate, dropout, stage_index):
out = nn.HybridSequential(prefix='stage%d_'%stage_index)
with out.name_scope():
for _ in range(num_layers):
out.add(_make_dense_layer(growth_rate, bn_size, dropout))
return out
def _make_dense_layer(growth_rate, bn_size, dropout):
new_features = nn.HybridSequential(prefix='')
new_features.add(nn.BatchNorm())
new_features.add(nn.Activation('relu'))
new_features.add(nn.Conv2D(bn_size * growth_rate, kernel_size=1, use_bias=False))
new_features.add(nn.BatchNorm())
new_features.add(nn.Activation('relu'))
new_features.add(nn.Conv2D(growth_rate, kernel_size=3, padding=1, use_bias=False))
if dropout:
new_features.add(nn.Dropout(dropout))
out = HybridConcurrent(concat_dim=1, prefix='')
out.add(Identity())
out.add(new_features)
return out
def _make_transition(num_output_features):
out = nn.HybridSequential(prefix='')
out.add(nn.BatchNorm())
out.add(nn.Activation('relu'))
out.add(nn.Conv2D(num_output_features, kernel_size=1, use_bias=False))
out.add(nn.AvgPool2D(pool_size=2, strides=2))
return out
# Net
class DenseNet(HybridBlock):
r"""Densenet-BC model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
num_init_features : int
Number of filters to learn in the first convolution layer.
growth_rate : int
Number of filters to add each layer (`k` in the paper).
block_config : list of int
List of integers for numbers of layers in each pooling block.
bn_size : int, default 4
        Multiplicative factor for number of bottleneck layers.
(i.e. bn_size * k features in the bottleneck layer)
dropout : float, default 0
Rate of dropout after each dense layer.
classes : int, default 1000
Number of classification classes.
"""
def __init__(self, num_init_features, growth_rate, block_config,
bn_size=4, dropout=0, classes=1000, **kwargs):
super(DenseNet, self).__init__(**kwargs)
with self.name_scope():
self.features = nn.HybridSequential(prefix='')
self.features.add(nn.Conv2D(num_init_features, kernel_size=7,
strides=2, padding=3, use_bias=False))
self.features.add(nn.BatchNorm())
self.features.add(nn.Activation('relu'))
self.features.add(nn.MaxPool2D(pool_size=3, strides=2, padding=1))
# Add dense blocks
num_features = num_init_features
for i, num_layers in enumerate(block_config):
self.features.add(_make_dense_block(num_layers, bn_size, growth_rate, dropout, i+1))
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
self.features.add(_make_transition(num_features // 2))
num_features = num_features // 2
self.features.add(nn.BatchNorm())
self.features.add(nn.Activation('relu'))
self.features.add(nn.AvgPool2D(pool_size=7))
self.features.add(nn.Flatten())
self.output = nn.Dense(classes)
def hybrid_forward(self, F, x):
x = self.features(x)
x = self.output(x)
return x
# Specification
densenet_spec = {121: (64, 32, [6, 12, 24, 16]),
161: (96, 48, [6, 12, 36, 24]),
169: (64, 32, [6, 12, 32, 32]),
201: (64, 32, [6, 12, 48, 32])}
# Constructor
def get_densenet(num_layers, pretrained=False, ctx=cpu(), root='~/.mxnet/models', **kwargs):
r"""Densenet-BC model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
num_layers : int
Number of layers for the variant of densenet. Options are 121, 161, 169, 201.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
num_init_features, growth_rate, block_config = densenet_spec[num_layers]
net = DenseNet(num_init_features, growth_rate, block_config, **kwargs)
if pretrained:
from ..model_store import get_model_file
net.load_params(get_model_file('densenet%d'%(num_layers), root=root), ctx=ctx)
return net
def densenet121(**kwargs):
r"""Densenet-BC 121-layer model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(121, **kwargs)
def densenet161(**kwargs):
r"""Densenet-BC 161-layer model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(161, **kwargs)
def densenet169(**kwargs):
r"""Densenet-BC 169-layer model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(169, **kwargs)
def densenet201(**kwargs):
r"""Densenet-BC 201-layer model from the
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_ paper.
Parameters
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default '~/.mxnet/models'
Location for keeping the model parameters.
"""
return get_densenet(201, **kwargs)
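# Editorial addition, not part of the upstream module: a minimal usage sketch
# for the constructors above, assuming mxnet is importable; input shape and
# context are illustrative assumptions.
def _densenet_usage_example():
    import mxnet as mx
    net = densenet121(pretrained=False)    # Densenet-BC 121, 1000 classes
    net.initialize(ctx=mx.cpu())           # random init (no pretrained weights)
    x = mx.nd.zeros((1, 3, 224, 224))      # one 224x224 RGB image
    return net(x)                          # NDArray of shape (1, 1000)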
| apache-2.0 |
catapult-project/catapult | third_party/gsutil/third_party/pyu2f/pyu2f/convenience/baseauthenticator.py | 13 | 2202 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interface to handle end to end flow of U2F signing."""
import sys
class BaseAuthenticator(object):
"""Interface to handle end to end flow of U2F signing."""
def Authenticate(self, app_id, challenge_data,
print_callback=sys.stderr.write):
"""Authenticates app_id with a security key.
Executes the U2F authentication/signature flow with a security key.
Args:
app_id: The app_id to register the security key against.
challenge_data: List of dictionaries containing a RegisteredKey ('key')
and the raw challenge data ('challenge') for this key.
print_callback: Callback to print a message to the user. The callback
function takes one argument--the message to display.
Returns:
A dictionary with the following fields:
'clientData': url-safe base64 encoded ClientData JSON signed by the key.
'signatureData': url-safe base64 encoded signature.
'applicationId': application id.
'keyHandle': url-safe base64 encoded handle of the key used to sign.
Raises:
U2FError: There was some kind of problem with registration (e.g.
the device was already registered or there was a timeout waiting
for the test of user presence).
"""
raise NotImplementedError
def IsAvailable(self):
"""Indicates whether the authenticator implementation is available to sign.
The caller should not call Authenticate() if IsAvailable() returns False
Returns:
True if the authenticator is available to sign and False otherwise.
"""
raise NotImplementedError
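# Editorial addition, not part of upstream pyu2f: a hedged stub subclass
# illustrating the contract documented above; it performs no real signing and
# all field values below are placeholders.
class _StubAuthenticator(BaseAuthenticator):
  """Example-only authenticator that is never available to sign."""
  def Authenticate(self, app_id, challenge_data,
                   print_callback=sys.stderr.write):
    print_callback('stub authenticator: no real signing performed\n')
    return {
        'clientData': '',       # url-safe base64 ClientData JSON
        'signatureData': '',    # url-safe base64 signature
        'applicationId': app_id,
        'keyHandle': '',        # url-safe base64 handle of the signing key
    }
  def IsAvailable(self):
    return False                # callers must check this before Authenticate()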
| bsd-3-clause |
jreut/Switcharoo | scraper/scraper/entryqueue.py | 2 | 2041 | # Copyright 2015 Adam Greenstein <[email protected]>
#
# Switcharoo Cartographer is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Switcharoo Cartographer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Switcharoo Cartographer. If not, see <http://www.gnu.org/licenses/>.
from data import Access, Entry, EntryError
from Queue import Queue
class EntryQueue(Queue):
def __init__(self, transverse, maxsize=0):
self.reddit = transverse.reddit
self.events = transverse.events
Queue.__init__(self, maxsize)
def _init(self, maxsize):
Queue._init(self, maxsize)
nodes = Access(self.events).get_entry(maxsize)
for node in nodes:
try:
self.queue.append(Entry(node['raw_url'], self.reddit))
self.events.on_adding_to_queue(node['raw_url'])
except EntryError:
# TODO Remove old entry from DB
pass
def _put(self, url):
try:
entry = Entry(url, self.reddit)
if self._is_unique(entry):
self.events.on_adding_to_queue(url)
self.queue.append(entry)
else:
self.events.on_not_adding_to_queue(url)
except EntryError:
self.events.on_not_adding_to_queue(url)
def _get(self):
return self.queue.popleft()
def _is_unique(self, entry):
# TODO Logic here to determine if new url found
if entry not in self.queue:
return Access(self.events).is_unique_entry(entry)
else:
return False | gpl-3.0 |
sunshine027/mic | mic/utils/misc.py | 5 | 35129 | #!/usr/bin/python -tt
#
# Copyright (c) 2010, 2011 Intel Inc.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from __future__ import with_statement
import os
import sys
import time
import tempfile
import re
import shutil
import glob
import hashlib
import subprocess
import platform
import traceback
try:
import sqlite3 as sqlite
except ImportError:
import sqlite
try:
from xml.etree import cElementTree
except ImportError:
import cElementTree
xmlparse = cElementTree.parse
from mic import msger
from mic.utils.errors import CreatorError, SquashfsError
from mic.utils.fs_related import find_binary_path, makedirs
from mic.utils.grabber import myurlgrab
from mic.utils.proxy import get_proxy_for
from mic.utils import runner
from mic.utils import rpmmisc
from mic.utils.safeurl import SafeURL
RPM_RE = re.compile("(.*)\.(.*) (.*)-(.*)")
RPM_FMT = "%(name)s.%(arch)s %(version)s-%(release)s"
SRPM_RE = re.compile("(.*)-(\d+.*)-(\d+\.\d+).src.rpm")
def build_name(kscfg, release=None, prefix = None, suffix = None):
"""Construct and return an image name string.
This is a utility function to help create sensible name and fslabel
strings. The name is constructed using the sans-prefix-and-extension
kickstart filename and the supplied prefix and suffix.
kscfg -- a path to a kickstart file
release -- a replacement to suffix for image release
prefix -- a prefix to prepend to the name; defaults to None, which causes
no prefix to be used
suffix -- a suffix to append to the name; defaults to None, which causes
a YYYYMMDDHHMM suffix to be used
    Note, if maxlen is less than the len(suffix), you get to keep both pieces.
"""
name = os.path.basename(kscfg)
idx = name.rfind('.')
if idx >= 0:
name = name[:idx]
if release is not None:
suffix = ""
if prefix is None:
prefix = ""
if suffix is None:
suffix = time.strftime("%Y%m%d%H%M")
if name.startswith(prefix):
name = name[len(prefix):]
prefix = "%s-" % prefix if prefix else ""
suffix = "-%s" % suffix if suffix else ""
ret = prefix + name + suffix
return ret
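# Editorial addition: a hedged example of how build_name() composes image
# names; the kickstart path is an illustrative assumption.
def _build_name_example():
    # prefix + sans-extension ks name + explicit suffix
    assert build_name('/tmp/tizen-minimal.ks', prefix='mic', suffix='test') == 'mic-tizen-minimal-test'
    # passing release drops the timestamp suffix entirely
    assert build_name('/tmp/tizen-minimal.ks', release='1.0', prefix='mic') == 'mic-tizen-minimal'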
def get_distro():
"""Detect linux distribution, support "meego"
"""
support_dists = ('SuSE',
'debian',
'fedora',
'redhat',
'centos',
'meego',
'moblin',
'tizen')
try:
(dist, ver, id) = platform.linux_distribution( \
supported_dists = support_dists)
except:
(dist, ver, id) = platform.dist( \
supported_dists = support_dists)
return (dist, ver, id)
def get_hostname():
"""Get hostname
"""
return platform.node()
def get_hostname_distro_str():
"""Get composited string for current linux distribution
"""
(dist, ver, id) = get_distro()
hostname = get_hostname()
if not dist:
return "%s(Unknown Linux Distribution)" % hostname
else:
distro_str = ' '.join(map(str.strip, (hostname, dist, ver, id)))
return distro_str.strip()
_LOOP_RULE_PTH = None
def hide_loopdev_presentation():
udev_rules = "80-prevent-loop-present.rules"
udev_rules_dir = [
'/usr/lib/udev/rules.d/',
'/lib/udev/rules.d/',
'/etc/udev/rules.d/'
]
global _LOOP_RULE_PTH
for rdir in udev_rules_dir:
if os.path.exists(rdir):
_LOOP_RULE_PTH = os.path.join(rdir, udev_rules)
if not _LOOP_RULE_PTH:
return
try:
with open(_LOOP_RULE_PTH, 'w') as wf:
wf.write('KERNEL=="loop*", ENV{UDISKS_PRESENTATION_HIDE}="1"')
runner.quiet('udevadm trigger')
except:
pass
def unhide_loopdev_presentation():
global _LOOP_RULE_PTH
if not _LOOP_RULE_PTH:
return
try:
os.unlink(_LOOP_RULE_PTH)
runner.quiet('udevadm trigger')
except:
pass
def extract_rpm(rpmfile, targetdir):
rpm2cpio = find_binary_path("rpm2cpio")
cpio = find_binary_path("cpio")
olddir = os.getcwd()
os.chdir(targetdir)
msger.verbose("Extract rpm file with cpio: %s" % rpmfile)
p1 = subprocess.Popen([rpm2cpio, rpmfile], stdout=subprocess.PIPE)
p2 = subprocess.Popen([cpio, "-idv"], stdin=p1.stdout,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p1.stdout.close()
(sout, serr) = p2.communicate()
msger.verbose(sout or serr)
os.chdir(olddir)
def human_size(size):
"""Return human readable string for Bytes size
"""
if size <= 0:
return "0M"
import math
measure = ['B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y']
expo = int(math.log(size, 1024))
mant = float(size/math.pow(1024, expo))
return "{0:.1f}{1:s}".format(mant, measure[expo])
def get_block_size(file_obj):
""" Returns block size for file object 'file_obj'. Errors are indicated by
the 'IOError' exception. """
from fcntl import ioctl
import struct
# Get the block size of the host file-system for the image file by calling
# the FIGETBSZ ioctl (number 2).
binary_data = ioctl(file_obj, 2, struct.pack('I', 0))
return struct.unpack('I', binary_data)[0]
def check_space_pre_cp(src, dst):
"""Check whether disk space is enough before 'cp' like
operations, else exception will be raised.
"""
srcsize = get_file_size(src) * 1024 * 1024
freesize = get_filesystem_avail(dst)
if srcsize > freesize:
raise CreatorError("space on %s(%s) is not enough for about %s files"
% (dst, human_size(freesize), human_size(srcsize)))
def calc_hashes(file_path, hash_names, start = 0, end = None):
""" Calculate hashes for a file. The 'file_path' argument is the file
to calculate hash functions for, 'start' and 'end' are the starting and
ending file offset to calculate the has functions for. The 'hash_names'
argument is a list of hash names to calculate. Returns the the list
of calculated hash values in the hexadecimal form in the same order
as 'hash_names'.
"""
if end == None:
end = os.path.getsize(file_path)
chunk_size = 65536
to_read = end - start
read = 0
hashes = []
for hash_name in hash_names:
hashes.append(hashlib.new(hash_name))
with open(file_path, "rb") as f:
f.seek(start)
while read < to_read:
if read + chunk_size > to_read:
chunk_size = to_read - read
chunk = f.read(chunk_size)
for hash_obj in hashes:
hash_obj.update(chunk)
read += chunk_size
result = []
for hash_obj in hashes:
result.append(hash_obj.hexdigest())
return result
def get_md5sum(fpath):
return calc_hashes(fpath, ('md5', ))[0]
def get_sha1sum(fpath):
return calc_hashes(fpath, ('sha1', ))[0]
def get_sha256sum(fpath):
return calc_hashes(fpath, ('sha256', ))[0]
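# Editorial addition: a hedged sketch showing that the helpers above are thin
# wrappers over calc_hashes(); the image path is an illustrative assumption.
def _checksum_helpers_example(path='/tmp/image.raw'):
    md5, sha256 = calc_hashes(path, ('md5', 'sha256'))
    return md5 == get_md5sum(path) and sha256 == get_sha256sum(path)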
def normalize_ksfile(ksconf, release, arch):
'''
Return the name of a normalized ks file in which macro variables
    @BUILD_ID@ and @ARCH@ are replaced with real values.
    The original ks file is returned if no special macro is used, otherwise
    a temp file is created and returned, which will be deleted when the
    program exits normally.
'''
if not release:
release = "latest"
if not arch or re.match(r'i.86', arch):
arch = "ia32"
with open(ksconf) as f:
ksc = f.read()
if "@ARCH@" not in ksc and "@BUILD_ID@" not in ksc:
return ksconf
msger.info("Substitute macro variable @BUILD_ID@/@ARCH@ in ks: %s" % ksconf)
ksc = ksc.replace("@ARCH@", arch)
ksc = ksc.replace("@BUILD_ID@", release)
fd, ksconf = tempfile.mkstemp(prefix=os.path.basename(ksconf))
os.write(fd, ksc)
os.close(fd)
msger.debug('normalized ks file:%s' % ksconf)
def remove_temp_ks():
try:
os.unlink(ksconf)
except OSError, err:
msger.warning('Failed to remove temp ks file:%s:%s' % (ksconf, err))
import atexit
atexit.register(remove_temp_ks)
return ksconf
def _check_mic_chroot(rootdir):
def _path(path):
return rootdir.rstrip('/') + path
release_files = map(_path, [ "/etc/moblin-release",
"/etc/meego-release",
"/etc/tizen-release"])
if not any(map(os.path.exists, release_files)):
msger.warning("Dir %s is not a MeeGo/Tizen chroot env" % rootdir)
if not glob.glob(rootdir + "/boot/vmlinuz-*"):
msger.warning("Failed to find kernel module under %s" % rootdir)
return
def selinux_check(arch, fstypes):
try:
getenforce = find_binary_path('getenforce')
except CreatorError:
return
selinux_status = runner.outs([getenforce])
if arch and arch.startswith("arm") and selinux_status == "Enforcing":
raise CreatorError("Can't create arm image if selinux is enabled, "
"please run 'setenforce 0' to disable selinux")
use_btrfs = filter(lambda typ: typ == 'btrfs', fstypes)
if use_btrfs and selinux_status == "Enforcing":
raise CreatorError("Can't create btrfs image if selinux is enabled,"
" please run 'setenforce 0' to disable selinux")
def get_image_type(path):
def _get_extension_name(path):
match = re.search("(?<=\.)\w+$", path)
if match:
return match.group(0)
else:
return None
if os.path.isdir(path):
_check_mic_chroot(path)
return "fs"
maptab = {
"tar": "loop",
"raw":"raw",
"vmdk":"vmdk",
"vdi":"vdi",
"iso":"livecd",
"usbimg":"liveusb",
}
extension = _get_extension_name(path)
if extension in maptab:
return maptab[extension]
fd = open(path, "rb")
file_header = fd.read(1024)
fd.close()
vdi_flag = "<<< Sun VirtualBox Disk Image >>>"
if file_header[0:len(vdi_flag)] == vdi_flag:
return maptab["vdi"]
output = runner.outs(['file', path])
isoptn = re.compile(r".*ISO 9660 CD-ROM filesystem.*(bootable).*")
usbimgptn = re.compile(r".*x86 boot sector.*active.*")
rawptn = re.compile(r".*x86 boot sector.*")
vmdkptn = re.compile(r".*VMware. disk image.*")
ext3fsimgptn = re.compile(r".*Linux.*ext3 filesystem data.*")
ext4fsimgptn = re.compile(r".*Linux.*ext4 filesystem data.*")
btrfsimgptn = re.compile(r".*BTRFS.*")
if isoptn.match(output):
return maptab["iso"]
elif usbimgptn.match(output):
return maptab["usbimg"]
elif rawptn.match(output):
return maptab["raw"]
elif vmdkptn.match(output):
return maptab["vmdk"]
elif ext3fsimgptn.match(output):
return "ext3fsimg"
elif ext4fsimgptn.match(output):
return "ext4fsimg"
elif btrfsimgptn.match(output):
return "btrfsimg"
else:
raise CreatorError("Cannot detect the type of image: %s" % path)
def get_file_size(filename):
""" Return size in MB unit """
cmd = ['du', "-s", "-b", "-B", "1M", filename]
rc, duOutput = runner.runtool(cmd)
if rc != 0:
raise CreatorError("Failed to run: %s" % ' '.join(cmd))
size1 = int(duOutput.split()[0])
cmd = ['du', "-s", "-B", "1M", filename]
rc, duOutput = runner.runtool(cmd)
if rc != 0:
raise CreatorError("Failed to run: %s" % ' '.join(cmd))
size2 = int(duOutput.split()[0])
return max(size1, size2)
def get_filesystem_avail(fs):
vfstat = os.statvfs(fs)
return vfstat.f_bavail * vfstat.f_bsize
def convert_image(srcimg, srcfmt, dstimg, dstfmt):
#convert disk format
if dstfmt != "raw":
raise CreatorError("Invalid destination image format: %s" % dstfmt)
msger.debug("converting %s image to %s" % (srcimg, dstimg))
if srcfmt == "vmdk":
path = find_binary_path("qemu-img")
argv = [path, "convert", "-f", "vmdk", srcimg, "-O", dstfmt, dstimg]
elif srcfmt == "vdi":
path = find_binary_path("VBoxManage")
argv = [path, "internalcommands", "converttoraw", srcimg, dstimg]
else:
raise CreatorError("Invalid soure image format: %s" % srcfmt)
rc = runner.show(argv)
if rc == 0:
msger.debug("convert successful")
if rc != 0:
raise CreatorError("Unable to convert disk to %s" % dstfmt)
def uncompress_squashfs(squashfsimg, outdir):
"""Uncompress file system from squshfs image"""
unsquashfs = find_binary_path("unsquashfs")
args = [ unsquashfs, "-d", outdir, squashfsimg ]
rc = runner.show(args)
if (rc != 0):
raise SquashfsError("Failed to uncompress %s." % squashfsimg)
def mkdtemp(dir = "/var/tmp", prefix = "mic-tmp-"):
""" FIXME: use the dir in mic.conf instead """
makedirs(dir)
return tempfile.mkdtemp(dir = dir, prefix = prefix)
def get_repostrs_from_ks(ks):
def _get_temp_reponame(baseurl):
md5obj = hashlib.md5(baseurl)
tmpreponame = "%s" % md5obj.hexdigest()
return tmpreponame
kickstart_repos = []
for repodata in ks.handler.repo.repoList:
repo = {}
for attr in ('name',
'baseurl',
'mirrorlist',
'includepkgs', # val is list
'excludepkgs', # val is list
'cost', # int
'priority',# int
'save',
'proxy',
'proxyuser',
'proxypasswd',
'proxypasswd',
'debuginfo',
'source',
'gpgkey',
'ssl_verify'):
if hasattr(repodata, attr) and getattr(repodata, attr):
repo[attr] = getattr(repodata, attr)
if 'name' not in repo:
repo['name'] = _get_temp_reponame(repodata.baseurl)
if hasattr(repodata, 'baseurl') and getattr(repodata, 'baseurl'):
repo['baseurl'] = SafeURL(getattr(repodata, 'baseurl'),
getattr(repodata, 'user', None),
getattr(repodata, 'passwd', None))
kickstart_repos.append(repo)
return kickstart_repos
def _get_uncompressed_data_from_url(url, filename, proxies):
filename = myurlgrab(url.full, filename, proxies)
suffix = None
if filename.endswith(".gz"):
suffix = ".gz"
runner.quiet(['gunzip', "-f", filename])
elif filename.endswith(".bz2"):
suffix = ".bz2"
runner.quiet(['bunzip2', "-f", filename])
if suffix:
filename = filename.replace(suffix, "")
return filename
def _get_metadata_from_repo(baseurl, proxies, cachedir, reponame, filename,
sumtype=None, checksum=None):
url = baseurl.join(filename)
filename_tmp = str("%s/%s/%s" % (cachedir, reponame, os.path.basename(filename)))
if os.path.splitext(filename_tmp)[1] in (".gz", ".bz2"):
filename = os.path.splitext(filename_tmp)[0]
else:
filename = filename_tmp
if sumtype and checksum and os.path.exists(filename):
try:
sumcmd = find_binary_path("%ssum" % sumtype)
except:
file_checksum = None
else:
file_checksum = runner.outs([sumcmd, filename]).split()[0]
if file_checksum and file_checksum == checksum:
return filename
return _get_uncompressed_data_from_url(url,filename_tmp,proxies)
def get_metadata_from_repos(repos, cachedir):
my_repo_metadata = []
for repo in repos:
reponame = repo.name
baseurl = repo.baseurl
if hasattr(repo, 'proxy'):
proxy = repo.proxy
else:
proxy = get_proxy_for(baseurl)
proxies = None
if proxy:
proxies = {str(baseurl.split(":")[0]): str(proxy)}
makedirs(os.path.join(cachedir, reponame))
url = baseurl.join("repodata/repomd.xml")
filename = os.path.join(cachedir, reponame, 'repomd.xml')
repomd = myurlgrab(url.full, filename, proxies)
try:
root = xmlparse(repomd)
except SyntaxError:
raise CreatorError("repomd.xml syntax error.")
ns = root.getroot().tag
ns = ns[0:ns.rindex("}")+1]
filepaths = {}
checksums = {}
sumtypes = {}
for elm in root.getiterator("%sdata" % ns):
if elm.attrib["type"] == "patterns":
filepaths['patterns'] = elm.find("%slocation" % ns).attrib['href']
checksums['patterns'] = elm.find("%sopen-checksum" % ns).text
sumtypes['patterns'] = elm.find("%sopen-checksum" % ns).attrib['type']
break
for elm in root.getiterator("%sdata" % ns):
if elm.attrib["type"] in ("group_gz", "group"):
filepaths['comps'] = elm.find("%slocation" % ns).attrib['href']
checksums['comps'] = elm.find("%sopen-checksum" % ns).text
sumtypes['comps'] = elm.find("%sopen-checksum" % ns).attrib['type']
break
primary_type = None
for elm in root.getiterator("%sdata" % ns):
if elm.attrib["type"] in ("primary_db", "primary"):
primary_type = elm.attrib["type"]
filepaths['primary'] = elm.find("%slocation" % ns).attrib['href']
checksums['primary'] = elm.find("%sopen-checksum" % ns).text
sumtypes['primary'] = elm.find("%sopen-checksum" % ns).attrib['type']
break
if not primary_type:
continue
for item in ("primary", "patterns", "comps"):
if item not in filepaths:
filepaths[item] = None
continue
if not filepaths[item]:
continue
filepaths[item] = _get_metadata_from_repo(baseurl,
proxies,
cachedir,
reponame,
filepaths[item],
sumtypes[item],
checksums[item])
""" Get repo key """
try:
repokey = _get_metadata_from_repo(baseurl,
proxies,
cachedir,
reponame,
"repodata/repomd.xml.key")
except CreatorError:
repokey = None
msger.debug("\ncan't get %s/%s" % (baseurl, "repodata/repomd.xml.key"))
my_repo_metadata.append({"name":reponame,
"baseurl":baseurl,
"repomd":repomd,
"primary":filepaths['primary'],
"cachedir":cachedir,
"proxies":proxies,
"patterns":filepaths['patterns'],
"comps":filepaths['comps'],
"repokey":repokey})
return my_repo_metadata
def get_rpmver_in_repo(repometadata):
for repo in repometadata:
if repo["primary"].endswith(".xml"):
root = xmlparse(repo["primary"])
ns = root.getroot().tag
ns = ns[0:ns.rindex("}")+1]
versionlist = []
for elm in root.getiterator("%spackage" % ns):
if elm.find("%sname" % ns).text == 'rpm':
for node in elm.getchildren():
if node.tag == "%sversion" % ns:
versionlist.append(node.attrib['ver'])
if versionlist:
return reversed(
sorted(
versionlist,
key = lambda ver: map(int, ver.split('.')))).next()
elif repo["primary"].endswith(".sqlite"):
con = sqlite.connect(repo["primary"])
for row in con.execute("select version from packages where "
"name=\"rpm\" ORDER by version DESC"):
con.close()
return row[0]
return None
def get_arch(repometadata):
archlist = []
for repo in repometadata:
if repo["primary"].endswith(".xml"):
root = xmlparse(repo["primary"])
ns = root.getroot().tag
ns = ns[0:ns.rindex("}")+1]
for elm in root.getiterator("%spackage" % ns):
if elm.find("%sarch" % ns).text not in ("noarch", "src"):
arch = elm.find("%sarch" % ns).text
if arch not in archlist:
archlist.append(arch)
elif repo["primary"].endswith(".sqlite"):
con = sqlite.connect(repo["primary"])
for row in con.execute("select arch from packages where arch not in (\"src\", \"noarch\")"):
if row[0] not in archlist:
archlist.append(row[0])
con.close()
uniq_arch = []
for i in range(len(archlist)):
if archlist[i] not in rpmmisc.archPolicies.keys():
continue
need_append = True
j = 0
while j < len(uniq_arch):
if archlist[i] in rpmmisc.archPolicies[uniq_arch[j]].split(':'):
need_append = False
break
if uniq_arch[j] in rpmmisc.archPolicies[archlist[i]].split(':'):
if need_append:
uniq_arch[j] = archlist[i]
need_append = False
else:
uniq_arch.remove(uniq_arch[j])
continue
j += 1
if need_append:
uniq_arch.append(archlist[i])
return uniq_arch, archlist
def get_package(pkg, repometadata, arch = None):
ver = ""
target_repo = None
if not arch:
arches = []
elif arch not in rpmmisc.archPolicies:
arches = [arch]
else:
arches = rpmmisc.archPolicies[arch].split(':')
arches.append('noarch')
for repo in repometadata:
if repo["primary"].endswith(".xml"):
root = xmlparse(repo["primary"])
ns = root.getroot().tag
ns = ns[0:ns.rindex("}")+1]
for elm in root.getiterator("%spackage" % ns):
if elm.find("%sname" % ns).text == pkg:
if elm.find("%sarch" % ns).text in arches:
version = elm.find("%sversion" % ns)
tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel'])
if tmpver > ver:
ver = tmpver
location = elm.find("%slocation" % ns)
pkgpath = "%s" % location.attrib['href']
target_repo = repo
break
if repo["primary"].endswith(".sqlite"):
con = sqlite.connect(repo["primary"])
if arch:
sql = 'select version, release, location_href from packages ' \
'where name = "%s" and arch IN ("%s")' % \
(pkg, '","'.join(arches))
for row in con.execute(sql):
tmpver = "%s-%s" % (row[0], row[1])
if tmpver > ver:
ver = tmpver
pkgpath = "%s" % row[2]
target_repo = repo
break
else:
sql = 'select version, release, location_href from packages ' \
'where name = "%s"' % pkg
for row in con.execute(sql):
tmpver = "%s-%s" % (row[0], row[1])
if tmpver > ver:
ver = tmpver
pkgpath = "%s" % row[2]
target_repo = repo
break
con.close()
if target_repo:
makedirs("%s/packages/%s" % (target_repo["cachedir"], target_repo["name"]))
url = target_repo["baseurl"].join(pkgpath)
filename = str("%s/packages/%s/%s" % (target_repo["cachedir"], target_repo["name"], os.path.basename(pkgpath)))
if os.path.exists(filename):
ret = rpmmisc.checkRpmIntegrity('rpm', filename)
if ret == 0:
return filename
msger.warning("package %s is damaged: %s" %
(os.path.basename(filename), filename))
os.unlink(filename)
pkg = myurlgrab(url.full, filename, target_repo["proxies"])
return pkg
else:
return None
def get_source_name(pkg, repometadata):
def get_bin_name(pkg):
m = RPM_RE.match(pkg)
if m:
return m.group(1)
return None
def get_src_name(srpm):
m = SRPM_RE.match(srpm)
if m:
return m.group(1)
return None
ver = ""
target_repo = None
pkg_name = get_bin_name(pkg)
if not pkg_name:
return None
for repo in repometadata:
if repo["primary"].endswith(".xml"):
root = xmlparse(repo["primary"])
ns = root.getroot().tag
ns = ns[0:ns.rindex("}")+1]
for elm in root.getiterator("%spackage" % ns):
if elm.find("%sname" % ns).text == pkg_name:
if elm.find("%sarch" % ns).text != "src":
version = elm.find("%sversion" % ns)
tmpver = "%s-%s" % (version.attrib['ver'], version.attrib['rel'])
if tmpver > ver:
ver = tmpver
fmt = elm.find("%sformat" % ns)
if fmt:
fns = fmt.getchildren()[0].tag
fns = fns[0:fns.rindex("}")+1]
pkgpath = fmt.find("%ssourcerpm" % fns).text
target_repo = repo
break
if repo["primary"].endswith(".sqlite"):
con = sqlite.connect(repo["primary"])
for row in con.execute("select version, release, rpm_sourcerpm from packages where name = \"%s\" and arch != \"src\"" % pkg_name):
tmpver = "%s-%s" % (row[0], row[1])
                if tmpver > ver:
                    ver = tmpver
                    pkgpath = "%s" % row[2]
                    target_repo = repo
break
con.close()
if target_repo:
return get_src_name(pkgpath)
else:
return None
def get_pkglist_in_patterns(group, patterns):
found = False
pkglist = []
try:
root = xmlparse(patterns)
except SyntaxError:
raise SyntaxError("%s syntax error." % patterns)
for elm in list(root.getroot()):
ns = elm.tag
ns = ns[0:ns.rindex("}")+1]
name = elm.find("%sname" % ns)
summary = elm.find("%ssummary" % ns)
if name.text == group or summary.text == group:
found = True
break
if not found:
return pkglist
found = False
for requires in list(elm):
if requires.tag.endswith("requires"):
found = True
break
if not found:
return pkglist
for pkg in list(requires):
pkgname = pkg.attrib["name"]
if pkgname not in pkglist:
pkglist.append(pkgname)
return pkglist
def get_pkglist_in_comps(group, comps):
found = False
pkglist = []
try:
root = xmlparse(comps)
except SyntaxError:
raise SyntaxError("%s syntax error." % comps)
for elm in root.getiterator("group"):
id = elm.find("id")
name = elm.find("name")
if id.text == group or name.text == group:
packagelist = elm.find("packagelist")
found = True
break
if not found:
return pkglist
for require in elm.getiterator("packagereq"):
if require.tag.endswith("packagereq"):
pkgname = require.text
if pkgname not in pkglist:
pkglist.append(pkgname)
return pkglist
def is_statically_linked(binary):
return ", statically linked, " in runner.outs(['file', binary])
def setup_qemu_emulator(rootdir, arch):
# mount binfmt_misc if it doesn't exist
if not os.path.exists("/proc/sys/fs/binfmt_misc"):
modprobecmd = find_binary_path("modprobe")
runner.show([modprobecmd, "binfmt_misc"])
if not os.path.exists("/proc/sys/fs/binfmt_misc/register"):
mountcmd = find_binary_path("mount")
runner.show([mountcmd, "-t", "binfmt_misc", "none", "/proc/sys/fs/binfmt_misc"])
# qemu_emulator is a special case, we can't use find_binary_path
# qemu emulator should be a statically-linked executable file
if arch == "aarch64":
node = "/proc/sys/fs/binfmt_misc/aarch64"
if os.path.exists("/usr/bin/qemu-arm64") and is_statically_linked("/usr/bin/qemu-arm64"):
arm_binary = "qemu-arm64"
elif os.path.exists("/usr/bin/qemu-aarch64") and is_statically_linked("/usr/bin/qemu-aarch64"):
arm_binary = "qemu-aarch64"
elif os.path.exists("/usr/bin/qemu-arm64-static"):
arm_binary = "qemu-arm64-static"
elif os.path.exists("/usr/bin/qemu-aarch64-static"):
arm_binary = "qemu-aarch64-static"
else:
raise CreatorError("Please install a statically-linked %s" % arm_binary)
elif arch == "mipsel":
node = "/proc/sys/fs/binfmt_misc/mipsel"
arm_binary = "qemu-mipsel"
if not os.path.exists("/usr/bin/%s" % arm_binary) or not is_statically_linked("/usr/bin/%s"):
arm_binary = "qemu-mipsel-static"
if not os.path.exists("/usr/bin/%s" % arm_binary):
raise CreatorError("Please install a statically-linked %s" % arm_binary)
else:
node = "/proc/sys/fs/binfmt_misc/arm"
arm_binary = "qemu-arm"
if not os.path.exists("/usr/bin/qemu-arm") or not is_statically_linked("/usr/bin/qemu-arm"):
arm_binary = "qemu-arm-static"
if not os.path.exists("/usr/bin/%s" % arm_binary):
raise CreatorError("Please install a statically-linked %s" % arm_binary)
qemu_emulator = "/usr/bin/%s" % arm_binary
if not os.path.exists(rootdir + "/usr/bin"):
makedirs(rootdir + "/usr/bin")
shutil.copy(qemu_emulator, rootdir + qemu_emulator)
    # disable SELinux, as it would otherwise block the qemu emulator from running
if os.path.exists("/usr/sbin/setenforce"):
msger.info('Try to disable selinux')
runner.show(["/usr/sbin/setenforce", "0"])
# unregister it if it has been registered and is a dynamically-linked executable
if os.path.exists(node):
qemu_unregister_string = "-1\n"
with open(node, "w") as fd:
fd.write(qemu_unregister_string)
    # register the qemu emulator so the kernel can interpret executables built for other architectures
if not os.path.exists(node):
if arch == "aarch64":
qemu_arm_string = ":aarch64:M::\\x7fELF\\x02\\x01\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\xb7:\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xfe\\xff\\xff:%s:\n" % qemu_emulator
elif arch == "mipsel":
qemu_arm_string = ":mipsel:M::\\x7fELF\\x01\\x01\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x08\\x00:\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x00\\xfe\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xfe\\xff\\xff\\xff:%s:\n" % qemu_emulator
else:
qemu_arm_string = ":arm:M::\\x7fELF\\x01\\x01\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x02\\x00\\x28\\x00:\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\x00\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xff\\xfa\\xff\\xff\\xff:%s:\n" % qemu_emulator
with open("/proc/sys/fs/binfmt_misc/register", "w") as fd:
fd.write(qemu_arm_string)
return qemu_emulator
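# Hedged illustration (the helper below is not part of mic): each registration
# string written above follows the binfmt_misc line format
# ":name:type:offset:magic:mask:interpreter:flags", with type fixed to "M"
# (magic match) and offset/flags left empty, so only the name, magic and mask
# differ per architecture.
def _example_binfmt_register_line(name, magic, mask, interpreter):
    return ":%s:M::%s:%s:%s:\n" % (name, magic, mask, interpreter)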
def SrcpkgsDownload(pkgs, repometadata, instroot, cachedir):
def get_source_repometadata(repometadata):
src_repometadata=[]
for repo in repometadata:
if repo["name"].endswith("-source"):
src_repometadata.append(repo)
if src_repometadata:
return src_repometadata
return None
def get_src_name(srpm):
m = SRPM_RE.match(srpm)
if m:
return m.group(1)
return None
src_repometadata = get_source_repometadata(repometadata)
if not src_repometadata:
msger.warning("No source repo found")
return None
src_pkgs = []
lpkgs_dict = {}
lpkgs_path = []
for repo in src_repometadata:
cachepath = "%s/%s/packages/*.src.rpm" %(cachedir, repo["name"])
lpkgs_path += glob.glob(cachepath)
for lpkg in lpkgs_path:
lpkg_name = get_src_name(os.path.basename(lpkg))
lpkgs_dict[lpkg_name] = lpkg
localpkgs = lpkgs_dict.keys()
cached_count = 0
destdir = instroot+'/usr/src/SRPMS'
if not os.path.exists(destdir):
os.makedirs(destdir)
srcpkgset = set()
for _pkg in pkgs:
srcpkg_name = get_source_name(_pkg, repometadata)
if not srcpkg_name:
continue
srcpkgset.add(srcpkg_name)
for pkg in list(srcpkgset):
if pkg in localpkgs:
cached_count += 1
shutil.copy(lpkgs_dict[pkg], destdir)
src_pkgs.append(os.path.basename(lpkgs_dict[pkg]))
else:
src_pkg = get_package(pkg, src_repometadata, 'src')
if src_pkg:
shutil.copy(src_pkg, destdir)
src_pkgs.append(src_pkg)
msger.info("%d source packages gotten from cache" % cached_count)
return src_pkgs
def strip_end(text, suffix):
if not text.endswith(suffix):
return text
return text[:-len(suffix)]
| gpl-2.0 |
Endika/django | tests/multiple_database/models.py | 282 | 2472 | from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Review(models.Model):
source = models.CharField(max_length=100)
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
def __str__(self):
return self.source
class Meta:
ordering = ('source',)
class PersonManager(models.Manager):
def get_by_natural_key(self, name):
return self.get(name=name)
@python_2_unicode_compatible
class Person(models.Model):
objects = PersonManager()
name = models.CharField(max_length=100)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
# This book manager doesn't do anything interesting; it just
# exists to strip out the 'extra_arg' argument to certain
# calls. This argument is used to establish that the BookManager
# is actually getting used when it should be.
class BookManager(models.Manager):
def create(self, *args, **kwargs):
kwargs.pop('extra_arg', None)
return super(BookManager, self).create(*args, **kwargs)
def get_or_create(self, *args, **kwargs):
kwargs.pop('extra_arg', None)
return super(BookManager, self).get_or_create(*args, **kwargs)
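# Illustrative only (assumed usage, not part of the original test models):
# because BookManager silently drops 'extra_arg', the call below behaves
# exactly like a plain create().
def _example_create_with_extra_arg():
    import datetime
    return Book.objects.create(
        title="Example Book",
        published=datetime.date(2009, 5, 4),
        extra_arg=True,
    )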
@python_2_unicode_compatible
class Book(models.Model):
objects = BookManager()
title = models.CharField(max_length=100)
published = models.DateField()
authors = models.ManyToManyField(Person)
editor = models.ForeignKey(Person, models.SET_NULL, null=True, related_name='edited')
reviews = GenericRelation(Review)
pages = models.IntegerField(default=100)
def __str__(self):
return self.title
class Meta:
ordering = ('title',)
@python_2_unicode_compatible
class Pet(models.Model):
name = models.CharField(max_length=100)
owner = models.ForeignKey(Person, models.CASCADE)
def __str__(self):
return self.name
class Meta:
ordering = ('name',)
class UserProfile(models.Model):
user = models.OneToOneField(User, models.SET_NULL, null=True)
flavor = models.CharField(max_length=100)
class Meta:
ordering = ('flavor',)
| bsd-3-clause |
janewangfb/spark | examples/src/main/python/transitive_closure.py | 128 | 2408 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
from random import Random
from pyspark.sql import SparkSession
numEdges = 200
numVertices = 100
rand = Random(42)
def generateGraph():
edges = set()
while len(edges) < numEdges:
src = rand.randrange(0, numVertices)
dst = rand.randrange(0, numVertices)
if src != dst:
edges.add((src, dst))
return edges
if __name__ == "__main__":
"""
Usage: transitive_closure [partitions]
"""
spark = SparkSession\
.builder\
.appName("PythonTransitiveClosure")\
.getOrCreate()
partitions = int(sys.argv[1]) if len(sys.argv) > 1 else 2
tc = spark.sparkContext.parallelize(generateGraph(), partitions).cache()
# Linear transitive closure: each round grows paths by one edge,
# by joining the graph's edges with the already-discovered paths.
# e.g. join the path (y, z) from the TC with the edge (x, y) from
# the graph to obtain the path (x, z).
# Because join() joins on keys, the edges are stored in reversed order.
edges = tc.map(lambda x_y: (x_y[1], x_y[0]))
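    # Illustrative walk-through (made-up values): an edge (1, 2) is stored
    # reversed as (2, 1); joining it with an existing path (2, 3) on key 2
    # yields (2, (3, 1)), which the projection below turns into the new
    # path (1, 3).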
oldCount = 0
nextCount = tc.count()
while True:
oldCount = nextCount
# Perform the join, obtaining an RDD of (y, (z, x)) pairs,
# then project the result to obtain the new (x, z) paths.
new_edges = tc.join(edges).map(lambda __a_b: (__a_b[1][1], __a_b[1][0]))
tc = tc.union(new_edges).distinct().cache()
nextCount = tc.count()
if nextCount == oldCount:
break
print("TC has %i edges" % tc.count())
spark.stop()
| apache-2.0 |
nilsgrabbert/spark | python/pyspark/mllib/__init__.py | 123 | 1412 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
RDD-based machine learning APIs for Python (in maintenance mode).
The `pyspark.mllib` package is in maintenance mode as of the Spark 2.0.0 release to encourage
migration to the DataFrame-based APIs under the `pyspark.ml` package.
"""
from __future__ import absolute_import
# MLlib currently needs NumPy 1.4+, so complain if lower
import numpy
ver = [int(x) for x in numpy.version.version.split('.')[:2]]
if ver < [1, 4]:
raise Exception("MLlib requires NumPy 1.4+")
__all__ = ['classification', 'clustering', 'feature', 'fpm', 'linalg', 'random',
'recommendation', 'regression', 'stat', 'tree', 'util']
| apache-2.0 |
YanTangZhai/tf | tensorflow/python/training/training_ops.py | 1 | 5435 | # Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrappers for training ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.training import gen_training_ops
# pylint: disable=wildcard-import
from tensorflow.python.training.gen_training_ops import *
# pylint: enable=wildcard-import
# Shape functions for fused training ops
# --------------------------------------
#
# The fused training ops all have the same basic structure: they take
# one or more variables with the same shape, and emit a reference to
# the original variable (which has the same shape as the first
# input). In addition, they take one or more scalar tensors containing
# hyperparameters.
#
# The sparse ops take the gradients as a Python IndexedSlices, which
# means that the indices are a vector of length N, and the gradient
# values are a tensor whose size is the same as the original variable,
# except for the 0th dimension, which has size N.
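# Hedged illustration (the helper below is not part of TensorFlow): for a
# variable of shape [rows, cols] whose sparse update touches N rows, the
# IndexedSlices gradient carries values of shape [N, cols] and indices of
# shape [N].
def _example_sparse_grad_shapes(var_shape, num_updated_rows):
  """Returns (values_shape, indices_shape) for an illustrative sparse update."""
  return [num_updated_rows] + list(var_shape[1:]), [num_updated_rows]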
def _AssertInputIsScalar(op, index):
"""Raises ValueError if `op.inputs[index]` is not scalar."""
op.inputs[index].get_shape().assert_is_compatible_with(tensor_shape.scalar())
@ops.RegisterShape("ApplyAdagrad")
def _ApplyAdagradShape(op):
"""Shape function for the ApplyAdagrad op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
_AssertInputIsScalar(op, 2) # lr
grad_shape = op.inputs[3].get_shape().merge_with(accum_shape)
return [grad_shape]
@ops.RegisterShape("ApplyAdam")
def _ApplyAdamShape(op):
"""Shape function for the ApplyAdam op."""
var_shape = op.inputs[0].get_shape()
m_shape = op.inputs[1].get_shape().merge_with(var_shape)
v_shape = op.inputs[2].get_shape().merge_with(m_shape)
_AssertInputIsScalar(op, 3) # beta1_power
_AssertInputIsScalar(op, 4) # beta2_power
_AssertInputIsScalar(op, 5) # lr
_AssertInputIsScalar(op, 6) # beta1
_AssertInputIsScalar(op, 7) # beta2
_AssertInputIsScalar(op, 8) # epsilon
grad_shape = op.inputs[9].get_shape().merge_with(v_shape)
return [grad_shape]
@ops.RegisterShape("ApplyMomentum")
def _ApplyMomentumShape(op):
"""Shape function for the ApplyMomentum op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
_AssertInputIsScalar(op, 2) # lr
grad_shape = op.inputs[3].get_shape().merge_with(accum_shape)
_AssertInputIsScalar(op, 4) # momentum
return [grad_shape]
@ops.RegisterShape("ApplyRMSProp")
def _ApplyRMSPropShape(op):
"""Shape function for the ApplyRMSProp op."""
var_shape = op.inputs[0].get_shape()
ms_shape = op.inputs[1].get_shape().merge_with(var_shape)
mom_shape = op.inputs[2].get_shape().merge_with(ms_shape)
_AssertInputIsScalar(op, 3) # lr
_AssertInputIsScalar(op, 4) # rho
_AssertInputIsScalar(op, 5) # momentum
_AssertInputIsScalar(op, 6) # epsilon
grad_shape = op.inputs[7].get_shape().merge_with(mom_shape)
return [grad_shape]
@ops.RegisterShape("ApplyGradientDescent")
def _ApplyGradientDescentShape(op):
"""Shape function for the ApplyGradientDescent op."""
var_shape = op.inputs[0].get_shape()
_AssertInputIsScalar(op, 1) # alpha
delta_shape = op.inputs[2].get_shape().merge_with(var_shape)
return [delta_shape]
@ops.RegisterShape("ApplyDistGradientDescent")
def _ApplyDistGradientDescentShape(op):
"""Shape function for the ApplyDistGradientDescent op."""
var_shape = op.inputs[0].get_shape()
_AssertInputIsScalar(op, 1) # alpha
delta_shape = op.inputs[2].get_shape().merge_with(var_shape)
return [delta_shape]
@ops.RegisterShape("SparseApplyAdagrad")
def _SparseApplyAdagradShape(op):
"""Shape function for the SparseApplyAdagrad op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
_AssertInputIsScalar(op, 2) # lr
grad_shape = op.inputs[3].get_shape().merge_with(
tensor_shape.TensorShape([None]).concatenate(accum_shape[1:]))
unused_indices_shape = op.inputs[4].get_shape().merge_with(
tensor_shape.vector(grad_shape[0]))
return [accum_shape]
@ops.RegisterShape("SparseApplyMomentum")
def _SparseApplyMomentumShape(op):
"""Shape function for the SparseApplyMomentum op."""
var_shape = op.inputs[0].get_shape()
accum_shape = op.inputs[1].get_shape().merge_with(var_shape)
_AssertInputIsScalar(op, 2) # lr
grad_shape = op.inputs[3].get_shape().merge_with(
tensor_shape.TensorShape([None]).concatenate(accum_shape[1:]))
unused_indices_shape = op.inputs[4].get_shape().merge_with(
tensor_shape.vector(grad_shape[0]))
_AssertInputIsScalar(op, 5) # momentum
return [accum_shape]
| apache-2.0 |
ashang/calibre | src/calibre/gui2/actions/preferences.py | 14 | 2810 | #!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2010, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
from functools import partial
from PyQt5.Qt import QIcon, Qt
from calibre.gui2.actions import InterfaceAction
from calibre.gui2.preferences.main import Preferences
from calibre.gui2 import error_dialog, show_restart_warning
from calibre.constants import DEBUG, isosx
class PreferencesAction(InterfaceAction):
name = 'Preferences'
action_spec = (_('Preferences'), 'config.png', _('Configure calibre'), 'Ctrl+P')
action_add_menu = True
action_menu_clone_qaction = _('Change calibre behavior')
def genesis(self):
pm = self.qaction.menu()
cm = partial(self.create_menu_action, pm)
if isosx:
pm.addAction(QIcon(I('config.png')), _('Preferences'), self.do_config)
cm('welcome wizard', _('Run welcome wizard'),
icon='wizard.png', triggered=self.gui.run_wizard)
cm('plugin updater', _('Get plugins to enhance calibre'),
icon='plugins/plugin_updater.png', triggered=self.get_plugins)
if not DEBUG:
pm.addSeparator()
cm('restart', _('Restart in debug mode'), icon='debug.png',
triggered=self.debug_restart, shortcut='Ctrl+Shift+R')
self.preferences_menu = pm
for x in (self.gui.preferences_action, self.qaction):
x.triggered.connect(self.do_config)
def get_plugins(self):
from calibre.gui2.dialogs.plugin_updater import (PluginUpdaterDialog,
FILTER_NOT_INSTALLED)
d = PluginUpdaterDialog(self.gui,
initial_filter=FILTER_NOT_INSTALLED)
d.exec_()
if d.do_restart:
self.gui.quit(restart=True)
def do_config(self, checked=False, initial_plugin=None,
close_after_initial=False):
if self.gui.job_manager.has_jobs():
d = error_dialog(self.gui, _('Cannot configure'),
_('Cannot configure while there are running jobs.'))
d.exec_()
return
if self.gui.must_restart_before_config:
do_restart = show_restart_warning(_('Cannot configure before calibre is restarted.'))
if do_restart:
self.gui.quit(restart=True)
return
d = Preferences(self.gui, initial_plugin=initial_plugin,
close_after_initial=close_after_initial)
d.run_wizard_requested.connect(self.gui.run_wizard,
type=Qt.QueuedConnection)
d.exec_()
if d.do_restart:
self.gui.quit(restart=True)
def debug_restart(self, *args):
self.gui.quit(restart=True, debug_on_restart=True)
| gpl-3.0 |
mlq/fMBT | utils/setup.py | 5 | 4003 | #!/usr/bin/env python2
from distutils.core import setup, Extension
import os
import subprocess
import sys
def pkg_config(package):
if os.name == "nt":
if package == "MagickCore":
# pkg-config cannot be used, try to find needed libraries from the filesystem
import fnmatch
libraries = ["CORE_RL_magick_"]
library_dirs = []
include_dirs = []
missing_libs = set(["kernel32.lib"] + [l + ".lib" for l in libraries])
missing_headers = set(["magick/MagickCore.h"])
for rootdir, dirnames, filenames in os.walk(os.environ["ProgramFiles"]):
for library in sorted(missing_libs):
if fnmatch.filter(filenames, library) and not "x64" in rootdir:
library_dirs.append(rootdir)
missing_libs.remove(library)
if not missing_libs:
break
for header in missing_headers:
if fnmatch.filter(filenames, os.path.basename(header)):
if os.path.dirname(header) == "":
include_dirs.append(rootdir)
elif os.path.dirname(header) == os.path.basename(rootdir):
include_dirs.append(os.path.dirname(rootdir))
missing_headers.remove(header)
if not missing_headers:
break
if not missing_libs and not missing_headers:
break
ext_args = {
"libraries": libraries,
"library_dirs": sorted(set(library_dirs)),
"include_dirs": include_dirs,
}
else:
sys.stderr.write('Unknown build parameters for package "%s"\n' % (package,))
sys.exit(1)
else:
_pkg_config = os.getenv("PKG_CONFIG","pkg-config")
o = subprocess.check_output([_pkg_config, "--libs", "--cflags", package])
ext_args = {"libraries": [], "library_dirs": [], "include_dirs": [], "extra_compile_args": []}
for arg in o.split():
if arg.startswith("-L"):
ext_args["library_dirs"].append(arg[2:])
elif arg.startswith("-l"):
ext_args["libraries"].append(arg[2:])
elif arg.startswith("-I"):
ext_args["include_dirs"].append(arg[2:])
elif arg.startswith("-f") or arg.startswith("-D"):
ext_args["extra_compile_args"].append(arg)
else:
raise ValueError('Unexpected pkg-config output: "%s" in "%s"'
% (arg, o))
return ext_args
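# Illustrative sketch only (paths and flags below are made-up examples, not
# real pkg-config output): on a typical Linux host pkg_config("MagickCore")
# could return something like
# {"libraries": ["MagickCore"], "library_dirs": ["/usr/lib64"],
#  "include_dirs": ["/usr/include/ImageMagick"],
#  "extra_compile_args": ["-fopenmp"]}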
fmbt_utils_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),os.getenv("VPATH",""))
fmbt_dir = os.path.join(fmbt_utils_dir, "..")
version = (file(os.path.join(fmbt_dir, "configure.ac"), "r")
.readline()
.split(",")[1]
.replace(' ','')
.replace('[','')
.replace(']',''))
lines = [line.replace("\\","").strip()
for line in file(os.path.join(fmbt_utils_dir, "Makefile.am"))
if not "=" in line]
modules = [module.replace(".py","")
for module in lines[lines.index("# modules")+1:
lines.index("# end of modules")]]
scripts = lines[lines.index("# scripts")+1:
lines.index("# end of scripts")]
eye4graphcs_buildflags = pkg_config("MagickCore")
ext_eye4graphics = Extension('eye4graphics',
sources = ['eye4graphics.cc'],
**eye4graphcs_buildflags)
setup(name = 'fmbt-python',
version = version,
description = 'fMBT Python tools and libraries',
author = 'Antti Kervinen',
author_email = '[email protected]',
py_modules = modules,
scripts = scripts,
ext_modules = [ext_eye4graphics]
)
| lgpl-2.1 |
switchboardOp/ansible | lib/ansible/modules/network/netscaler/netscaler_server.py | 14 | 12592 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netscaler_server
short_description: Manage server configuration
description:
- Manage server entities configuration.
- This module is intended to run either on the ansible control node or a bastion (jumpserver) with access to the actual netscaler instance.
version_added: "2.4.0"
author: George Nikolopoulos (@giorgos-nikolopoulos)
options:
name:
description:
- "Name for the server."
- >-
Must begin with an ASCII alphabetic or underscore C(_) character, and must contain only ASCII
alphanumeric, underscore C(_), hash C(#), period C(.), space C( ), colon C(:), at C(@), equals C(=), and hyphen C(-)
characters.
- "Can be changed after the name is created."
- "Minimum length = 1"
ipaddress:
description:
- >-
IPv4 or IPv6 address of the server. If you create an IP address based server, you can specify the
name of the server, instead of its IP address, when creating a service. Note: If you do not create a
server entry, the server IP address that you enter when you create a service becomes the name of the
server.
domain:
description:
- "Domain name of the server. For a domain based configuration, you must create the server first."
- "Minimum length = 1"
translationip:
description:
- "IP address used to transform the server's DNS-resolved IP address."
translationmask:
description:
- "The netmask of the translation ip."
domainresolveretry:
description:
- >-
Time, in seconds, for which the NetScaler appliance must wait, after DNS resolution fails, before
sending the next DNS query to resolve the domain name.
- "Minimum value = C(5)"
- "Maximum value = C(20939)"
default: 5
ipv6address:
description:
- >-
Support IPv6 addressing mode. If you configure a server with the IPv6 addressing mode, you cannot use
the server in the IPv4 addressing mode.
default: false
type: bool
comment:
description:
- "Any information about the server."
td:
description:
- >-
Integer value that uniquely identifies the traffic domain in which you want to configure the entity.
If you do not specify an ID, the entity becomes part of the default traffic domain, which has an ID
of 0.
- "Minimum value = C(0)"
- "Maximum value = C(4094)"
disabled:
description:
- When set to C(true) the server state will be set to DISABLED.
- When set to C(false) the server state will be set to ENABLED.
- >-
Note that due to limitations of the underlying NITRO API a C(disabled) state change alone
does not cause the module result to report a changed status.
type: bool
default: false
extends_documentation_fragment: netscaler
requirements:
- nitro python sdk
'''
EXAMPLES = '''
- name: Setup server
delegate_to: localhost
netscaler_server:
nsip: 172.18.0.2
nitro_user: nsroot
nitro_pass: nsroot
state: present
name: server-1
ipaddress: 192.168.1.1
'''
RETURN = '''
loglines:
description: list of logged messages by the module
returned: always
type: list
sample: ['message 1', 'message 2']
msg:
description: Message detailing the failure reason
returned: failure
type: str
sample: "Action does not exist"
diff:
description: List of differences between the actual configured object and the configuration specified in the module
returned: failure
type: dict
sample: { 'targetlbvserver': 'difference. ours: (str) server1 other: (str) server2' }
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.netscaler import ConfigProxy, get_nitro_client, netscaler_common_arguments, log, loglines, get_immutables_intersection
try:
from nssrc.com.citrix.netscaler.nitro.resource.config.basic.server import server
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
PYTHON_SDK_IMPORTED = True
except ImportError as e:
PYTHON_SDK_IMPORTED = False
def server_exists(client, module):
log('Checking if server exists')
if server.count_filtered(client, 'name:%s' % module.params['name']) > 0:
return True
else:
return False
def server_identical(client, module, server_proxy):
log('Checking if configured server is identical')
if server.count_filtered(client, 'name:%s' % module.params['name']) == 0:
return False
server_list = server.get_filtered(client, 'name:%s' % module.params['name'])
if server_proxy.has_equal_attributes(server_list[0]):
return True
else:
return False
def diff_list(client, module, server_proxy):
    return server_proxy.diff_object(
        server.get_filtered(client, 'name:%s' % module.params['name'])[0])
def do_state_change(client, module, server_proxy):
if module.params['disabled']:
log('Disabling server')
result = server.disable(client, server_proxy.actual)
else:
log('Enabling server')
result = server.enable(client, server_proxy.actual)
return result
def main():
module_specific_arguments = dict(
name=dict(type='str'),
ipaddress=dict(type='str'),
domain=dict(type='str'),
translationip=dict(type='str'),
translationmask=dict(type='str'),
domainresolveretry=dict(type='int'),
ipv6address=dict(
type='bool',
default=False
),
comment=dict(type='str'),
td=dict(type='float'),
)
hand_inserted_arguments = dict(
disabled=dict(
type='bool',
default=False,
),
)
argument_spec = dict()
argument_spec.update(netscaler_common_arguments)
argument_spec.update(module_specific_arguments)
argument_spec.update(hand_inserted_arguments)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
module_result = dict(
changed=False,
failed=False,
loglines=loglines,
)
# Fail the module if imports failed
if not PYTHON_SDK_IMPORTED:
module.fail_json(msg='Could not load nitro python sdk')
# Fallthrough to rest of execution
client = get_nitro_client(module)
try:
client.login()
except nitro_exception as e:
msg = "nitro exception during login. errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg)
except Exception as e:
if str(type(e)) == "<class 'requests.exceptions.ConnectionError'>":
module.fail_json(msg='Connection error %s' % str(e))
elif str(type(e)) == "<class 'requests.exceptions.SSLError'>":
module.fail_json(msg='SSL Error %s' % str(e))
else:
module.fail_json(msg='Unexpected error during login %s' % str(e))
# Instantiate Server Config object
readwrite_attrs = [
'name',
'ipaddress',
'domain',
'translationip',
'translationmask',
'domainresolveretry',
'ipv6address',
'comment',
'td',
]
readonly_attrs = [
'statechangetimesec',
'tickssincelaststatechange',
'autoscale',
'customserverid',
'monthreshold',
'maxclient',
'maxreq',
'maxbandwidth',
'usip',
'cka',
'tcpb',
'cmp',
'clttimeout',
'svrtimeout',
'cipheader',
'cip',
'cacheable',
'sc',
'sp',
'downstateflush',
'appflowlog',
'boundtd',
'__count',
]
immutable_attrs = [
'name',
'domain',
'ipv6address',
'td',
]
transforms = {
'ipv6address': ['bool_yes_no'],
}
server_proxy = ConfigProxy(
actual=server(),
client=client,
attribute_values_dict=module.params,
readwrite_attrs=readwrite_attrs,
readonly_attrs=readonly_attrs,
immutable_attrs=immutable_attrs,
transforms=transforms,
)
try:
# Apply appropriate state
if module.params['state'] == 'present':
log('Applying actions for state present')
if not server_exists(client, module):
if not module.check_mode:
server_proxy.add()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
elif not server_identical(client, module, server_proxy):
# Check if we try to change value of immutable attributes
immutables_changed = get_immutables_intersection(server_proxy, diff_list(client, module, server_proxy).keys())
if immutables_changed != []:
msg = 'Cannot update immutable attributes %s' % (immutables_changed,)
module.fail_json(msg=msg, diff=diff_list(client, module, server_proxy), **module_result)
if not module.check_mode:
server_proxy.update()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
module_result['changed'] = False
if not module.check_mode:
res = do_state_change(client, module, server_proxy)
if res.errorcode != 0:
msg = 'Error when setting disabled state. errorcode: %s message: %s' % (res.errorcode, res.message)
module.fail_json(msg=msg, **module_result)
# Sanity check for result
log('Sanity checks for state present')
if not module.check_mode:
if not server_exists(client, module):
module.fail_json(msg='Server does not seem to exist', **module_result)
if not server_identical(client, module, server_proxy):
module.fail_json(
msg='Server is not configured according to parameters given',
diff=diff_list(client, module, server_proxy),
**module_result
)
elif module.params['state'] == 'absent':
log('Applying actions for state absent')
if server_exists(client, module):
if not module.check_mode:
server_proxy.delete()
if module.params['save_config']:
client.save_config()
module_result['changed'] = True
else:
module_result['changed'] = False
# Sanity check for result
log('Sanity checks for state absent')
if not module.check_mode:
if server_exists(client, module):
module.fail_json(msg='Server seems to be present', **module_result)
except nitro_exception as e:
msg = "nitro exception errorcode=%s, message=%s" % (str(e.errorcode), e.message)
module.fail_json(msg=msg, **module_result)
client.logout()
module.exit_json(**module_result)
if __name__ == "__main__":
main()
| gpl-3.0 |
tinloaf/home-assistant | homeassistant/components/binary_sensor/__init__.py | 4 | 3179 | """
Component to interface with binary sensors.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/binary_sensor/
"""
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.const import (STATE_ON, STATE_OFF)
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
DOMAIN = 'binary_sensor'
SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_FORMAT = DOMAIN + '.{}'
DEVICE_CLASSES = [
'battery', # On means low, Off means normal
'cold', # On means cold, Off means normal
'connectivity', # On means connected, Off means disconnected
'door', # On means open, Off means closed
'garage_door', # On means open, Off means closed
'gas', # On means gas detected, Off means no gas (clear)
'heat', # On means hot, Off means normal
'light', # On means light detected, Off means no light
'lock', # On means open (unlocked), Off means closed (locked)
'moisture', # On means wet, Off means dry
'motion', # On means motion detected, Off means no motion (clear)
'moving', # On means moving, Off means not moving (stopped)
'occupancy', # On means occupied, Off means not occupied (clear)
'opening', # On means open, Off means closed
'plug', # On means plugged in, Off means unplugged
'power', # On means power detected, Off means no power
'presence', # On means home, Off means away
'problem', # On means problem detected, Off means no problem (OK)
'safety', # On means unsafe, Off means safe
'smoke', # On means smoke detected, Off means no smoke (clear)
'sound', # On means sound detected, Off means no sound (clear)
'vibration', # On means vibration detected, Off means no vibration
'window', # On means open, Off means closed
]
DEVICE_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(DEVICE_CLASSES))
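# Illustrative only: the schema lower-cases and validates a configured class,
# e.g. DEVICE_CLASSES_SCHEMA('Motion') returns 'motion', while a value not in
# DEVICE_CLASSES raises vol.Invalid.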
async def async_setup(hass, config):
"""Track states and offer events for binary sensors."""
component = hass.data[DOMAIN] = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL)
await component.async_setup(config)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class BinarySensorDevice(Entity):
"""Represent a binary sensor."""
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return None
@property
def state(self):
"""Return the state of the binary sensor."""
return STATE_ON if self.is_on else STATE_OFF
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return None
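# Minimal sketch of a concrete sensor built on this base class; the name
# "DemoDoorSensor" is illustrative only and not part of Home Assistant.
class DemoDoorSensor(BinarySensorDevice):
    """Report a fixed door state for demonstration purposes."""
    def __init__(self, is_open):
        """Initialize the demo sensor with a fixed state."""
        self._is_open = is_open
    @property
    def is_on(self):
        """Return true if the door is open."""
        return self._is_open
    @property
    def device_class(self):
        """Return 'door', one of the strings in DEVICE_CLASSES."""
        return 'door'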
| apache-2.0 |
TempSquad/TEMP2016-site-client | node_modules/gulp-sass/node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/xml_fix.py | 2767 | 2174 | # Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Applies a fix to CR LF TAB handling in xml.dom.
Fixes this: http://code.google.com/p/chromium/issues/detail?id=76293
Working around this: http://bugs.python.org/issue5752
TODO(bradnelson): Consider dropping this when we drop XP support.
"""
import xml.dom.minidom
def _Replacement_write_data(writer, data, is_attrib=False):
"""Writes datachars to writer."""
data = data.replace("&", "&").replace("<", "<")
data = data.replace("\"", """).replace(">", ">")
if is_attrib:
data = data.replace(
"\r", "
").replace(
"\n", "
").replace(
"\t", "	")
writer.write(data)
def _Replacement_writexml(self, writer, indent="", addindent="", newl=""):
# indent = current indentation
# addindent = indentation to add to higher levels
# newl = newline string
writer.write(indent+"<" + self.tagName)
attrs = self._get_attributes()
a_names = attrs.keys()
a_names.sort()
for a_name in a_names:
writer.write(" %s=\"" % a_name)
_Replacement_write_data(writer, attrs[a_name].value, is_attrib=True)
writer.write("\"")
if self.childNodes:
writer.write(">%s" % newl)
for node in self.childNodes:
node.writexml(writer, indent + addindent, addindent, newl)
writer.write("%s</%s>%s" % (indent, self.tagName, newl))
else:
writer.write("/>%s" % newl)
class XmlFix(object):
"""Object to manage temporary patching of xml.dom.minidom."""
def __init__(self):
# Preserve current xml.dom.minidom functions.
self.write_data = xml.dom.minidom._write_data
self.writexml = xml.dom.minidom.Element.writexml
# Inject replacement versions of a function and a method.
xml.dom.minidom._write_data = _Replacement_write_data
xml.dom.minidom.Element.writexml = _Replacement_writexml
def Cleanup(self):
if self.write_data:
xml.dom.minidom._write_data = self.write_data
xml.dom.minidom.Element.writexml = self.writexml
self.write_data = None
def __del__(self):
self.Cleanup()
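# Illustrative usage sketch (the helper below is not part of gyp): apply the
# patch while writing a document, then restore the stock minidom behaviour.
def _ExampleWriteWithFix(xml_text, writer):
  fix = XmlFix()
  try:
    doc = xml.dom.minidom.parseString(xml_text)
    doc.documentElement.writexml(writer)
  finally:
    fix.Cleanup()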
| apache-2.0 |
ujjvala-addsol/addsol_hr | openerp/addons/base_report_designer/__init__.py | 421 | 1136 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import wizard
import base_report_designer
import installer
import openerp_sxw2rml
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ParuninPavel/lenta4_hack | vkapp/contrib/sites/migrations/0003_set_site_domain_and_name.py | 1 | 1085 | """
To understand why this file is here, please read:
http://cookiecutter-django.readthedocs.io/en/latest/faq.html#why-is-there-a-django-contrib-sites-directory-in-cookiecutter-django
"""
from django.conf import settings
from django.db import migrations
def update_site_forward(apps, schema_editor):
"""Set site domain and name."""
Site = apps.get_model('sites', 'Site')
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
'domain': 'lentach.cleverbots.ru',
'name': 'lentach'
}
)
def update_site_backward(apps, schema_editor):
"""Revert site domain and name to default."""
Site = apps.get_model('sites', 'Site')
Site.objects.update_or_create(
id=settings.SITE_ID,
defaults={
'domain': 'example.com',
'name': 'example.com'
}
)
class Migration(migrations.Migration):
dependencies = [
('sites', '0002_alter_domain_unique'),
]
operations = [
migrations.RunPython(update_site_forward, update_site_backward),
]
| mit |
sunlianqiang/kbengine | kbe/res/scripts/common/Lib/test/test_zipfile.py | 60 | 70441 | import io
import os
import sys
import importlib.util
import time
import struct
import zipfile
import unittest
from tempfile import TemporaryFile
from random import randint, random, getrandbits
from test.support import (TESTFN, findfile, unlink, rmtree,
requires_zlib, requires_bz2, requires_lzma,
captured_stdout, check_warnings)
TESTFN2 = TESTFN + "2"
TESTFNDIR = TESTFN + "d"
FIXEDTEST_SIZE = 1000
DATAFILES_DIR = 'zipfile_datafiles'
SMALL_TEST_DATA = [('_ziptest1', '1q2w3e4r5t'),
('ziptest2dir/_ziptest2', 'qawsedrftg'),
('ziptest2dir/ziptest3dir/_ziptest3', 'azsxdcfvgb'),
('ziptest2dir/ziptest3dir/ziptest4dir/_ziptest3', '6y7u8i9o0p')]
def get_files(test):
yield TESTFN2
with TemporaryFile() as f:
yield f
test.assertFalse(f.closed)
with io.BytesIO() as f:
yield f
test.assertFalse(f.closed)
def openU(zipfp, fn):
with check_warnings(('', DeprecationWarning)):
return zipfp.open(fn, 'rU')
class AbstractTestsWithSourceFile:
@classmethod
def setUpClass(cls):
cls.line_gen = [bytes("Zipfile test line %d. random float: %f\n" %
(i, random()), "ascii")
for i in range(FIXEDTEST_SIZE)]
cls.data = b''.join(cls.line_gen)
def setUp(self):
# Make a source file with some lines
with open(TESTFN, "wb") as fp:
fp.write(self.data)
def make_test_archive(self, f, compression):
# Create the ZIP archive
with zipfile.ZipFile(f, "w", compression) as zipfp:
zipfp.write(TESTFN, "another.name")
zipfp.write(TESTFN, TESTFN)
zipfp.writestr("strfile", self.data)
def zip_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
self.assertEqual(zipfp.read(TESTFN), self.data)
self.assertEqual(zipfp.read("another.name"), self.data)
self.assertEqual(zipfp.read("strfile"), self.data)
# Print the ZIP directory
fp = io.StringIO()
zipfp.printdir(file=fp)
directory = fp.getvalue()
lines = directory.splitlines()
self.assertEqual(len(lines), 4) # Number of files + header
self.assertIn('File Name', lines[0])
self.assertIn('Modified', lines[0])
self.assertIn('Size', lines[0])
fn, date, time_, size = lines[1].split()
self.assertEqual(fn, 'another.name')
self.assertTrue(time.strptime(date, '%Y-%m-%d'))
self.assertTrue(time.strptime(time_, '%H:%M:%S'))
self.assertEqual(size, str(len(self.data)))
# Check the namelist
names = zipfp.namelist()
self.assertEqual(len(names), 3)
self.assertIn(TESTFN, names)
self.assertIn("another.name", names)
self.assertIn("strfile", names)
# Check infolist
infos = zipfp.infolist()
names = [i.filename for i in infos]
self.assertEqual(len(names), 3)
self.assertIn(TESTFN, names)
self.assertIn("another.name", names)
self.assertIn("strfile", names)
for i in infos:
self.assertEqual(i.file_size, len(self.data))
# check getinfo
for nm in (TESTFN, "another.name", "strfile"):
info = zipfp.getinfo(nm)
self.assertEqual(info.filename, nm)
self.assertEqual(info.file_size, len(self.data))
# Check that testzip doesn't raise an exception
zipfp.testzip()
def test_basic(self):
for f in get_files(self):
self.zip_test(f, self.compression)
def zip_open_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
zipdata1 = []
with zipfp.open(TESTFN) as zipopen1:
while True:
read_data = zipopen1.read(256)
if not read_data:
break
zipdata1.append(read_data)
zipdata2 = []
with zipfp.open("another.name") as zipopen2:
while True:
read_data = zipopen2.read(256)
if not read_data:
break
zipdata2.append(read_data)
self.assertEqual(b''.join(zipdata1), self.data)
self.assertEqual(b''.join(zipdata2), self.data)
def test_open(self):
for f in get_files(self):
self.zip_open_test(f, self.compression)
def zip_random_open_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
zipdata1 = []
with zipfp.open(TESTFN) as zipopen1:
while True:
read_data = zipopen1.read(randint(1, 1024))
if not read_data:
break
zipdata1.append(read_data)
self.assertEqual(b''.join(zipdata1), self.data)
def test_random_open(self):
for f in get_files(self):
self.zip_random_open_test(f, self.compression)
def zip_read1_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp, \
zipfp.open(TESTFN) as zipopen:
zipdata = []
while True:
read_data = zipopen.read1(-1)
if not read_data:
break
zipdata.append(read_data)
self.assertEqual(b''.join(zipdata), self.data)
def test_read1(self):
for f in get_files(self):
self.zip_read1_test(f, self.compression)
def zip_read1_10_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp, \
zipfp.open(TESTFN) as zipopen:
zipdata = []
while True:
read_data = zipopen.read1(10)
self.assertLessEqual(len(read_data), 10)
if not read_data:
break
zipdata.append(read_data)
self.assertEqual(b''.join(zipdata), self.data)
def test_read1_10(self):
for f in get_files(self):
self.zip_read1_10_test(f, self.compression)
def zip_readline_read_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp, \
zipfp.open(TESTFN) as zipopen:
data = b''
while True:
read = zipopen.readline()
if not read:
break
data += read
read = zipopen.read(100)
if not read:
break
data += read
self.assertEqual(data, self.data)
def test_readline_read(self):
# Issue #7610: calls to readline() interleaved with calls to read().
for f in get_files(self):
self.zip_readline_read_test(f, self.compression)
def zip_readline_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
with zipfp.open(TESTFN) as zipopen:
for line in self.line_gen:
linedata = zipopen.readline()
self.assertEqual(linedata, line)
def test_readline(self):
for f in get_files(self):
self.zip_readline_test(f, self.compression)
def zip_readlines_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
with zipfp.open(TESTFN) as zipopen:
ziplines = zipopen.readlines()
for line, zipline in zip(self.line_gen, ziplines):
self.assertEqual(zipline, line)
def test_readlines(self):
for f in get_files(self):
self.zip_readlines_test(f, self.compression)
def zip_iterlines_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
with zipfp.open(TESTFN) as zipopen:
for line, zipline in zip(self.line_gen, zipopen):
self.assertEqual(zipline, line)
def test_iterlines(self):
for f in get_files(self):
self.zip_iterlines_test(f, self.compression)
def test_low_compression(self):
"""Check for cases where compressed data is larger than original."""
# Create the ZIP archive
with zipfile.ZipFile(TESTFN2, "w", self.compression) as zipfp:
zipfp.writestr("strfile", '12')
# Get an open object for strfile
with zipfile.ZipFile(TESTFN2, "r", self.compression) as zipfp:
with zipfp.open("strfile") as openobj:
self.assertEqual(openobj.read(1), b'1')
self.assertEqual(openobj.read(1), b'2')
def test_writestr_compression(self):
zipfp = zipfile.ZipFile(TESTFN2, "w")
zipfp.writestr("b.txt", "hello world", compress_type=self.compression)
info = zipfp.getinfo('b.txt')
self.assertEqual(info.compress_type, self.compression)
def test_read_return_size(self):
# Issue #9837: ZipExtFile.read() shouldn't return more bytes
# than requested.
for test_size in (1, 4095, 4096, 4097, 16384):
file_size = test_size + 1
junk = getrandbits(8 * file_size).to_bytes(file_size, 'little')
with zipfile.ZipFile(io.BytesIO(), "w", self.compression) as zipf:
zipf.writestr('foo', junk)
with zipf.open('foo', 'r') as fp:
buf = fp.read(test_size)
self.assertEqual(len(buf), test_size)
def test_truncated_zipfile(self):
fp = io.BytesIO()
with zipfile.ZipFile(fp, mode='w') as zipf:
zipf.writestr('strfile', self.data, compress_type=self.compression)
end_offset = fp.tell()
zipfiledata = fp.getvalue()
fp = io.BytesIO(zipfiledata)
with zipfile.ZipFile(fp) as zipf:
with zipf.open('strfile') as zipopen:
fp.truncate(end_offset - 20)
with self.assertRaises(EOFError):
zipopen.read()
fp = io.BytesIO(zipfiledata)
with zipfile.ZipFile(fp) as zipf:
with zipf.open('strfile') as zipopen:
fp.truncate(end_offset - 20)
with self.assertRaises(EOFError):
while zipopen.read(100):
pass
fp = io.BytesIO(zipfiledata)
with zipfile.ZipFile(fp) as zipf:
with zipf.open('strfile') as zipopen:
fp.truncate(end_offset - 20)
with self.assertRaises(EOFError):
while zipopen.read1(100):
pass
def tearDown(self):
unlink(TESTFN)
unlink(TESTFN2)
class StoredTestsWithSourceFile(AbstractTestsWithSourceFile,
unittest.TestCase):
compression = zipfile.ZIP_STORED
test_low_compression = None
def zip_test_writestr_permissions(self, f, compression):
# Make sure that writestr creates files with mode 0600,
# when it is passed a name rather than a ZipInfo instance.
self.make_test_archive(f, compression)
with zipfile.ZipFile(f, "r") as zipfp:
zinfo = zipfp.getinfo('strfile')
self.assertEqual(zinfo.external_attr, 0o600 << 16)
def test_writestr_permissions(self):
for f in get_files(self):
self.zip_test_writestr_permissions(f, zipfile.ZIP_STORED)
def test_absolute_arcnames(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, "/absolute")
with zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_STORED) as zipfp:
self.assertEqual(zipfp.namelist(), ["absolute"])
def test_append_to_zip_file(self):
"""Test appending to an existing zipfile."""
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, TESTFN)
with zipfile.ZipFile(TESTFN2, "a", zipfile.ZIP_STORED) as zipfp:
zipfp.writestr("strfile", self.data)
self.assertEqual(zipfp.namelist(), [TESTFN, "strfile"])
def test_append_to_non_zip_file(self):
"""Test appending to an existing file that is not a zipfile."""
# NOTE: this test fails if len(d) < 22 because of the first
# line "fpin.seek(-22, 2)" in _EndRecData
data = b'I am not a ZipFile!'*10
with open(TESTFN2, 'wb') as f:
f.write(data)
with zipfile.ZipFile(TESTFN2, "a", zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, TESTFN)
with open(TESTFN2, 'rb') as f:
f.seek(len(data))
with zipfile.ZipFile(f, "r") as zipfp:
self.assertEqual(zipfp.namelist(), [TESTFN])
def test_ignores_newline_at_end(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, TESTFN)
with open(TESTFN2, 'a') as f:
f.write("\r\n\00\00\00")
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
self.assertIsInstance(zipfp, zipfile.ZipFile)
def test_ignores_stuff_appended_past_comments(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.comment = b"this is a comment"
zipfp.write(TESTFN, TESTFN)
with open(TESTFN2, 'a') as f:
f.write("abcdef\r\n")
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
self.assertIsInstance(zipfp, zipfile.ZipFile)
self.assertEqual(zipfp.comment, b"this is a comment")
def test_write_default_name(self):
"""Check that calling ZipFile.write without arcname specified
produces the expected result."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
zipfp.write(TESTFN)
with open(TESTFN, "rb") as f:
self.assertEqual(zipfp.read(TESTFN), f.read())
def test_write_to_readonly(self):
"""Check that trying to call write() on a readonly ZipFile object
raises a RuntimeError."""
with zipfile.ZipFile(TESTFN2, mode="w") as zipfp:
zipfp.writestr("somefile.txt", "bogus")
with zipfile.ZipFile(TESTFN2, mode="r") as zipfp:
self.assertRaises(RuntimeError, zipfp.write, TESTFN)
def test_add_file_before_1980(self):
# Set atime and mtime to 1970-01-01
os.utime(TESTFN, (0, 0))
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
self.assertRaises(ValueError, zipfp.write, TESTFN)
@requires_zlib
class DeflateTestsWithSourceFile(AbstractTestsWithSourceFile,
unittest.TestCase):
compression = zipfile.ZIP_DEFLATED
def test_per_file_compression(self):
"""Check that files within a Zip archive can have different
compression options."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
zipfp.write(TESTFN, 'storeme', zipfile.ZIP_STORED)
zipfp.write(TESTFN, 'deflateme', zipfile.ZIP_DEFLATED)
sinfo = zipfp.getinfo('storeme')
dinfo = zipfp.getinfo('deflateme')
self.assertEqual(sinfo.compress_type, zipfile.ZIP_STORED)
self.assertEqual(dinfo.compress_type, zipfile.ZIP_DEFLATED)
@requires_bz2
class Bzip2TestsWithSourceFile(AbstractTestsWithSourceFile,
unittest.TestCase):
compression = zipfile.ZIP_BZIP2
@requires_lzma
class LzmaTestsWithSourceFile(AbstractTestsWithSourceFile,
unittest.TestCase):
compression = zipfile.ZIP_LZMA
class AbstractTestZip64InSmallFiles:
# These tests test the ZIP64 functionality without using large files,
# see test_zipfile64 for proper tests.
@classmethod
def setUpClass(cls):
line_gen = (bytes("Test of zipfile line %d." % i, "ascii")
for i in range(0, FIXEDTEST_SIZE))
cls.data = b'\n'.join(line_gen)
def setUp(self):
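        # Drop the ZIP64 threshold to a tiny value so that these small test
        # archives exercise the ZIP64 code paths; tearDown() restores the
        # original limit.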
self._limit = zipfile.ZIP64_LIMIT
zipfile.ZIP64_LIMIT = 5
# Make a source file with some lines
with open(TESTFN, "wb") as fp:
fp.write(self.data)
def zip_test(self, f, compression):
# Create the ZIP archive
with zipfile.ZipFile(f, "w", compression, allowZip64=True) as zipfp:
zipfp.write(TESTFN, "another.name")
zipfp.write(TESTFN, TESTFN)
zipfp.writestr("strfile", self.data)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
self.assertEqual(zipfp.read(TESTFN), self.data)
self.assertEqual(zipfp.read("another.name"), self.data)
self.assertEqual(zipfp.read("strfile"), self.data)
# Print the ZIP directory
fp = io.StringIO()
zipfp.printdir(fp)
directory = fp.getvalue()
lines = directory.splitlines()
self.assertEqual(len(lines), 4) # Number of files + header
self.assertIn('File Name', lines[0])
self.assertIn('Modified', lines[0])
self.assertIn('Size', lines[0])
fn, date, time_, size = lines[1].split()
self.assertEqual(fn, 'another.name')
self.assertTrue(time.strptime(date, '%Y-%m-%d'))
self.assertTrue(time.strptime(time_, '%H:%M:%S'))
self.assertEqual(size, str(len(self.data)))
# Check the namelist
names = zipfp.namelist()
self.assertEqual(len(names), 3)
self.assertIn(TESTFN, names)
self.assertIn("another.name", names)
self.assertIn("strfile", names)
# Check infolist
infos = zipfp.infolist()
names = [i.filename for i in infos]
self.assertEqual(len(names), 3)
self.assertIn(TESTFN, names)
self.assertIn("another.name", names)
self.assertIn("strfile", names)
for i in infos:
self.assertEqual(i.file_size, len(self.data))
# check getinfo
for nm in (TESTFN, "another.name", "strfile"):
info = zipfp.getinfo(nm)
self.assertEqual(info.filename, nm)
self.assertEqual(info.file_size, len(self.data))
# Check that testzip doesn't raise an exception
zipfp.testzip()
def test_basic(self):
for f in get_files(self):
self.zip_test(f, self.compression)
def tearDown(self):
zipfile.ZIP64_LIMIT = self._limit
unlink(TESTFN)
unlink(TESTFN2)
class StoredTestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
unittest.TestCase):
compression = zipfile.ZIP_STORED
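    # With allowZip64=False, writing anything once the (lowered) ZIP64 limit
    # is exceeded must raise zipfile.LargeZipFile rather than produce a
    # broken archive.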
def large_file_exception_test(self, f, compression):
with zipfile.ZipFile(f, "w", compression, allowZip64=False) as zipfp:
self.assertRaises(zipfile.LargeZipFile,
zipfp.write, TESTFN, "another.name")
def large_file_exception_test2(self, f, compression):
with zipfile.ZipFile(f, "w", compression, allowZip64=False) as zipfp:
self.assertRaises(zipfile.LargeZipFile,
zipfp.writestr, "another.name", self.data)
def test_large_file_exception(self):
for f in get_files(self):
self.large_file_exception_test(f, zipfile.ZIP_STORED)
self.large_file_exception_test2(f, zipfile.ZIP_STORED)
def test_absolute_arcnames(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED,
allowZip64=True) as zipfp:
zipfp.write(TESTFN, "/absolute")
with zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_STORED) as zipfp:
self.assertEqual(zipfp.namelist(), ["absolute"])
@requires_zlib
class DeflateTestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
unittest.TestCase):
compression = zipfile.ZIP_DEFLATED
@requires_bz2
class Bzip2TestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
unittest.TestCase):
compression = zipfile.ZIP_BZIP2
@requires_lzma
class LzmaTestZip64InSmallFiles(AbstractTestZip64InSmallFiles,
unittest.TestCase):
compression = zipfile.ZIP_LZMA
class PyZipFileTests(unittest.TestCase):
def assertCompiledIn(self, name, namelist):
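        # writepy() stores either a .pyc or a .pyo depending on the
        # interpreter's optimization level, so accept whichever is present.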
if name + 'o' not in namelist:
self.assertIn(name + 'c', namelist)
def test_write_pyfile(self):
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
fn = __file__
if fn.endswith('.pyc') or fn.endswith('.pyo'):
path_split = fn.split(os.sep)
if os.altsep is not None:
path_split.extend(fn.split(os.altsep))
if '__pycache__' in path_split:
fn = importlib.util.source_from_cache(fn)
else:
fn = fn[:-1]
zipfp.writepy(fn)
bn = os.path.basename(fn)
self.assertNotIn(bn, zipfp.namelist())
self.assertCompiledIn(bn, zipfp.namelist())
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
fn = __file__
if fn.endswith(('.pyc', '.pyo')):
fn = fn[:-1]
zipfp.writepy(fn, "testpackage")
bn = "%s/%s" % ("testpackage", os.path.basename(fn))
self.assertNotIn(bn, zipfp.namelist())
self.assertCompiledIn(bn, zipfp.namelist())
def test_write_python_package(self):
import email
packagedir = os.path.dirname(email.__file__)
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
zipfp.writepy(packagedir)
# Check for a couple of modules at different levels of the
# hierarchy
names = zipfp.namelist()
self.assertCompiledIn('email/__init__.py', names)
self.assertCompiledIn('email/mime/text.py', names)
def test_write_filtered_python_package(self):
import test
packagedir = os.path.dirname(test.__file__)
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
# first make sure that the test folder gives error messages
# (on the badsyntax_... files)
with captured_stdout() as reportSIO:
zipfp.writepy(packagedir)
reportStr = reportSIO.getvalue()
self.assertTrue('SyntaxError' in reportStr)
# then check that the filter works on the whole package
with captured_stdout() as reportSIO:
zipfp.writepy(packagedir, filterfunc=lambda whatever: False)
reportStr = reportSIO.getvalue()
self.assertTrue('SyntaxError' not in reportStr)
# then check that the filter works on individual files
with captured_stdout() as reportSIO, self.assertWarns(UserWarning):
zipfp.writepy(packagedir, filterfunc=lambda fn:
'bad' not in fn)
reportStr = reportSIO.getvalue()
if reportStr:
print(reportStr)
self.assertTrue('SyntaxError' not in reportStr)
def test_write_with_optimization(self):
import email
packagedir = os.path.dirname(email.__file__)
# use .pyc if running test in optimization mode,
# use .pyo if running test in debug mode
optlevel = 1 if __debug__ else 0
ext = '.pyo' if optlevel == 1 else '.pyc'
with TemporaryFile() as t, \
zipfile.PyZipFile(t, "w", optimize=optlevel) as zipfp:
zipfp.writepy(packagedir)
names = zipfp.namelist()
self.assertIn('email/__init__' + ext, names)
self.assertIn('email/mime/text' + ext, names)
def test_write_python_directory(self):
os.mkdir(TESTFN2)
try:
with open(os.path.join(TESTFN2, "mod1.py"), "w") as fp:
fp.write("print(42)\n")
with open(os.path.join(TESTFN2, "mod2.py"), "w") as fp:
fp.write("print(42 * 42)\n")
with open(os.path.join(TESTFN2, "mod2.txt"), "w") as fp:
fp.write("bla bla bla\n")
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
zipfp.writepy(TESTFN2)
names = zipfp.namelist()
self.assertCompiledIn('mod1.py', names)
self.assertCompiledIn('mod2.py', names)
self.assertNotIn('mod2.txt', names)
finally:
rmtree(TESTFN2)
def test_write_python_directory_filtered(self):
os.mkdir(TESTFN2)
try:
with open(os.path.join(TESTFN2, "mod1.py"), "w") as fp:
fp.write("print(42)\n")
with open(os.path.join(TESTFN2, "mod2.py"), "w") as fp:
fp.write("print(42 * 42)\n")
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
zipfp.writepy(TESTFN2, filterfunc=lambda fn:
not fn.endswith('mod2.py'))
names = zipfp.namelist()
self.assertCompiledIn('mod1.py', names)
self.assertNotIn('mod2.py', names)
finally:
rmtree(TESTFN2)
def test_write_non_pyfile(self):
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
with open(TESTFN, 'w') as f:
f.write('most definitely not a python file')
self.assertRaises(RuntimeError, zipfp.writepy, TESTFN)
unlink(TESTFN)
def test_write_pyfile_bad_syntax(self):
os.mkdir(TESTFN2)
try:
with open(os.path.join(TESTFN2, "mod1.py"), "w") as fp:
fp.write("Bad syntax in python file\n")
with TemporaryFile() as t, zipfile.PyZipFile(t, "w") as zipfp:
# syntax errors are printed to stdout
with captured_stdout() as s:
zipfp.writepy(os.path.join(TESTFN2, "mod1.py"))
self.assertIn("SyntaxError", s.getvalue())
# as it will not have compiled the python file, it will
# include the .py file not .pyc or .pyo
names = zipfp.namelist()
self.assertIn('mod1.py', names)
self.assertNotIn('mod1.pyc', names)
self.assertNotIn('mod1.pyo', names)
finally:
rmtree(TESTFN2)
class ExtractTests(unittest.TestCase):
def test_extract(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
writtenfile = zipfp.extract(fpath)
# make sure it was written to the right place
correctfile = os.path.join(os.getcwd(), fpath)
correctfile = os.path.normpath(correctfile)
self.assertEqual(writtenfile, correctfile)
# make sure correct data is in correct file
with open(writtenfile, "rb") as f:
self.assertEqual(fdata.encode(), f.read())
unlink(writtenfile)
# remove the test file subdirectories
rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
def test_extract_all(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
zipfp.extractall()
for fpath, fdata in SMALL_TEST_DATA:
outfile = os.path.join(os.getcwd(), fpath)
with open(outfile, "rb") as f:
self.assertEqual(fdata.encode(), f.read())
unlink(outfile)
# remove the test file subdirectories
rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
def check_file(self, filename, content):
self.assertTrue(os.path.isfile(filename))
with open(filename, 'rb') as f:
self.assertEqual(f.read(), content)
def test_sanitize_windows_name(self):
san = zipfile.ZipFile._sanitize_windows_name
# Passing pathsep in allows this test to work regardless of platform.
self.assertEqual(san(r',,?,C:,foo,bar/z', ','), r'_,C_,foo,bar/z')
self.assertEqual(san(r'a\b,c<d>e|f"g?h*i', ','), r'a\b,c_d_e_f_g_h_i')
self.assertEqual(san('../../foo../../ba..r', '/'), r'foo/ba..r')
def test_extract_hackers_arcnames_common_cases(self):
common_hacknames = [
('../foo/bar', 'foo/bar'),
('foo/../bar', 'foo/bar'),
('foo/../../bar', 'foo/bar'),
('foo/bar/..', 'foo/bar'),
('./../foo/bar', 'foo/bar'),
('/foo/bar', 'foo/bar'),
('/foo/../bar', 'foo/bar'),
('/foo/../../bar', 'foo/bar'),
]
self._test_extract_hackers_arcnames(common_hacknames)
@unittest.skipIf(os.path.sep != '\\', 'Requires \\ as path separator.')
def test_extract_hackers_arcnames_windows_only(self):
"""Test combination of path fixing and windows name sanitization."""
windows_hacknames = [
(r'..\foo\bar', 'foo/bar'),
(r'..\/foo\/bar', 'foo/bar'),
(r'foo/\..\/bar', 'foo/bar'),
(r'foo\/../\bar', 'foo/bar'),
(r'C:foo/bar', 'foo/bar'),
(r'C:/foo/bar', 'foo/bar'),
(r'C://foo/bar', 'foo/bar'),
(r'C:\foo\bar', 'foo/bar'),
(r'//conky/mountpoint/foo/bar', 'foo/bar'),
(r'\\conky\mountpoint\foo\bar', 'foo/bar'),
(r'///conky/mountpoint/foo/bar', 'conky/mountpoint/foo/bar'),
(r'\\\conky\mountpoint\foo\bar', 'conky/mountpoint/foo/bar'),
(r'//conky//mountpoint/foo/bar', 'conky/mountpoint/foo/bar'),
(r'\\conky\\mountpoint\foo\bar', 'conky/mountpoint/foo/bar'),
(r'//?/C:/foo/bar', 'foo/bar'),
(r'\\?\C:\foo\bar', 'foo/bar'),
(r'C:/../C:/foo/bar', 'C_/foo/bar'),
(r'a:b\c<d>e|f"g?h*i', 'b/c_d_e_f_g_h_i'),
('../../foo../../ba..r', 'foo/ba..r'),
]
self._test_extract_hackers_arcnames(windows_hacknames)
@unittest.skipIf(os.path.sep != '/', r'Requires / as path separator.')
def test_extract_hackers_arcnames_posix_only(self):
posix_hacknames = [
('//foo/bar', 'foo/bar'),
('../../foo../../ba..r', 'foo../ba..r'),
(r'foo/..\bar', r'foo/..\bar'),
]
self._test_extract_hackers_arcnames(posix_hacknames)
def _test_extract_hackers_arcnames(self, hacknames):
for arcname, fixedname in hacknames:
content = b'foobar' + arcname.encode()
with zipfile.ZipFile(TESTFN2, 'w', zipfile.ZIP_STORED) as zipfp:
zinfo = zipfile.ZipInfo()
# preserve backslashes
zinfo.filename = arcname
zinfo.external_attr = 0o600 << 16
zipfp.writestr(zinfo, content)
arcname = arcname.replace(os.sep, "/")
targetpath = os.path.join('target', 'subdir', 'subsub')
correctfile = os.path.join(targetpath, *fixedname.split('/'))
with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
writtenfile = zipfp.extract(arcname, targetpath)
self.assertEqual(writtenfile, correctfile,
msg='extract %r: %r != %r' %
(arcname, writtenfile, correctfile))
self.check_file(correctfile, content)
rmtree('target')
with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
zipfp.extractall(targetpath)
self.check_file(correctfile, content)
rmtree('target')
correctfile = os.path.join(os.getcwd(), *fixedname.split('/'))
with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
writtenfile = zipfp.extract(arcname)
self.assertEqual(writtenfile, correctfile,
msg="extract %r" % arcname)
self.check_file(correctfile, content)
rmtree(fixedname.split('/')[0])
with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
zipfp.extractall()
self.check_file(correctfile, content)
rmtree(fixedname.split('/')[0])
unlink(TESTFN2)
class OtherTests(unittest.TestCase):
def test_open_via_zip_info(self):
# Create the ZIP archive
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.writestr("name", "foo")
with self.assertWarns(UserWarning):
zipfp.writestr("name", "bar")
self.assertEqual(zipfp.namelist(), ["name"] * 2)
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
infos = zipfp.infolist()
data = b""
for info in infos:
with zipfp.open(info) as zipopen:
data += zipopen.read()
self.assertIn(data, {b"foobar", b"barfoo"})
data = b""
for info in infos:
data += zipfp.read(info)
self.assertIn(data, {b"foobar", b"barfoo"})
def test_universal_deprecation(self):
f = io.BytesIO()
with zipfile.ZipFile(f, "w") as zipfp:
zipfp.writestr('spam.txt', b'ababagalamaga')
with zipfile.ZipFile(f, "r") as zipfp:
for mode in 'U', 'rU':
with self.assertWarns(DeprecationWarning):
zipopen = zipfp.open('spam.txt', mode)
zipopen.close()
def test_universal_readaheads(self):
f = io.BytesIO()
data = b'a\r\n' * 16 * 1024
with zipfile.ZipFile(f, 'w', zipfile.ZIP_STORED) as zipfp:
zipfp.writestr(TESTFN, data)
data2 = b''
with zipfile.ZipFile(f, 'r') as zipfp, \
openU(zipfp, TESTFN) as zipopen:
for line in zipopen:
data2 += line
self.assertEqual(data, data2.replace(b'\n', b'\r\n'))
def test_writestr_extended_local_header_issue1202(self):
with zipfile.ZipFile(TESTFN2, 'w') as orig_zip:
for data in 'abcdefghijklmnop':
zinfo = zipfile.ZipInfo(data)
zinfo.flag_bits |= 0x08 # Include an extended local header.
orig_zip.writestr(zinfo, data)
def test_close(self):
"""Check that the zipfile is closed after the 'with' block."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
self.assertIsNotNone(zipfp.fp, 'zipfp is not open')
self.assertIsNone(zipfp.fp, 'zipfp is not closed')
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
self.assertIsNotNone(zipfp.fp, 'zipfp is not open')
self.assertIsNone(zipfp.fp, 'zipfp is not closed')
def test_close_on_exception(self):
"""Check that the zipfile is closed if an exception is raised in the
'with' block."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
try:
with zipfile.ZipFile(TESTFN2, "r") as zipfp2:
raise zipfile.BadZipFile()
except zipfile.BadZipFile:
self.assertIsNone(zipfp2.fp, 'zipfp is not closed')
def test_unsupported_version(self):
# File has an extract_version of 120
data = (b'PK\x03\x04x\x00\x00\x00\x00\x00!p\xa1@\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00xPK\x01\x02x\x03x\x00\x00\x00\x00'
b'\x00!p\xa1@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00\x00xPK\x05\x06'
b'\x00\x00\x00\x00\x01\x00\x01\x00/\x00\x00\x00\x1f\x00\x00\x00\x00\x00')
self.assertRaises(NotImplementedError, zipfile.ZipFile,
io.BytesIO(data), 'r')
@requires_zlib
def test_read_unicode_filenames(self):
# bug #10801
fname = findfile('zip_cp437_header.zip')
with zipfile.ZipFile(fname) as zipfp:
for name in zipfp.namelist():
zipfp.open(name).close()
def test_write_unicode_filenames(self):
with zipfile.ZipFile(TESTFN, "w") as zf:
zf.writestr("foo.txt", "Test for unicode filename")
zf.writestr("\xf6.txt", "Test for unicode filename")
self.assertIsInstance(zf.infolist()[0].filename, str)
with zipfile.ZipFile(TESTFN, "r") as zf:
self.assertEqual(zf.filelist[0].filename, "foo.txt")
self.assertEqual(zf.filelist[1].filename, "\xf6.txt")
def test_create_non_existent_file_for_append(self):
if os.path.exists(TESTFN):
os.unlink(TESTFN)
filename = 'testfile.txt'
content = b'hello, world. this is some content.'
try:
with zipfile.ZipFile(TESTFN, 'a') as zf:
zf.writestr(filename, content)
except OSError:
self.fail('Could not append data to a non-existent zip file.')
self.assertTrue(os.path.exists(TESTFN))
with zipfile.ZipFile(TESTFN, 'r') as zf:
self.assertEqual(zf.read(filename), content)
def test_close_erroneous_file(self):
# This test checks that the ZipFile constructor closes the file object
# it opens if there's an error in the file. If it doesn't, the
# traceback holds a reference to the ZipFile object and, indirectly,
# the file object.
# On Windows, this causes the os.unlink() call to fail because the
# underlying file is still open. This is SF bug #412214.
#
with open(TESTFN, "w") as fp:
fp.write("this is not a legal zip file\n")
try:
zf = zipfile.ZipFile(TESTFN)
except zipfile.BadZipFile:
pass
def test_is_zip_erroneous_file(self):
"""Check that is_zipfile() correctly identifies non-zip files."""
# - passing a filename
with open(TESTFN, "w") as fp:
fp.write("this is not a legal zip file\n")
self.assertFalse(zipfile.is_zipfile(TESTFN))
# - passing a file object
with open(TESTFN, "rb") as fp:
self.assertFalse(zipfile.is_zipfile(fp))
# - passing a file-like object
fp = io.BytesIO()
fp.write(b"this is not a legal zip file\n")
self.assertFalse(zipfile.is_zipfile(fp))
fp.seek(0, 0)
self.assertFalse(zipfile.is_zipfile(fp))
def test_damaged_zipfile(self):
"""Check that zipfiles with missing bytes at the end raise BadZipFile."""
# - Create a valid zip file
fp = io.BytesIO()
with zipfile.ZipFile(fp, mode="w") as zipf:
zipf.writestr("foo.txt", b"O, for a Muse of Fire!")
zipfiledata = fp.getvalue()
# - Now create copies of it missing the last N bytes and make sure
# a BadZipFile exception is raised when we try to open it
for N in range(len(zipfiledata)):
fp = io.BytesIO(zipfiledata[:N])
self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, fp)
def test_is_zip_valid_file(self):
"""Check that is_zipfile() correctly identifies zip files."""
# - passing a filename
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt", b"O, for a Muse of Fire!")
self.assertTrue(zipfile.is_zipfile(TESTFN))
# - passing a file object
with open(TESTFN, "rb") as fp:
self.assertTrue(zipfile.is_zipfile(fp))
fp.seek(0, 0)
zip_contents = fp.read()
# - passing a file-like object
fp = io.BytesIO()
fp.write(zip_contents)
self.assertTrue(zipfile.is_zipfile(fp))
fp.seek(0, 0)
self.assertTrue(zipfile.is_zipfile(fp))
def test_non_existent_file_raises_OSError(self):
# make sure we don't raise an AttributeError when a partially-constructed
# ZipFile instance is finalized; this tests for regression on SF tracker
# bug #403871.
# The bug we're testing for caused an AttributeError to be raised
# when a ZipFile instance was created for a file that did not
# exist; the .fp member was not initialized but was needed by the
# __del__() method. Since the AttributeError is in the __del__(),
# it is ignored, but the user should be sufficiently annoyed by
# the message on the output that regression will be noticed
# quickly.
self.assertRaises(OSError, zipfile.ZipFile, TESTFN)
def test_empty_file_raises_BadZipFile(self):
f = open(TESTFN, 'w')
f.close()
self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, TESTFN)
with open(TESTFN, 'w') as fp:
fp.write("short file")
self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, TESTFN)
def test_closed_zip_raises_RuntimeError(self):
"""Verify that testzip() doesn't swallow inappropriate exceptions."""
data = io.BytesIO()
with zipfile.ZipFile(data, mode="w") as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
# This is correct; calling .read on a closed ZipFile should raise
# a RuntimeError, and so should calling .testzip. An earlier
# version of .testzip would swallow this exception (and any other)
# and report that the first file in the archive was corrupt.
self.assertRaises(RuntimeError, zipf.read, "foo.txt")
self.assertRaises(RuntimeError, zipf.open, "foo.txt")
self.assertRaises(RuntimeError, zipf.testzip)
self.assertRaises(RuntimeError, zipf.writestr, "bogus.txt", "bogus")
with open(TESTFN, 'w') as f:
f.write('zipfile test data')
self.assertRaises(RuntimeError, zipf.write, TESTFN)
def test_bad_constructor_mode(self):
"""Check that bad modes passed to ZipFile constructor are caught."""
self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, "q")
def test_bad_open_mode(self):
"""Check that bad modes passed to ZipFile.open are caught."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipf:
# read the data to make sure the file is there
zipf.read("foo.txt")
self.assertRaises(RuntimeError, zipf.open, "foo.txt", "q")
def test_read0(self):
"""Check that calling read(0) on a ZipExtFile object returns an empty
string and doesn't advance file pointer."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
# read the data to make sure the file is there
with zipf.open("foo.txt") as f:
for i in range(FIXEDTEST_SIZE):
self.assertEqual(f.read(0), b'')
self.assertEqual(f.read(), b"O, for a Muse of Fire!")
def test_open_non_existent_item(self):
"""Check that attempting to call open() for an item that doesn't
exist in the archive raises a RuntimeError."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
self.assertRaises(KeyError, zipf.open, "foo.txt", "r")
def test_bad_compression_mode(self):
"""Check that bad compression methods passed to ZipFile.open are
caught."""
self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, "w", -1)
def test_unsupported_compression(self):
# data is declared as shrunk, but actually deflated
data = (b'PK\x03\x04.\x00\x00\x00\x01\x00\xe4C\xa1@\x00\x00\x00'
b'\x00\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00x\x03\x00PK\x01'
b'\x02.\x03.\x00\x00\x00\x01\x00\xe4C\xa1@\x00\x00\x00\x00\x02\x00\x00'
b'\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x80\x01\x00\x00\x00\x00xPK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x00'
b'/\x00\x00\x00!\x00\x00\x00\x00\x00')
with zipfile.ZipFile(io.BytesIO(data), 'r') as zipf:
self.assertRaises(NotImplementedError, zipf.open, 'x')
def test_null_byte_in_filename(self):
"""Check that a filename containing a null byte is properly
terminated."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt\x00qqq", b"O, for a Muse of Fire!")
self.assertEqual(zipf.namelist(), ['foo.txt'])
def test_struct_sizes(self):
"""Check that ZIP internal structure sizes are calculated correctly."""
self.assertEqual(zipfile.sizeEndCentDir, 22)
self.assertEqual(zipfile.sizeCentralDir, 46)
self.assertEqual(zipfile.sizeEndCentDir64, 56)
self.assertEqual(zipfile.sizeEndCentDir64Locator, 20)
def test_comments(self):
"""Check that comments on the archive are handled properly."""
# check default comment is empty
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
self.assertEqual(zipf.comment, b'')
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
self.assertEqual(zipfr.comment, b'')
# check a simple short comment
comment = b'Bravely taking to his feet, he beat a very brave retreat.'
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.comment = comment
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
self.assertEqual(zipf.comment, comment)
# check a comment of max length
comment2 = ''.join(['%d' % (i**3 % 10) for i in range((1 << 16)-1)])
comment2 = comment2.encode("ascii")
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.comment = comment2
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
self.assertEqual(zipfr.comment, comment2)
# check a comment that is too long is truncated
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
with self.assertWarns(UserWarning):
zipf.comment = comment2 + b'oops'
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipfr:
self.assertEqual(zipfr.comment, comment2)
# check that comments are correctly modified in append mode
with zipfile.ZipFile(TESTFN,mode="w") as zipf:
zipf.comment = b"original comment"
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN,mode="a") as zipf:
zipf.comment = b"an updated comment"
with zipfile.ZipFile(TESTFN,mode="r") as zipf:
self.assertEqual(zipf.comment, b"an updated comment")
# check that comments are correctly shortened in append mode
with zipfile.ZipFile(TESTFN,mode="w") as zipf:
zipf.comment = b"original comment that's longer"
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN,mode="a") as zipf:
zipf.comment = b"shorter comment"
with zipfile.ZipFile(TESTFN,mode="r") as zipf:
self.assertEqual(zipf.comment, b"shorter comment")
def test_unicode_comment(self):
with zipfile.ZipFile(TESTFN, "w", zipfile.ZIP_STORED) as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with self.assertRaises(TypeError):
zipf.comment = "this is an error"
def test_change_comment_in_empty_archive(self):
with zipfile.ZipFile(TESTFN, "a", zipfile.ZIP_STORED) as zipf:
self.assertFalse(zipf.filelist)
zipf.comment = b"this is a comment"
with zipfile.ZipFile(TESTFN, "r") as zipf:
self.assertEqual(zipf.comment, b"this is a comment")
def test_change_comment_in_nonempty_archive(self):
with zipfile.ZipFile(TESTFN, "w", zipfile.ZIP_STORED) as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, "a", zipfile.ZIP_STORED) as zipf:
self.assertTrue(zipf.filelist)
zipf.comment = b"this is a comment"
with zipfile.ZipFile(TESTFN, "r") as zipf:
self.assertEqual(zipf.comment, b"this is a comment")
def test_empty_zipfile(self):
# Check that creating a file in 'w' or 'a' mode and closing without
# adding any files to the archives creates a valid empty ZIP file
zipf = zipfile.ZipFile(TESTFN, mode="w")
zipf.close()
try:
zipf = zipfile.ZipFile(TESTFN, mode="r")
except zipfile.BadZipFile:
self.fail("Unable to create empty ZIP file in 'w' mode")
zipf = zipfile.ZipFile(TESTFN, mode="a")
zipf.close()
try:
zipf = zipfile.ZipFile(TESTFN, mode="r")
except:
self.fail("Unable to create empty ZIP file in 'a' mode")
def test_open_empty_file(self):
# Issue 1710703: Check that opening a file with less than 22 bytes
# raises a BadZipFile exception (rather than the previously unhelpful
# OSError)
f = open(TESTFN, 'w')
f.close()
self.assertRaises(zipfile.BadZipFile, zipfile.ZipFile, TESTFN, 'r')
def test_create_zipinfo_before_1980(self):
self.assertRaises(ValueError,
zipfile.ZipInfo, 'seventies', (1979, 1, 1, 0, 0, 0))
def test_zipfile_with_short_extra_field(self):
"""If an extra field in the header is less than 4 bytes, skip it."""
zipdata = (
b'PK\x03\x04\x14\x00\x00\x00\x00\x00\x93\x9b\xad@\x8b\x9e'
b'\xd9\xd3\x01\x00\x00\x00\x01\x00\x00\x00\x03\x00\x03\x00ab'
b'c\x00\x00\x00APK\x01\x02\x14\x03\x14\x00\x00\x00\x00'
b'\x00\x93\x9b\xad@\x8b\x9e\xd9\xd3\x01\x00\x00\x00\x01\x00\x00'
b'\x00\x03\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa4\x81\x00'
b'\x00\x00\x00abc\x00\x00PK\x05\x06\x00\x00\x00\x00'
b'\x01\x00\x01\x003\x00\x00\x00%\x00\x00\x00\x00\x00'
)
with zipfile.ZipFile(io.BytesIO(zipdata), 'r') as zipf:
# testzip returns the name of the first corrupt file, or None
self.assertIsNone(zipf.testzip())
def tearDown(self):
unlink(TESTFN)
unlink(TESTFN2)
class AbstractBadCrcTests:
def test_testzip_with_bad_crc(self):
"""Tests that files with bad CRCs return their name from testzip."""
zipdata = self.zip_with_bad_crc
with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
# testzip returns the name of the first corrupt file, or None
self.assertEqual('afile', zipf.testzip())
def test_read_with_bad_crc(self):
"""Tests that files with bad CRCs raise a BadZipFile exception when read."""
zipdata = self.zip_with_bad_crc
# Using ZipFile.read()
with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
self.assertRaises(zipfile.BadZipFile, zipf.read, 'afile')
# Using ZipExtFile.read()
with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
with zipf.open('afile', 'r') as corrupt_file:
self.assertRaises(zipfile.BadZipFile, corrupt_file.read)
# Same with small reads (in order to exercise the buffering logic)
with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
with zipf.open('afile', 'r') as corrupt_file:
corrupt_file.MIN_READ_SIZE = 2
with self.assertRaises(zipfile.BadZipFile):
while corrupt_file.read(2):
pass
class StoredBadCrcTests(AbstractBadCrcTests, unittest.TestCase):
compression = zipfile.ZIP_STORED
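    # Hand-built stored archive with a single member 'afile' whose recorded
    # CRC does not match its contents.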
zip_with_bad_crc = (
b'PK\003\004\024\0\0\0\0\0 \213\212;:r'
b'\253\377\f\0\0\0\f\0\0\0\005\0\0\000af'
b'ilehello,AworldP'
b'K\001\002\024\003\024\0\0\0\0\0 \213\212;:'
b'r\253\377\f\0\0\0\f\0\0\0\005\0\0\0\0'
b'\0\0\0\0\0\0\0\200\001\0\0\0\000afi'
b'lePK\005\006\0\0\0\0\001\0\001\0003\000'
b'\0\0/\0\0\0\0\0')
@requires_zlib
class DeflateBadCrcTests(AbstractBadCrcTests, unittest.TestCase):
compression = zipfile.ZIP_DEFLATED
zip_with_bad_crc = (
b'PK\x03\x04\x14\x00\x00\x00\x08\x00n}\x0c=FA'
b'KE\x10\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00af'
b'ile\xcbH\xcd\xc9\xc9W(\xcf/\xcaI\xc9\xa0'
b'=\x13\x00PK\x01\x02\x14\x03\x14\x00\x00\x00\x08\x00n'
b'}\x0c=FAKE\x10\x00\x00\x00n\x00\x00\x00\x05'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00'
b'\x00afilePK\x05\x06\x00\x00\x00\x00\x01\x00'
b'\x01\x003\x00\x00\x003\x00\x00\x00\x00\x00')
@requires_bz2
class Bzip2BadCrcTests(AbstractBadCrcTests, unittest.TestCase):
compression = zipfile.ZIP_BZIP2
zip_with_bad_crc = (
b'PK\x03\x04\x14\x03\x00\x00\x0c\x00nu\x0c=FA'
b'KE8\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00af'
b'ileBZh91AY&SY\xd4\xa8\xca'
b'\x7f\x00\x00\x0f\x11\x80@\x00\x06D\x90\x80 \x00 \xa5'
b'P\xd9!\x03\x03\x13\x13\x13\x89\xa9\xa9\xc2u5:\x9f'
b'\x8b\xb9"\x9c(HjTe?\x80PK\x01\x02\x14'
b'\x03\x14\x03\x00\x00\x0c\x00nu\x0c=FAKE8'
b'\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00 \x80\x80\x81\x00\x00\x00\x00afilePK'
b'\x05\x06\x00\x00\x00\x00\x01\x00\x01\x003\x00\x00\x00[\x00'
b'\x00\x00\x00\x00')
@requires_lzma
class LzmaBadCrcTests(AbstractBadCrcTests, unittest.TestCase):
compression = zipfile.ZIP_LZMA
zip_with_bad_crc = (
b'PK\x03\x04\x14\x03\x00\x00\x0e\x00nu\x0c=FA'
b'KE\x1b\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00af'
b'ile\t\x04\x05\x00]\x00\x00\x00\x04\x004\x19I'
b'\xee\x8d\xe9\x17\x89:3`\tq!.8\x00PK'
b'\x01\x02\x14\x03\x14\x03\x00\x00\x0e\x00nu\x0c=FA'
b'KE\x1b\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00 \x80\x80\x81\x00\x00\x00\x00afil'
b'ePK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x003\x00\x00'
b'\x00>\x00\x00\x00\x00\x00')
class DecryptionTests(unittest.TestCase):
"""Check that ZIP decryption works. Since the library does not
support encryption at the moment, we use a pre-generated encrypted
ZIP file."""
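    # 'data' is an archive containing test.txt encrypted with the password
    # b"python"; 'data2' contains a member named 'zero' (512 NUL bytes)
    # encrypted with b"12345" -- see test_good_password below.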
data = (
b'PK\x03\x04\x14\x00\x01\x00\x00\x00n\x92i.#y\xef?&\x00\x00\x00\x1a\x00'
b'\x00\x00\x08\x00\x00\x00test.txt\xfa\x10\xa0gly|\xfa-\xc5\xc0=\xf9y'
b'\x18\xe0\xa8r\xb3Z}Lg\xbc\xae\xf9|\x9b\x19\xe4\x8b\xba\xbb)\x8c\xb0\xdbl'
b'PK\x01\x02\x14\x00\x14\x00\x01\x00\x00\x00n\x92i.#y\xef?&\x00\x00\x00'
b'\x1a\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x01\x00 \x00\xb6\x81'
b'\x00\x00\x00\x00test.txtPK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x006\x00'
b'\x00\x00L\x00\x00\x00\x00\x00' )
data2 = (
b'PK\x03\x04\x14\x00\t\x00\x08\x00\xcf}38xu\xaa\xb2\x14\x00\x00\x00\x00\x02'
b'\x00\x00\x04\x00\x15\x00zeroUT\t\x00\x03\xd6\x8b\x92G\xda\x8b\x92GUx\x04'
b'\x00\xe8\x03\xe8\x03\xc7<M\xb5a\xceX\xa3Y&\x8b{oE\xd7\x9d\x8c\x98\x02\xc0'
b'PK\x07\x08xu\xaa\xb2\x14\x00\x00\x00\x00\x02\x00\x00PK\x01\x02\x17\x03'
b'\x14\x00\t\x00\x08\x00\xcf}38xu\xaa\xb2\x14\x00\x00\x00\x00\x02\x00\x00'
b'\x04\x00\r\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa4\x81\x00\x00\x00\x00ze'
b'roUT\x05\x00\x03\xd6\x8b\x92GUx\x00\x00PK\x05\x06\x00\x00\x00\x00\x01'
b'\x00\x01\x00?\x00\x00\x00[\x00\x00\x00\x00\x00' )
plain = b'zipfile.py encryption test'
plain2 = b'\x00'*512
def setUp(self):
with open(TESTFN, "wb") as fp:
fp.write(self.data)
self.zip = zipfile.ZipFile(TESTFN, "r")
with open(TESTFN2, "wb") as fp:
fp.write(self.data2)
self.zip2 = zipfile.ZipFile(TESTFN2, "r")
def tearDown(self):
self.zip.close()
os.unlink(TESTFN)
self.zip2.close()
os.unlink(TESTFN2)
def test_no_password(self):
        # Reading the encrypted file without a password
        # must raise a RuntimeError
self.assertRaises(RuntimeError, self.zip.read, "test.txt")
self.assertRaises(RuntimeError, self.zip2.read, "zero")
def test_bad_password(self):
self.zip.setpassword(b"perl")
self.assertRaises(RuntimeError, self.zip.read, "test.txt")
self.zip2.setpassword(b"perl")
self.assertRaises(RuntimeError, self.zip2.read, "zero")
@requires_zlib
def test_good_password(self):
self.zip.setpassword(b"python")
self.assertEqual(self.zip.read("test.txt"), self.plain)
self.zip2.setpassword(b"12345")
self.assertEqual(self.zip2.read("zero"), self.plain2)
def test_unicode_password(self):
self.assertRaises(TypeError, self.zip.setpassword, "unicode")
self.assertRaises(TypeError, self.zip.read, "test.txt", "python")
self.assertRaises(TypeError, self.zip.open, "test.txt", pwd="python")
self.assertRaises(TypeError, self.zip.extract, "test.txt", pwd="python")
class AbstractTestsWithRandomBinaryFiles:
@classmethod
def setUpClass(cls):
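        # Roughly 64-260 kB of little-endian packed random floats, generated
        # once for the whole test class.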
datacount = randint(16, 64)*1024 + randint(1, 1024)
cls.data = b''.join(struct.pack('<f', random()*randint(-1000, 1000))
for i in range(datacount))
def setUp(self):
# Make a source file with some lines
with open(TESTFN, "wb") as fp:
fp.write(self.data)
def tearDown(self):
unlink(TESTFN)
unlink(TESTFN2)
def make_test_archive(self, f, compression):
# Create the ZIP archive
with zipfile.ZipFile(f, "w", compression) as zipfp:
zipfp.write(TESTFN, "another.name")
zipfp.write(TESTFN, TESTFN)
def zip_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
testdata = zipfp.read(TESTFN)
self.assertEqual(len(testdata), len(self.data))
self.assertEqual(testdata, self.data)
self.assertEqual(zipfp.read("another.name"), self.data)
def test_read(self):
for f in get_files(self):
self.zip_test(f, self.compression)
def zip_open_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
zipdata1 = []
with zipfp.open(TESTFN) as zipopen1:
while True:
read_data = zipopen1.read(256)
if not read_data:
break
zipdata1.append(read_data)
zipdata2 = []
with zipfp.open("another.name") as zipopen2:
while True:
read_data = zipopen2.read(256)
if not read_data:
break
zipdata2.append(read_data)
testdata1 = b''.join(zipdata1)
self.assertEqual(len(testdata1), len(self.data))
self.assertEqual(testdata1, self.data)
testdata2 = b''.join(zipdata2)
self.assertEqual(len(testdata2), len(self.data))
self.assertEqual(testdata2, self.data)
def test_open(self):
for f in get_files(self):
self.zip_open_test(f, self.compression)
def zip_random_open_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
zipdata1 = []
with zipfp.open(TESTFN) as zipopen1:
while True:
read_data = zipopen1.read(randint(1, 1024))
if not read_data:
break
zipdata1.append(read_data)
testdata = b''.join(zipdata1)
self.assertEqual(len(testdata), len(self.data))
self.assertEqual(testdata, self.data)
def test_random_open(self):
for f in get_files(self):
self.zip_random_open_test(f, self.compression)
class StoredTestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
unittest.TestCase):
compression = zipfile.ZIP_STORED
@requires_zlib
class DeflateTestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
unittest.TestCase):
compression = zipfile.ZIP_DEFLATED
@requires_bz2
class Bzip2TestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
unittest.TestCase):
compression = zipfile.ZIP_BZIP2
@requires_lzma
class LzmaTestsWithRandomBinaryFiles(AbstractTestsWithRandomBinaryFiles,
unittest.TestCase):
compression = zipfile.ZIP_LZMA
@requires_zlib
class TestsWithMultipleOpens(unittest.TestCase):
def setUp(self):
# Create the ZIP archive
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_DEFLATED) as zipfp:
zipfp.writestr('ones', '1'*FIXEDTEST_SIZE)
zipfp.writestr('twos', '2'*FIXEDTEST_SIZE)
def test_same_file(self):
# Verify that (when the ZipFile is in control of creating file objects)
# multiple open() calls can be made without interfering with each other.
with zipfile.ZipFile(TESTFN2, mode="r") as zipf:
with zipf.open('ones') as zopen1, zipf.open('ones') as zopen2:
data1 = zopen1.read(500)
data2 = zopen2.read(500)
data1 += zopen1.read(500)
data2 += zopen2.read(500)
self.assertEqual(data1, data2)
def test_different_file(self):
# Verify that (when the ZipFile is in control of creating file objects)
# multiple open() calls can be made without interfering with each other.
with zipfile.ZipFile(TESTFN2, mode="r") as zipf:
with zipf.open('ones') as zopen1, zipf.open('twos') as zopen2:
data1 = zopen1.read(500)
data2 = zopen2.read(500)
data1 += zopen1.read(500)
data2 += zopen2.read(500)
self.assertEqual(data1, b'1'*FIXEDTEST_SIZE)
self.assertEqual(data2, b'2'*FIXEDTEST_SIZE)
def test_interleaved(self):
# Verify that (when the ZipFile is in control of creating file objects)
# multiple open() calls can be made without interfering with each other.
with zipfile.ZipFile(TESTFN2, mode="r") as zipf:
with zipf.open('ones') as zopen1, zipf.open('twos') as zopen2:
data1 = zopen1.read(500)
data2 = zopen2.read(500)
data1 += zopen1.read(500)
data2 += zopen2.read(500)
self.assertEqual(data1, b'1'*FIXEDTEST_SIZE)
self.assertEqual(data2, b'2'*FIXEDTEST_SIZE)
def tearDown(self):
unlink(TESTFN2)
class TestWithDirectory(unittest.TestCase):
def setUp(self):
os.mkdir(TESTFN2)
def test_extract_dir(self):
with zipfile.ZipFile(findfile("zipdir.zip")) as zipf:
zipf.extractall(TESTFN2)
self.assertTrue(os.path.isdir(os.path.join(TESTFN2, "a")))
self.assertTrue(os.path.isdir(os.path.join(TESTFN2, "a", "b")))
self.assertTrue(os.path.exists(os.path.join(TESTFN2, "a", "b", "c")))
def test_bug_6050(self):
# Extraction should succeed if directories already exist
os.mkdir(os.path.join(TESTFN2, "a"))
self.test_extract_dir()
def test_store_dir(self):
os.mkdir(os.path.join(TESTFN2, "x"))
zipf = zipfile.ZipFile(TESTFN, "w")
zipf.write(os.path.join(TESTFN2, "x"), "x")
self.assertTrue(zipf.filelist[0].filename.endswith("x/"))
def tearDown(self):
rmtree(TESTFN2)
if os.path.exists(TESTFN):
unlink(TESTFN)
class AbstractUniversalNewlineTests:
@classmethod
def setUpClass(cls):
cls.line_gen = [bytes("Test of zipfile line %d." % i, "ascii")
for i in range(FIXEDTEST_SIZE)]
cls.seps = (b'\r', b'\r\n', b'\n')
cls.arcdata = {}
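        # Build one block of test data per newline convention (\r, \r\n, \n);
        # setUp() writes each variant to its own source file for the archive.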
for n, s in enumerate(cls.seps):
cls.arcdata[s] = s.join(cls.line_gen) + s
def setUp(self):
self.arcfiles = {}
for n, s in enumerate(self.seps):
self.arcfiles[s] = '%s-%d' % (TESTFN, n)
with open(self.arcfiles[s], "wb") as f:
f.write(self.arcdata[s])
def make_test_archive(self, f, compression):
# Create the ZIP archive
with zipfile.ZipFile(f, "w", compression) as zipfp:
for fn in self.arcfiles.values():
zipfp.write(fn, fn)
def read_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
for sep, fn in self.arcfiles.items():
with openU(zipfp, fn) as fp:
zipdata = fp.read()
self.assertEqual(self.arcdata[sep], zipdata)
def test_read(self):
for f in get_files(self):
self.read_test(f, self.compression)
def readline_read_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
for sep, fn in self.arcfiles.items():
with openU(zipfp, fn) as zipopen:
data = b''
while True:
read = zipopen.readline()
if not read:
break
data += read
read = zipopen.read(5)
if not read:
break
data += read
self.assertEqual(data, self.arcdata[b'\n'])
def test_readline_read(self):
for f in get_files(self):
self.readline_read_test(f, self.compression)
def readline_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
for sep, fn in self.arcfiles.items():
with openU(zipfp, fn) as zipopen:
for line in self.line_gen:
linedata = zipopen.readline()
self.assertEqual(linedata, line + b'\n')
def test_readline(self):
for f in get_files(self):
self.readline_test(f, self.compression)
def readlines_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
for sep, fn in self.arcfiles.items():
with openU(zipfp, fn) as fp:
ziplines = fp.readlines()
for line, zipline in zip(self.line_gen, ziplines):
self.assertEqual(zipline, line + b'\n')
def test_readlines(self):
for f in get_files(self):
self.readlines_test(f, self.compression)
def iterlines_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
for sep, fn in self.arcfiles.items():
with openU(zipfp, fn) as fp:
for line, zipline in zip(self.line_gen, fp):
self.assertEqual(zipline, line + b'\n')
def test_iterlines(self):
for f in get_files(self):
self.iterlines_test(f, self.compression)
def tearDown(self):
for sep, fn in self.arcfiles.items():
unlink(fn)
unlink(TESTFN)
unlink(TESTFN2)
class StoredUniversalNewlineTests(AbstractUniversalNewlineTests,
unittest.TestCase):
compression = zipfile.ZIP_STORED
@requires_zlib
class DeflateUniversalNewlineTests(AbstractUniversalNewlineTests,
unittest.TestCase):
compression = zipfile.ZIP_DEFLATED
@requires_bz2
class Bzip2UniversalNewlineTests(AbstractUniversalNewlineTests,
unittest.TestCase):
compression = zipfile.ZIP_BZIP2
@requires_lzma
class LzmaUniversalNewlineTests(AbstractUniversalNewlineTests,
unittest.TestCase):
compression = zipfile.ZIP_LZMA
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
cyli/volatility | volatility/plugins/linux/netstat.py | 10 | 2404 | # Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
"""
@author: Andrew Case
@license: GNU General Public License 2.0
@contact: [email protected]
@organization:
"""
import socket
import volatility.obj as obj
import volatility.plugins.linux.common as linux_common
import volatility.plugins.linux.lsof as linux_lsof
import volatility.plugins.linux.pslist as linux_pslist
class linux_netstat(linux_pslist.linux_pslist):
"""Lists open sockets"""
def __init__(self, config, *args, **kwargs):
linux_pslist.linux_pslist.__init__(self, config, *args, **kwargs)
self._config.add_option('IGNORE_UNIX', short_option = 'U', default = None, help = 'ignore unix sockets', action = 'store_true')
    # it's a socket!
def render_text(self, outfd, data):
linux_common.set_plugin_members(self)
if not self.addr_space.profile.has_type("inet_sock"):
# ancient (2.6.9) centos kernels do not have inet_sock in debug info
raise AttributeError, "Given profile does not have inet_sock, please file a bug if the kernel version is > 2.6.11"
for task in data:
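            # task.netstat() yields (address_family, info) tuples; AF_INET
            # entries are printed as proto/src/dst/state and AF_UNIX entries
            # as inode and socket path.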
for ents in task.netstat():
if ents[0] == socket.AF_INET:
(_, proto, saddr, sport, daddr, dport, state) = ents[1]
outfd.write("{0:8s} {1:<16}:{2:>5} {3:<16}:{4:>5} {5:<15s} {6:>17s}/{7:<5d}\n".format(proto, saddr, sport, daddr, dport, state, task.comm, task.pid))
elif ents[0] == socket.AF_UNIX and not self._config.IGNORE_UNIX:
(name, inum) = ents[1]
outfd.write("UNIX {0:<8d} {1:>17s}/{2:<5d} {3:s}\n".format(inum, task.comm, task.pid, name))
| gpl-2.0 |
vdmann/cse-360-image-hosting-website | lib/python2.7/site-packages/django/contrib/gis/management/commands/inspectdb.py | 315 | 1466 | from django.core.management.commands.inspectdb import Command as InspectDBCommand
class Command(InspectDBCommand):
db_module = 'django.contrib.gis.db'
gis_tables = {}
def get_field_type(self, connection, table_name, row):
field_type, field_params, field_notes = super(Command, self).get_field_type(connection, table_name, row)
if field_type == 'GeometryField':
geo_col = row[0]
# Getting a more specific field type and any additional parameters
# from the `get_geometry_type` routine for the spatial backend.
field_type, geo_params = connection.introspection.get_geometry_type(table_name, geo_col)
field_params.update(geo_params)
# Adding the table name and column to the `gis_tables` dictionary, this
# allows us to track which tables need a GeoManager.
if table_name in self.gis_tables:
self.gis_tables[table_name].append(geo_col)
else:
self.gis_tables[table_name] = [geo_col]
return field_type, field_params, field_notes
def get_meta(self, table_name):
meta_lines = super(Command, self).get_meta(table_name)
if table_name in self.gis_tables:
# If the table is a geographic one, then we need make
# GeoManager the default manager for the model.
meta_lines.insert(0, ' objects = models.GeoManager()')
return meta_lines
| mit |
Bysmyyr/blink-crosswalk | Tools/Scripts/webkitpy/style/checkers/png.py | 170 | 3914 | # Copyright (C) 2012 Balazs Ankes ([email protected]) University of Szeged
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Supports checking WebKit style in png files."""
import os
import re
from webkitpy.common import checksvnconfigfile
from webkitpy.common import read_checksum_from_png
from webkitpy.common.system.systemhost import SystemHost
from webkitpy.common.checkout.scm.detection import SCMDetector
class PNGChecker(object):
"""Check svn:mime-type for checking style"""
categories = set(['image/png'])
def __init__(self, file_path, handle_style_error, scm=None, host=None):
self._file_path = file_path
self._handle_style_error = handle_style_error
self._host = host or SystemHost()
self._fs = self._host.filesystem
self._detector = scm or SCMDetector(self._fs, self._host.executive).detect_scm_system(self._fs.getcwd())
def check(self, inline=None):
errorstr = ""
config_file_path = ""
detection = self._detector.display_name()
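        # Reference images (-expected.png) generated by run-webkit-tests embed
        # a checksum in the PNG; report a style error when it is missing.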
if self._fs.exists(self._file_path) and self._file_path.endswith("-expected.png"):
with self._fs.open_binary_file_for_reading(self._file_path) as filehandle:
if not read_checksum_from_png.read_checksum(filehandle):
self._handle_style_error(0, 'image/png', 5, "Image lacks a checksum. Generate pngs using run-webkit-tests to ensure they have a checksum.")
if detection == "git":
(file_missing, autoprop_missing, png_missing) = checksvnconfigfile.check(self._host, self._fs)
config_file_path = checksvnconfigfile.config_file_path(self._host, self._fs)
if file_missing:
self._handle_style_error(0, 'image/png', 5, "There is no SVN config file. (%s)" % config_file_path)
elif autoprop_missing and png_missing:
self._handle_style_error(0, 'image/png', 5, checksvnconfigfile.errorstr_autoprop(config_file_path) + checksvnconfigfile.errorstr_png(config_file_path))
elif autoprop_missing:
self._handle_style_error(0, 'image/png', 5, checksvnconfigfile.errorstr_autoprop(config_file_path))
elif png_missing:
self._handle_style_error(0, 'image/png', 5, checksvnconfigfile.errorstr_png(config_file_path))
elif detection == "svn":
prop_get = self._detector.propget("svn:mime-type", self._file_path)
if prop_get != "image/png":
errorstr = "Set the svn:mime-type property (svn propset svn:mime-type image/png %s)." % self._file_path
self._handle_style_error(0, 'image/png', 5, errorstr)
| bsd-3-clause |
matthewoliver/swift | test/unit/obj/test_expirer.py | 2 | 34828 | # Copyright (c) 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from time import time
from unittest import main, TestCase
from test.unit import FakeRing, mocked_http_conn, debug_logger
from tempfile import mkdtemp
from shutil import rmtree
from collections import defaultdict
from copy import deepcopy
import mock
import six
from six.moves import urllib
from swift.common import internal_client, utils, swob
from swift.common.utils import Timestamp
from swift.obj import expirer
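# Module-level test doubles: not_random() returns a fixed value and
# not_sleep() records the requested interval in last_not_sleep instead of
# sleeping, keeping these tests deterministic and fast.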
def not_random():
return 0.5
last_not_sleep = 0
def not_sleep(seconds):
global last_not_sleep
last_not_sleep = seconds
class FakeInternalClient(object):
container_ring = FakeRing()
def __init__(self, aco_dict):
"""
:param aco_dict: A dict of account ,container, object that
FakeInternalClient can return when each method called. Each account
has container name dict, and each container dict has object name
list in the container.
e.g. {'account1': {
'container1: ['obj1', 'obj2', 'obj3'],
'container2: [],
},
'account2': {},
}
"""
self.aco_dict = defaultdict(dict)
self.aco_dict.update(aco_dict)
def get_account_info(self, account):
acc_dict = self.aco_dict[account]
container_count = len(acc_dict)
obj_count = sum(len(objs) for objs in acc_dict.values())
return container_count, obj_count
def iter_containers(self, account, prefix=''):
acc_dict = self.aco_dict[account]
return sorted([{'name': six.text_type(container)} for container in
acc_dict if container.startswith(prefix)])
def delete_container(*a, **kw):
pass
def iter_objects(self, account, container):
acc_dict = self.aco_dict[account]
obj_iter = acc_dict.get(container, [])
return [{'name': six.text_type(obj)} for obj in obj_iter]
def make_request(*a, **kw):
pass
class TestObjectExpirer(TestCase):
maxDiff = None
internal_client = None
def setUp(self):
global not_sleep
self.old_loadapp = internal_client.loadapp
self.old_sleep = internal_client.sleep
internal_client.loadapp = lambda *a, **kw: None
internal_client.sleep = not_sleep
self.rcache = mkdtemp()
self.conf = {'recon_cache_path': self.rcache}
self.logger = debug_logger('test-expirer')
self.past_time = str(int(time() - 86400))
self.future_time = str(int(time() + 86400))
# Dummy task queue for test
self.fake_swift = FakeInternalClient({
'.expiring_objects': {
# this task container will be checked
self.past_time: [
# tasks ready for execution
self.past_time + '-a0/c0/o0',
self.past_time + '-a1/c1/o1',
self.past_time + '-a2/c2/o2',
self.past_time + '-a3/c3/o3',
self.past_time + '-a4/c4/o4',
self.past_time + '-a5/c5/o5',
self.past_time + '-a6/c6/o6',
self.past_time + '-a7/c7/o7',
# task objects for unicode test
self.past_time + u'-a8/c8/o8\u2661',
self.past_time + u'-a9/c9/o9\xf8',
# this task will be skipped
self.future_time + '-a10/c10/o10'],
# this task container will be skipped
self.future_time: [
self.future_time + '-a11/c11/o11']}
})
self.expirer = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=self.fake_swift)
        # target object paths which should be expired now
self.expired_target_path_list = [
'a0/c0/o0', 'a1/c1/o1', 'a2/c2/o2', 'a3/c3/o3', 'a4/c4/o4',
'a5/c5/o5', 'a6/c6/o6', 'a7/c7/o7',
'a8/c8/o8\xe2\x99\xa1', 'a9/c9/o9\xc3\xb8',
]
def tearDown(self):
rmtree(self.rcache)
internal_client.sleep = self.old_sleep
internal_client.loadapp = self.old_loadapp
def test_get_process_values_from_kwargs(self):
x = expirer.ObjectExpirer({})
vals = {
'processes': 5,
'process': 1,
}
x.get_process_values(vals)
self.assertEqual(x.processes, 5)
self.assertEqual(x.process, 1)
def test_get_process_values_from_config(self):
vals = {
'processes': 5,
'process': 1,
}
x = expirer.ObjectExpirer(vals)
x.get_process_values({})
self.assertEqual(x.processes, 5)
self.assertEqual(x.process, 1)
def test_get_process_values_negative_process(self):
vals = {
'processes': 5,
'process': -1,
}
# from config
x = expirer.ObjectExpirer(vals)
expected_msg = 'process must be an integer greater' \
' than or equal to 0'
with self.assertRaises(ValueError) as ctx:
x.get_process_values({})
self.assertEqual(str(ctx.exception), expected_msg)
# from kwargs
x = expirer.ObjectExpirer({})
with self.assertRaises(ValueError) as ctx:
x.get_process_values(vals)
self.assertEqual(str(ctx.exception), expected_msg)
def test_get_process_values_negative_processes(self):
vals = {
'processes': -5,
'process': 1,
}
# from config
x = expirer.ObjectExpirer(vals)
expected_msg = 'processes must be an integer greater' \
' than or equal to 0'
with self.assertRaises(ValueError) as ctx:
x.get_process_values({})
self.assertEqual(str(ctx.exception), expected_msg)
# from kwargs
x = expirer.ObjectExpirer({})
with self.assertRaises(ValueError) as ctx:
x.get_process_values(vals)
self.assertEqual(str(ctx.exception), expected_msg)
def test_get_process_values_process_greater_than_processes(self):
vals = {
'processes': 5,
'process': 7,
}
# from config
x = expirer.ObjectExpirer(vals)
expected_msg = 'process must be less than processes'
with self.assertRaises(ValueError) as ctx:
x.get_process_values({})
self.assertEqual(str(ctx.exception), expected_msg)
# from kwargs
x = expirer.ObjectExpirer({})
with self.assertRaises(ValueError) as ctx:
x.get_process_values(vals)
self.assertEqual(str(ctx.exception), expected_msg)
def test_get_process_values_process_equal_to_processes(self):
vals = {
'processes': 5,
'process': 5,
}
# from config
x = expirer.ObjectExpirer(vals)
expected_msg = 'process must be less than processes'
with self.assertRaises(ValueError) as ctx:
x.get_process_values({})
self.assertEqual(str(ctx.exception), expected_msg)
# from kwargs
x = expirer.ObjectExpirer({})
with self.assertRaises(ValueError) as ctx:
x.get_process_values(vals)
self.assertEqual(str(ctx.exception), expected_msg)
def test_init_concurrency_too_small(self):
conf = {
'concurrency': 0,
}
self.assertRaises(ValueError, expirer.ObjectExpirer, conf)
conf = {
'concurrency': -1,
}
self.assertRaises(ValueError, expirer.ObjectExpirer, conf)
def test_process_based_concurrency(self):
class ObjectExpirer(expirer.ObjectExpirer):
def __init__(self, conf, swift):
super(ObjectExpirer, self).__init__(conf, swift=swift)
self.processes = 3
self.deleted_objects = {}
def delete_object(self, target_path, delete_timestamp,
task_account, task_container, task_object):
if task_container not in self.deleted_objects:
self.deleted_objects[task_container] = set()
self.deleted_objects[task_container].add(task_object)
x = ObjectExpirer(self.conf, swift=self.fake_swift)
deleted_objects = defaultdict(set)
for i in range(3):
x.process = i
# reset progress so we know we don't double-up work among processes
x.deleted_objects = defaultdict(set)
x.run_once()
for task_container, deleted in x.deleted_objects.items():
self.assertFalse(deleted_objects[task_container] & deleted)
deleted_objects[task_container] |= deleted
# sort for comparison
deleted_objects = {
con: sorted(o_set) for con, o_set in deleted_objects.items()}
expected = {
self.past_time: [
self.past_time + '-' + target_path
for target_path in self.expired_target_path_list]}
self.assertEqual(deleted_objects, expected)
def test_delete_object(self):
x = expirer.ObjectExpirer({}, logger=self.logger)
actual_obj = 'actual_obj'
timestamp = int(time())
reclaim_ts = timestamp - x.reclaim_age
account = 'account'
container = 'container'
obj = 'obj'
http_exc = {
resp_code:
internal_client.UnexpectedResponse(
str(resp_code), swob.HTTPException(status=resp_code))
for resp_code in {404, 412, 500}
}
exc_other = Exception()
def check_call_to_delete_object(exc, ts, should_pop):
x.logger.clear()
start_reports = x.report_objects
with mock.patch.object(x, 'delete_actual_object',
side_effect=exc) as delete_actual:
with mock.patch.object(x, 'pop_queue') as pop_queue:
x.delete_object(actual_obj, ts, account, container, obj)
delete_actual.assert_called_once_with(actual_obj, ts)
log_lines = x.logger.get_lines_for_level('error')
if should_pop:
pop_queue.assert_called_once_with(account, container, obj)
self.assertEqual(start_reports + 1, x.report_objects)
self.assertFalse(log_lines)
else:
self.assertFalse(pop_queue.called)
self.assertEqual(start_reports, x.report_objects)
self.assertEqual(1, len(log_lines))
if isinstance(exc, internal_client.UnexpectedResponse):
self.assertEqual(
log_lines[0],
'Unexpected response while deleting object '
'account container obj: %s' % exc.resp.status_int)
else:
self.assertTrue(log_lines[0].startswith(
'Exception while deleting object '
'account container obj'))
# verify pop_queue logic on exceptions
for exc, ts, should_pop in [(None, timestamp, True),
(http_exc[404], timestamp, False),
(http_exc[412], timestamp, False),
(http_exc[500], reclaim_ts, False),
(exc_other, reclaim_ts, False),
(http_exc[404], reclaim_ts, True),
(http_exc[412], reclaim_ts, True)]:
try:
check_call_to_delete_object(exc, ts, should_pop)
except AssertionError as err:
self.fail("Failed on %r at %f: %s" % (exc, ts, err))
def test_report(self):
x = expirer.ObjectExpirer({}, logger=self.logger)
x.report()
self.assertEqual(x.logger.get_lines_for_level('info'), [])
x.logger._clear()
x.report(final=True)
self.assertTrue(
'completed' in str(x.logger.get_lines_for_level('info')))
self.assertTrue(
'so far' not in str(x.logger.get_lines_for_level('info')))
x.logger._clear()
x.report_last_time = time() - x.report_interval
x.report()
self.assertTrue(
'completed' not in str(x.logger.get_lines_for_level('info')))
self.assertTrue(
'so far' in str(x.logger.get_lines_for_level('info')))
def test_parse_task_obj(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
def assert_parse_task_obj(task_obj, expected_delete_at,
expected_account, expected_container,
expected_obj):
delete_at, account, container, obj = x.parse_task_obj(task_obj)
self.assertEqual(delete_at, expected_delete_at)
self.assertEqual(account, expected_account)
self.assertEqual(container, expected_container)
self.assertEqual(obj, expected_obj)
assert_parse_task_obj('0000-a/c/o', 0, 'a', 'c', 'o')
assert_parse_task_obj('0001-a/c/o', 1, 'a', 'c', 'o')
assert_parse_task_obj('1000-a/c/o', 1000, 'a', 'c', 'o')
assert_parse_task_obj('0000-acc/con/obj', 0, 'acc', 'con', 'obj')
def make_task(self, delete_at, target):
return {
'task_account': '.expiring_objects',
'task_container': delete_at,
'task_object': delete_at + '-' + target,
'delete_timestamp': Timestamp(delete_at),
'target_path': target,
}
def test_round_robin_order(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
task_con_obj_list = [
# objects in 0000 timestamp container
self.make_task('0000', 'a/c0/o0'),
self.make_task('0000', 'a/c0/o1'),
# objects in 0001 timestamp container
self.make_task('0001', 'a/c1/o0'),
self.make_task('0001', 'a/c1/o1'),
# objects in 0002 timestamp container
self.make_task('0002', 'a/c2/o0'),
self.make_task('0002', 'a/c2/o1'),
]
result = list(x.round_robin_order(task_con_obj_list))
# sorted by popping one object to delete for each target_container
expected = [
self.make_task('0000', 'a/c0/o0'),
self.make_task('0001', 'a/c1/o0'),
self.make_task('0002', 'a/c2/o0'),
self.make_task('0000', 'a/c0/o1'),
self.make_task('0001', 'a/c1/o1'),
self.make_task('0002', 'a/c2/o1'),
]
self.assertEqual(expected, result)
# task containers have some task objects with invalid target paths
task_con_obj_list = [
# objects in 0000 timestamp container
self.make_task('0000', 'invalid0'),
self.make_task('0000', 'a/c0/o0'),
self.make_task('0000', 'a/c0/o1'),
# objects in 0001 timestamp container
self.make_task('0001', 'a/c1/o0'),
self.make_task('0001', 'invalid1'),
self.make_task('0001', 'a/c1/o1'),
# objects in 0002 timestamp container
self.make_task('0002', 'a/c2/o0'),
self.make_task('0002', 'a/c2/o1'),
self.make_task('0002', 'invalid2'),
]
result = list(x.round_robin_order(task_con_obj_list))
# the invalid task objects are ignored
expected = [
self.make_task('0000', 'a/c0/o0'),
self.make_task('0001', 'a/c1/o0'),
self.make_task('0002', 'a/c2/o0'),
self.make_task('0000', 'a/c0/o1'),
self.make_task('0001', 'a/c1/o1'),
self.make_task('0002', 'a/c2/o1'),
]
self.assertEqual(expected, result)
# for a given target container, tasks won't necessarily all go in
# the same timestamp container
task_con_obj_list = [
# objects in 0000 timestamp container
self.make_task('0000', 'a/c0/o0'),
self.make_task('0000', 'a/c0/o1'),
self.make_task('0000', 'a/c2/o2'),
self.make_task('0000', 'a/c2/o3'),
# objects in 0001 timestamp container
self.make_task('0001', 'a/c0/o2'),
self.make_task('0001', 'a/c0/o3'),
self.make_task('0001', 'a/c1/o0'),
self.make_task('0001', 'a/c1/o1'),
# objects in 0002 timestamp container
self.make_task('0002', 'a/c2/o0'),
self.make_task('0002', 'a/c2/o1'),
]
result = list(x.round_robin_order(task_con_obj_list))
# so we go around popping by *target* container, not *task* container
expected = [
self.make_task('0000', 'a/c0/o0'),
self.make_task('0001', 'a/c1/o0'),
self.make_task('0000', 'a/c2/o2'),
self.make_task('0000', 'a/c0/o1'),
self.make_task('0001', 'a/c1/o1'),
self.make_task('0000', 'a/c2/o3'),
self.make_task('0001', 'a/c0/o2'),
self.make_task('0002', 'a/c2/o0'),
self.make_task('0001', 'a/c0/o3'),
self.make_task('0002', 'a/c2/o1'),
]
self.assertEqual(expected, result)
# all of the work to be done could be for different target containers
task_con_obj_list = [
# objects in 0000 timestamp container
self.make_task('0000', 'a/c0/o'),
self.make_task('0000', 'a/c1/o'),
self.make_task('0000', 'a/c2/o'),
self.make_task('0000', 'a/c3/o'),
# objects in 0001 timestamp container
self.make_task('0001', 'a/c4/o'),
self.make_task('0001', 'a/c5/o'),
self.make_task('0001', 'a/c6/o'),
self.make_task('0001', 'a/c7/o'),
# objects in 0002 timestamp container
self.make_task('0002', 'a/c8/o'),
self.make_task('0002', 'a/c9/o'),
]
result = list(x.round_robin_order(task_con_obj_list))
# in which case, we kind of hammer the task containers
self.assertEqual(task_con_obj_list, result)
def test_hash_mod(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
mod_count = [0, 0, 0]
for i in range(1000):
name = 'obj%d' % i
mod = x.hash_mod(name, 3)
mod_count[mod] += 1
# 1000 names are well shuffled
self.assertGreater(mod_count[0], 300)
self.assertGreater(mod_count[1], 300)
self.assertGreater(mod_count[2], 300)
def test_iter_task_accounts_to_expire(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
results = [_ for _ in x.iter_task_accounts_to_expire()]
self.assertEqual(results, [('.expiring_objects', 0, 1)])
self.conf['processes'] = '2'
self.conf['process'] = '1'
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
results = [_ for _ in x.iter_task_accounts_to_expire()]
self.assertEqual(results, [('.expiring_objects', 1, 2)])
def test_delete_at_time_of_task_container(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
self.assertEqual(x.delete_at_time_of_task_container('0000'), 0)
self.assertEqual(x.delete_at_time_of_task_container('0001'), 1)
self.assertEqual(x.delete_at_time_of_task_container('1000'), 1000)
def test_run_once_nothing_to_do(self):
x = expirer.ObjectExpirer(self.conf, logger=self.logger)
x.swift = 'throw error because a string does not have needed methods'
x.run_once()
self.assertEqual(x.logger.get_lines_for_level('error'),
["Unhandled exception: "])
log_args, log_kwargs = x.logger.log_dict['error'][0]
self.assertEqual(str(log_kwargs['exc_info'][1]),
"'str' object has no attribute 'get_account_info'")
def test_run_once_calls_report(self):
with mock.patch.object(self.expirer, 'pop_queue',
lambda a, c, o: None):
self.expirer.run_once()
self.assertEqual(
self.expirer.logger.get_lines_for_level('info'), [
'Pass beginning for task account .expiring_objects; '
'2 possible containers; 12 possible objects',
'Pass completed in 0s; 10 objects expired',
])
def test_skip_task_account_without_task_container(self):
fake_swift = FakeInternalClient({
# task account has no containers
'.expiring_objects': dict()
})
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=fake_swift)
x.run_once()
self.assertEqual(
x.logger.get_lines_for_level('info'), [
'Pass completed in 0s; 0 objects expired',
])
def test_iter_task_to_expire(self):
# In this test, all tasks are assigned to the tested expirer
my_index = 0
divisor = 1
task_account_container_list = [('.expiring_objects', self.past_time)]
expected = [
self.make_task(self.past_time, target_path)
for target_path in self.expired_target_path_list]
self.assertEqual(
list(self.expirer.iter_task_to_expire(
task_account_container_list, my_index, divisor)),
expected)
# the task queue has invalid task object
invalid_aco_dict = deepcopy(self.fake_swift.aco_dict)
invalid_aco_dict['.expiring_objects'][self.past_time].insert(
0, self.past_time + '-invalid0')
invalid_aco_dict['.expiring_objects'][self.past_time].insert(
5, self.past_time + '-invalid1')
invalid_fake_swift = FakeInternalClient(invalid_aco_dict)
x = expirer.ObjectExpirer(self.conf, logger=self.logger,
swift=invalid_fake_swift)
# but the invalid tasks are skipped
self.assertEqual(
list(x.iter_task_to_expire(
task_account_container_list, my_index, divisor)),
expected)
def test_run_once_unicode_problem(self):
requests = []
def capture_requests(ipaddr, port, method, path, *args, **kwargs):
requests.append((method, path))
        # pop_queue makes 3 DELETE requests for each of the 10 executed
        # task objects
code_list = [200] * 3 * 10
with mocked_http_conn(*code_list, give_connect=capture_requests):
self.expirer.run_once()
self.assertEqual(len(requests), 30)
def test_container_timestamp_break(self):
with mock.patch.object(self.fake_swift, 'iter_objects') as mock_method:
self.expirer.run_once()
# iter_objects is called only for past_time, not future_time
self.assertEqual(mock_method.call_args_list,
[mock.call('.expiring_objects', self.past_time)])
def test_object_timestamp_break(self):
with mock.patch.object(self.expirer, 'delete_actual_object') \
as mock_method, \
mock.patch.object(self.expirer, 'pop_queue'):
self.expirer.run_once()
# executed tasks are with past time
self.assertEqual(
mock_method.call_args_list,
[mock.call(target_path, self.past_time)
for target_path in self.expired_target_path_list])
def test_failed_delete_keeps_entry(self):
def deliberately_blow_up(actual_obj, timestamp):
raise Exception('failed to delete actual object')
# any tasks are not done
with mock.patch.object(self.expirer, 'delete_actual_object',
deliberately_blow_up), \
mock.patch.object(self.expirer, 'pop_queue') as mock_method:
self.expirer.run_once()
# no tasks are popped from the queue
self.assertEqual(mock_method.call_args_list, [])
# all tasks are done
with mock.patch.object(self.expirer, 'delete_actual_object',
lambda o, t: None), \
mock.patch.object(self.expirer, 'pop_queue') as mock_method:
self.expirer.run_once()
# all tasks are popped from the queue
self.assertEqual(
mock_method.call_args_list,
[mock.call('.expiring_objects', self.past_time,
self.past_time + '-' + target_path)
for target_path in self.expired_target_path_list])
def test_success_gets_counted(self):
self.assertEqual(self.expirer.report_objects, 0)
with mock.patch('swift.obj.expirer.MAX_OBJECTS_TO_CACHE', 0), \
mock.patch.object(self.expirer, 'delete_actual_object',
lambda o, t: None), \
mock.patch.object(self.expirer, 'pop_queue',
lambda a, c, o: None):
self.expirer.run_once()
self.assertEqual(self.expirer.report_objects, 10)
def test_delete_actual_object_does_not_get_unicode(self):
got_unicode = [False]
def delete_actual_object_test_for_unicode(actual_obj, timestamp):
if isinstance(actual_obj, six.text_type):
got_unicode[0] = True
self.assertEqual(self.expirer.report_objects, 0)
with mock.patch.object(self.expirer, 'delete_actual_object',
delete_actual_object_test_for_unicode), \
mock.patch.object(self.expirer, 'pop_queue',
lambda a, c, o: None):
self.expirer.run_once()
self.assertEqual(self.expirer.report_objects, 10)
self.assertFalse(got_unicode[0])
def test_failed_delete_continues_on(self):
def fail_delete_container(*a, **kw):
raise Exception('failed to delete container')
def fail_delete_actual_object(actual_obj, timestamp):
raise Exception('failed to delete actual object')
with mock.patch.object(self.fake_swift, 'delete_container',
fail_delete_container), \
mock.patch.object(self.expirer, 'delete_actual_object',
fail_delete_actual_object):
self.expirer.run_once()
error_lines = self.expirer.logger.get_lines_for_level('error')
self.assertEqual(error_lines, [
'Exception while deleting object %s %s %s '
'failed to delete actual object: ' % (
'.expiring_objects', self.past_time,
self.past_time + '-' + target_path)
for target_path in self.expired_target_path_list] + [
'Exception while deleting container %s %s '
'failed to delete container: ' % (
'.expiring_objects', self.past_time)])
self.assertEqual(self.expirer.logger.get_lines_for_level('info'), [
'Pass beginning for task account .expiring_objects; '
'2 possible containers; 12 possible objects',
'Pass completed in 0s; 0 objects expired',
])
def test_run_forever_initial_sleep_random(self):
global last_not_sleep
def raise_system_exit():
raise SystemExit('test_run_forever')
interval = 1234
x = expirer.ObjectExpirer({'__file__': 'unit_test',
'interval': interval})
orig_random = expirer.random
orig_sleep = expirer.sleep
try:
expirer.random = not_random
expirer.sleep = not_sleep
x.run_once = raise_system_exit
x.run_forever()
except SystemExit as err:
pass
finally:
expirer.random = orig_random
expirer.sleep = orig_sleep
self.assertEqual(str(err), 'test_run_forever')
self.assertEqual(last_not_sleep, 0.5 * interval)
def test_run_forever_catches_usual_exceptions(self):
raises = [0]
def raise_exceptions():
raises[0] += 1
if raises[0] < 2:
raise Exception('exception %d' % raises[0])
raise SystemExit('exiting exception %d' % raises[0])
x = expirer.ObjectExpirer({}, logger=self.logger)
orig_sleep = expirer.sleep
try:
expirer.sleep = not_sleep
x.run_once = raise_exceptions
x.run_forever()
except SystemExit as err:
pass
finally:
expirer.sleep = orig_sleep
self.assertEqual(str(err), 'exiting exception 2')
self.assertEqual(x.logger.get_lines_for_level('error'),
['Unhandled exception: '])
log_args, log_kwargs = x.logger.log_dict['error'][0]
self.assertEqual(str(log_kwargs['exc_info'][1]),
'exception 1')
def test_delete_actual_object(self):
got_env = [None]
def fake_app(env, start_response):
got_env[0] = env
start_response('204 No Content', [('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
ts = Timestamp('1234')
x.delete_actual_object('/path/to/object', ts)
self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)
self.assertEqual(got_env[0]['HTTP_X_TIMESTAMP'],
got_env[0]['HTTP_X_IF_DELETE_AT'])
def test_delete_actual_object_nourlquoting(self):
# delete_actual_object should not do its own url quoting because
# internal client's make_request handles that.
got_env = [None]
def fake_app(env, start_response):
got_env[0] = env
start_response('204 No Content', [('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
ts = Timestamp('1234')
x.delete_actual_object('/path/to/object name', ts)
self.assertEqual(got_env[0]['HTTP_X_IF_DELETE_AT'], ts)
self.assertEqual(got_env[0]['HTTP_X_TIMESTAMP'],
got_env[0]['HTTP_X_IF_DELETE_AT'])
self.assertEqual(got_env[0]['PATH_INFO'], '/v1/path/to/object name')
def test_delete_actual_object_returns_expected_error(self):
def do_test(test_status, should_raise):
calls = [0]
def fake_app(env, start_response):
calls[0] += 1
start_response(test_status, [('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
ts = Timestamp('1234')
if should_raise:
with self.assertRaises(internal_client.UnexpectedResponse):
x.delete_actual_object('/path/to/object', ts)
else:
x.delete_actual_object('/path/to/object', ts)
self.assertEqual(calls[0], 1)
# object was deleted and tombstone reaped
do_test('404 Not Found', True)
# object was overwritten *after* the original expiration, or
do_test('409 Conflict', False)
# object was deleted but tombstone still exists, or
# object was overwritten ahead of the original expiration, or
# object was POSTed to with a new (or no) expiration, or ...
do_test('412 Precondition Failed', True)
def test_delete_actual_object_does_not_handle_odd_stuff(self):
def fake_app(env, start_response):
start_response(
'503 Internal Server Error',
[('Content-Length', '0')])
return []
internal_client.loadapp = lambda *a, **kw: fake_app
x = expirer.ObjectExpirer({})
exc = None
try:
x.delete_actual_object('/path/to/object', Timestamp('1234'))
except Exception as err:
exc = err
finally:
pass
self.assertEqual(503, exc.resp.status_int)
def test_delete_actual_object_quotes(self):
name = 'this name should get quoted'
timestamp = Timestamp('1366063156.863045')
x = expirer.ObjectExpirer({})
x.swift.make_request = mock.Mock()
x.swift.make_request.return_value.status_int = 204
x.delete_actual_object(name, timestamp)
self.assertEqual(x.swift.make_request.call_count, 1)
self.assertEqual(x.swift.make_request.call_args[0][1],
'/v1/' + urllib.parse.quote(name))
def test_delete_actual_object_queue_cleaning(self):
name = 'something'
timestamp = Timestamp('1515544858.80602')
x = expirer.ObjectExpirer({})
x.swift.make_request = mock.MagicMock()
x.delete_actual_object(name, timestamp)
self.assertEqual(x.swift.make_request.call_count, 1)
header = 'X-Backend-Clean-Expiring-Object-Queue'
self.assertEqual(
x.swift.make_request.call_args[0][2].get(header),
'no')
def test_pop_queue(self):
x = expirer.ObjectExpirer({}, logger=self.logger,
swift=FakeInternalClient({}))
requests = []
def capture_requests(ipaddr, port, method, path, *args, **kwargs):
requests.append((method, path))
with mocked_http_conn(
200, 200, 200, give_connect=capture_requests) as fake_conn:
x.pop_queue('a', 'c', 'o')
self.assertRaises(StopIteration, fake_conn.code_iter.next)
for method, path in requests:
self.assertEqual(method, 'DELETE')
device, part, account, container, obj = utils.split_path(
path, 5, 5, True)
self.assertEqual(account, 'a')
self.assertEqual(container, 'c')
self.assertEqual(obj, 'o')
if __name__ == '__main__':
main()
| apache-2.0 |
oopy/micropython | tests/basics/exceptpoly2.py | 63 | 1789 | try:
raise MemoryError
except Exception:
print("Caught MemoryError via Exception")
try:
raise MemoryError
except MemoryError:
print("Caught MemoryError")
try:
raise NameError
except Exception:
print("Caught NameError via Exception")
try:
raise NameError
except NameError:
print("Caught NameError")
try:
raise NotImplementedError
except RuntimeError:
print("Caught NotImplementedError via RuntimeError")
try:
raise NotImplementedError
except NotImplementedError:
print("Caught NotImplementedError")
try:
raise OSError
except Exception:
print("Caught OSError via Exception")
try:
raise OSError
except OSError:
print("Caught OSError")
try:
raise OverflowError
except ArithmeticError:
print("Caught OverflowError via ArithmeticError")
try:
raise OverflowError
except OverflowError:
print("Caught OverflowError")
try:
raise RuntimeError
except Exception:
print("Caught RuntimeError via Exception")
try:
raise RuntimeError
except RuntimeError:
print("Caught RuntimeError")
try:
raise SyntaxError
except Exception:
print("Caught SyntaxError via Exception")
try:
raise SyntaxError
except SyntaxError:
print("Caught SyntaxError")
try:
raise TypeError
except Exception:
print("Caught TypeError via Exception")
try:
raise TypeError
except TypeError:
print("Caught TypeError")
try:
raise ValueError
except Exception:
print("Caught ValueError via Exception")
try:
raise ValueError
except ValueError:
print("Caught ValueError")
try:
raise ZeroDivisionError
except ArithmeticError:
print("Caught ZeroDivisionError via ArithmeticError")
try:
raise ZeroDivisionError
except ZeroDivisionError:
print("Caught ZeroDivisionError")
| mit |
haeusser/tensorflow | tensorflow/contrib/ndlstm/python/lstm2d_test.py | 22 | 3473 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for 2D LSTMs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO: #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
import numpy as np
from tensorflow.contrib.ndlstm.python import lstm2d as lstm2d_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
lstm2d = lstm2d_lib
def _rand(*size):
return np.random.uniform(size=size).astype("f")
class Lstm2DTest(test_util.TensorFlowTestCase):
def testImagesToSequenceDims(self):
with self.test_session():
inputs = constant_op.constant(_rand(2, 7, 11, 5))
outputs = lstm2d.images_to_sequence(inputs)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (11, 14, 5))
def testSequenceToImagesDims(self):
with self.test_session():
inputs = constant_op.constant(_rand(11, 14, 5))
outputs = lstm2d.sequence_to_images(inputs, 2)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (2, 7, 11, 5))
def testImagesAndSequenceDims(self):
with self.test_session():
size = (2, 7, 11, 5)
inputs = constant_op.constant(_rand(*size))
sequence = lstm2d.images_to_sequence(inputs)
outputs = lstm2d.sequence_to_images(sequence, size[0])
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), size)
def testSeparableLstmDims(self):
with self.test_session():
inputs = constant_op.constant(_rand(2, 7, 11, 5))
outputs = lstm2d.separable_lstm(inputs, 8)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (2, 7, 11, 8))
def testReduceToSequenceDims(self):
with self.test_session():
inputs = constant_op.constant(_rand(2, 7, 11, 5))
outputs = lstm2d.reduce_to_sequence(inputs, 8)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (2, 11, 8))
def testReduceToFinalDims(self):
with self.test_session():
inputs = constant_op.constant(_rand(2, 7, 11, 5))
outputs = lstm2d.reduce_to_final(inputs, 8, 12)
variables.global_variables_initializer().run()
result = outputs.eval()
self.assertEqual(tuple(result.shape), (2, 8))
if __name__ == "__main__":
test.main()
| apache-2.0 |
runt18/nupic | tests/unit/nupic/engine/unified_py_parameter_test.py | 35 | 3604 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Test for get/setParameter in python -- these methods are syntactic sugar
that allow you to access parameters without knowing their types,
at a moderate performance penalty.
"""
import unittest2 as unittest
# import for type comparison with Array.
# (Seems we should be able to use nupic.engine.Array directly.)
import nupic.bindings.engine_internal
from nupic.engine import Network
class NetworkUnifiedPyParameterTest(unittest.TestCase):
def testScalars(self):
scalars = [
("int32Param", 32, int, 35),
("uint32Param", 33, int, 36),
("int64Param", 64, long, 74),
("uint64Param", 65, long, 75),
("real32Param", 32.1, float, 33.1),
("real64Param", 64.1, float, 65.1),
("stringParam", "nodespec value", str, "new value")]
n = Network()
l1= n.addRegion("l1", "TestNode", "")
x = l1.getParameter("uint32Param")
for paramName, initval, paramtype, newval in scalars:
# Check the initial value for each parameter.
x = l1.getParameter(paramName)
self.assertEqual(type(x), paramtype)
if initval is None:
continue
if type(x) == float:
self.assertTrue(abs(x - initval) < 0.00001)
else:
self.assertEqual(x, initval)
# Now set the value, and check to make sure the value is updated
l1.setParameter(paramName, newval)
x = l1.getParameter(paramName)
self.assertEqual(type(x), paramtype)
if type(x) == float:
self.assertTrue(abs(x - newval) < 0.00001)
else:
self.assertEqual(x, newval)
def testArrays(self):
arrays = [
("real32ArrayParam",
[0*32, 1*32, 2*32, 3*32, 4*32, 5*32, 6*32, 7*32],
"Real32"),
("int64ArrayParam",
[0*64, 1*64, 2*64, 3*64],
"Int64")
]
n = Network()
l1= n.addRegion("l1", "TestNode", "")
for paramName, initval, paramtype in arrays:
x = l1.getParameter(paramName)
self.assertTrue(isinstance(x, nupic.bindings.engine_internal.Array))
self.assertEqual(x.getType(), paramtype)
self.assertEqual(len(x), len(initval))
for i in xrange(len(x)):
self.assertEqual(x[i], initval[i])
for i in xrange(len(x)):
x[i] = x[i] * 2
l1.setParameter(paramName, x)
x = l1.getParameter(paramName)
self.assertTrue(isinstance(x, nupic.bindings.engine_internal.Array))
self.assertEqual(x.getType(), paramtype)
self.assertEqual(len(x), len(initval))
for i in xrange(len(x)):
self.assertEqual(x[i], 2 * initval[i])
if __name__ == "__main__":
unittest.main()
| agpl-3.0 |
zengenti/ansible | lib/ansible/plugins/callback/junit.py | 92 | 8951 | # (c) 2016 Matt Clay <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import time
import re
from ansible.module_utils._text import to_bytes, to_text
from ansible.plugins.callback import CallbackBase
try:
from junit_xml import TestSuite, TestCase
HAS_JUNIT_XML = True
except ImportError:
HAS_JUNIT_XML = False
try:
from collections import OrderedDict
HAS_ORDERED_DICT = True
except ImportError:
try:
from ordereddict import OrderedDict
HAS_ORDERED_DICT = True
except ImportError:
HAS_ORDERED_DICT = False
class CallbackModule(CallbackBase):
"""
This callback writes playbook output to a JUnit formatted XML file.
Tasks show up in the report as follows:
'ok': pass
'failed' with 'EXPECTED FAILURE' in the task name: pass
'failed' due to an exception: error
'failed' for other reasons: failure
'skipped': skipped
This plugin makes use of the following environment variables:
JUNIT_OUTPUT_DIR (optional): Directory to write XML files to.
Default: ~/.ansible.log
JUNIT_TASK_CLASS (optional): Configure the output to be one class per yaml file
Default: False
Requires:
junit_xml
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'junit'
CALLBACK_NEEDS_WHITELIST = True
def __init__(self):
super(CallbackModule, self).__init__()
self._output_dir = os.getenv('JUNIT_OUTPUT_DIR', os.path.expanduser('~/.ansible.log'))
self._task_class = os.getenv('JUNIT_TASK_CLASS', 'False').lower()
self._playbook_path = None
self._playbook_name = None
self._play_name = None
self._task_data = None
self.disabled = False
if not HAS_JUNIT_XML:
self.disabled = True
self._display.warning('The `junit_xml` python module is not installed. '
'Disabling the `junit` callback plugin.')
if HAS_ORDERED_DICT:
self._task_data = OrderedDict()
else:
self.disabled = True
self._display.warning('The `ordereddict` python module is not installed. '
'Disabling the `junit` callback plugin.')
if not os.path.exists(self._output_dir):
os.mkdir(self._output_dir)
def _start_task(self, task):
""" record the start of a task for one or more hosts """
uuid = task._uuid
if uuid in self._task_data:
return
play = self._play_name
name = task.get_name().strip()
path = task.get_path()
if not task.no_log:
args = ', '.join(('%s=%s' % a for a in task.args.items()))
if args:
name += ' ' + args
self._task_data[uuid] = TaskData(uuid, name, path, play)
def _finish_task(self, status, result):
""" record the results of a task for a single host """
task_uuid = result._task._uuid
if hasattr(result, '_host'):
host_uuid = result._host._uuid
host_name = result._host.name
else:
host_uuid = 'include'
host_name = 'include'
task_data = self._task_data[task_uuid]
if status == 'failed' and 'EXPECTED FAILURE' in task_data.name:
status = 'ok'
task_data.add_host(HostData(host_uuid, host_name, status, result))
def _build_test_case(self, task_data, host_data):
""" build a TestCase from the given TaskData and HostData """
name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
duration = host_data.finish - task_data.start
if self._task_class == 'true':
junit_classname = re.sub('\.yml:[0-9]+$', '', task_data.path)
else:
junit_classname = task_data.path
if host_data.status == 'included':
return TestCase(name, junit_classname, duration, host_data.result)
res = host_data.result._result
rc = res.get('rc', 0)
dump = self._dump_results(res, indent=0)
dump = self._cleanse_string(dump)
if host_data.status == 'ok':
return TestCase(name, junit_classname, duration, dump)
test_case = TestCase(name, junit_classname, duration)
if host_data.status == 'failed':
if 'exception' in res:
message = res['exception'].strip().split('\n')[-1]
output = res['exception']
test_case.add_error_info(message, output)
elif 'msg' in res:
message = res['msg']
test_case.add_failure_info(message, dump)
else:
test_case.add_failure_info('rc=%s' % rc, dump)
elif host_data.status == 'skipped':
if 'skip_reason' in res:
message = res['skip_reason']
else:
message = 'skipped'
test_case.add_skipped_info(message)
return test_case
def _cleanse_string(self, value):
""" convert surrogate escapes to the unicode replacement character to avoid XML encoding errors """
return to_text(to_bytes(value, errors='surrogateescape'), errors='replace')
def _generate_report(self):
""" generate a TestSuite report from the collected TaskData and HostData """
test_cases = []
for task_uuid, task_data in self._task_data.items():
for host_uuid, host_data in task_data.host_data.items():
test_cases.append(self._build_test_case(task_data, host_data))
test_suite = TestSuite(self._playbook_name, test_cases)
report = TestSuite.to_xml_string([test_suite])
output_file = os.path.join(self._output_dir, '%s-%s.xml' % (self._playbook_name, time.time()))
with open(output_file, 'wb') as xml:
xml.write(to_bytes(report, errors='surrogate_or_strict'))
def v2_playbook_on_start(self, playbook):
self._playbook_path = playbook._file_name
self._playbook_name = os.path.splitext(os.path.basename(self._playbook_path))[0]
def v2_playbook_on_play_start(self, play):
self._play_name = play.get_name()
def v2_runner_on_no_hosts(self, task):
self._start_task(task)
def v2_playbook_on_task_start(self, task, is_conditional):
self._start_task(task)
def v2_playbook_on_cleanup_task_start(self, task):
self._start_task(task)
def v2_playbook_on_handler_task_start(self, task):
self._start_task(task)
def v2_runner_on_failed(self, result, ignore_errors=False):
if ignore_errors:
self._finish_task('ok', result)
else:
self._finish_task('failed', result)
def v2_runner_on_ok(self, result):
self._finish_task('ok', result)
def v2_runner_on_skipped(self, result):
self._finish_task('skipped', result)
def v2_playbook_on_include(self, included_file):
self._finish_task('included', included_file)
def v2_playbook_on_stats(self, stats):
self._generate_report()
class TaskData:
"""
Data about an individual task.
"""
def __init__(self, uuid, name, path, play):
self.uuid = uuid
self.name = name
self.path = path
self.play = play
self.start = None
self.host_data = OrderedDict()
self.start = time.time()
def add_host(self, host):
if host.uuid in self.host_data:
if host.status == 'included':
# concatenate task include output from multiple items
host.result = '%s\n%s' % (self.host_data[host.uuid].result, host.result)
else:
raise Exception('%s: %s: %s: duplicate host callback: %s' % (self.path, self.play, self.name, host.name))
self.host_data[host.uuid] = host
class HostData:
"""
Data about an individual host.
"""
def __init__(self, uuid, name, status, result):
self.uuid = uuid
self.name = name
self.status = status
self.result = result
self.finish = time.time()
| gpl-3.0 |
liggitt/docker-registry | depends/docker-registry-core/docker_registry/drivers/file.py | 38 | 4471 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Docker.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
docker_registry.drivers.file
~~~~~~~~~~~~~~~~~~~~~~~~~~
This is a simple filesystem based driver.
"""
import os
import shutil
from ..core import driver
from ..core import exceptions
from ..core import lru
class Storage(driver.Base):
supports_bytes_range = True
def __init__(self, path=None, config=None):
self._root_path = path or './tmp'
def _init_path(self, path=None, create=False):
path = os.path.join(self._root_path, path) if path else self._root_path
if create is True:
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
return path
@lru.get
def get_content(self, path):
path = self._init_path(path)
try:
with open(path, mode='rb') as f:
d = f.read()
except Exception:
raise exceptions.FileNotFoundError('%s is not there' % path)
return d
@lru.set
def put_content(self, path, content):
path = self._init_path(path, create=True)
with open(path, mode='wb') as f:
f.write(content)
return path
def stream_read(self, path, bytes_range=None):
path = self._init_path(path)
nb_bytes = 0
total_size = 0
try:
with open(path, mode='rb') as f:
if bytes_range:
f.seek(bytes_range[0])
total_size = bytes_range[1] - bytes_range[0] + 1
while True:
buf = None
if bytes_range:
# Bytes Range is enabled
buf_size = self.buffer_size
if nb_bytes + buf_size > total_size:
# We make sure we don't read out of the range
buf_size = total_size - nb_bytes
if buf_size > 0:
buf = f.read(buf_size)
nb_bytes += len(buf)
else:
# We're at the end of the range
buf = ''
else:
buf = f.read(self.buffer_size)
if not buf:
break
yield buf
except IOError:
raise exceptions.FileNotFoundError('%s is not there' % path)
def stream_write(self, path, fp):
# Size is mandatory
path = self._init_path(path, create=True)
with open(path, mode='wb') as f:
try:
while True:
buf = fp.read(self.buffer_size)
if not buf:
break
f.write(buf)
except IOError:
pass
def list_directory(self, path=None):
prefix = ''
if path:
prefix = '%s/' % path
path = self._init_path(path)
exists = False
try:
for d in os.listdir(path):
exists = True
yield prefix + d
except Exception:
pass
if not exists:
raise exceptions.FileNotFoundError('%s is not there' % path)
def exists(self, path):
path = self._init_path(path)
return os.path.exists(path)
@lru.remove
def remove(self, path):
path = self._init_path(path)
if os.path.isdir(path):
shutil.rmtree(path)
return
try:
os.remove(path)
except OSError:
raise exceptions.FileNotFoundError('%s is not there' % path)
def get_size(self, path):
path = self._init_path(path)
try:
return os.path.getsize(path)
except OSError:
raise exceptions.FileNotFoundError('%s is not there' % path)
| apache-2.0 |
mhild/Sick-Beard | lib/hachoir_core/field/seekable_field_set.py | 90 | 6130 | from lib.hachoir_core.field import Field, BasicFieldSet, FakeArray, MissingField, ParserError
from lib.hachoir_core.tools import makeUnicode
from lib.hachoir_core.error import HACHOIR_ERRORS
from itertools import repeat
import lib.hachoir_core.config as config
class RootSeekableFieldSet(BasicFieldSet):
def __init__(self, parent, name, stream, description, size):
BasicFieldSet.__init__(self, parent, name, stream, description, size)
self._generator = self.createFields()
self._offset = 0
self._current_size = 0
if size:
self._current_max_size = size
else:
self._current_max_size = 0
self._field_dict = {}
self._field_array = []
def _feedOne(self):
assert self._generator
field = self._generator.next()
self._addField(field)
return field
def array(self, key):
return FakeArray(self, key)
def getFieldByAddress(self, address, feed=True):
for field in self._field_array:
if field.address <= address < field.address + field.size:
return field
for field in self._readFields():
if field.address <= address < field.address + field.size:
return field
return None
def _stopFeed(self):
self._size = self._current_max_size
self._generator = None
done = property(lambda self: not bool(self._generator))
def _getSize(self):
if self._size is None:
self._feedAll()
return self._size
size = property(_getSize)
def _getField(self, key, const):
field = Field._getField(self, key, const)
if field is not None:
return field
if key in self._field_dict:
return self._field_dict[key]
if self._generator and not const:
try:
while True:
field = self._feedOne()
if field.name == key:
return field
except StopIteration:
self._stopFeed()
except HACHOIR_ERRORS, err:
self.error("Error: %s" % makeUnicode(err))
self._stopFeed()
return None
def getField(self, key, const=True):
if isinstance(key, (int, long)):
if key < 0:
raise KeyError("Key must be positive!")
if not const:
self.readFirstFields(key+1)
if len(self._field_array) <= key:
raise MissingField(self, key)
return self._field_array[key]
return Field.getField(self, key, const)
def _addField(self, field):
if field._name.endswith("[]"):
self.setUniqueFieldName(field)
if config.debug:
self.info("[+] DBG: _addField(%s)" % field.name)
if field._address != self._offset:
self.warning("Set field %s address to %s (was %s)" % (
field.path, self._offset//8, field._address//8))
field._address = self._offset
assert field.name not in self._field_dict
self._checkFieldSize(field)
self._field_dict[field.name] = field
self._field_array.append(field)
self._current_size += field.size
self._offset += field.size
self._current_max_size = max(self._current_max_size, field.address + field.size)
def _checkAddress(self, address):
if self._size is not None:
max_addr = self._size
else:
# FIXME: Use parent size
max_addr = self.stream.size
return address < max_addr
def _checkFieldSize(self, field):
size = field.size
addr = field.address
if not self._checkAddress(addr+size-1):
raise ParserError("Unable to add %s: field is too large" % field.name)
def seekBit(self, address, relative=True):
if not relative:
address -= self.absolute_address
if address < 0:
raise ParserError("Seek below field set start (%s.%s)" % divmod(address, 8))
if not self._checkAddress(address):
raise ParserError("Seek above field set end (%s.%s)" % divmod(address, 8))
self._offset = address
return None
def seekByte(self, address, relative=True):
return self.seekBit(address*8, relative)
def readMoreFields(self, number):
return self._readMoreFields(xrange(number))
def _feedAll(self):
return self._readMoreFields(repeat(1))
def _readFields(self):
while True:
added = self._readMoreFields(xrange(1))
if not added:
break
yield self._field_array[-1]
def _readMoreFields(self, index_generator):
added = 0
if self._generator:
try:
for index in index_generator:
self._feedOne()
added += 1
except StopIteration:
self._stopFeed()
except HACHOIR_ERRORS, err:
self.error("Error: %s" % makeUnicode(err))
self._stopFeed()
return added
current_length = property(lambda self: len(self._field_array))
current_size = property(lambda self: self._offset)
def __iter__(self):
for field in self._field_array:
yield field
if self._generator:
try:
while True:
yield self._feedOne()
except StopIteration:
self._stopFeed()
raise StopIteration
def __len__(self):
if self._generator:
self._feedAll()
return len(self._field_array)
def nextFieldAddress(self):
return self._offset
def getFieldIndex(self, field):
return self._field_array.index(field)
class SeekableFieldSet(RootSeekableFieldSet):
def __init__(self, parent, name, description=None, size=None):
assert issubclass(parent.__class__, BasicFieldSet)
RootSeekableFieldSet.__init__(self, parent, name, parent.stream, description, size)
| gpl-3.0 |
tectronics/admiral-jiscmrd | test/MiscLib/tests/TestSuperGlobal.py | 8 | 1649 | # $Id: TestSuperGlobal.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for SuperGlobal module
# See http://pyunit.sourceforge.net/pyunit.html
#
import sys
import unittest
sys.path.append("../..")
from MiscLib.SuperGlobal import *
class TestSuperGlobal(unittest.TestCase):
def setUp(self):
return
def tearDown(self):
return
# Test cases
def testCreateSuperGlobal(self):
sg = SuperGlobal()
sg.i1 = 1
sg.s1 = "s1"
self.assertEqual(sg.i1, 1)
self.assertEqual(sg.s1, "s1")
def testRetrieveSuperGlobal(self):
self.testCreateSuperGlobal()
sg = SuperGlobal()
self.assertEqual(sg.i1, 1)
self.assertEqual(sg.s1, "s1")
def testUpdateSuperGlobal(self):
self.testCreateSuperGlobal()
sg = SuperGlobal()
sg.i2 = 2
sg.s2 = "s2"
self.assertEqual(sg.i1, 1)
self.assertEqual(sg.s1, "s1")
self.assertEqual(sg.i2, 2)
self.assertEqual(sg.s2, "s2")
sg2 = SuperGlobal()
sg2.i1 = 11
sg2.s1 = "s11"
self.assertEqual(sg.i1, 11)
self.assertEqual(sg.s1, "s11")
self.assertEqual(sg.i2, 2)
self.assertEqual(sg.s2, "s2")
# Code to run unit tests directly from command line.
def getTestSuite():
suite = unittest.TestSuite()
suite.addTest(TestSuperGlobal("testCreateSuperGlobal"))
suite.addTest(TestSuperGlobal("testRetrieveSuperGlobal"))
suite.addTest(TestSuperGlobal("testUpdateSuperGlobal"))
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner()
runner.run(getTestSuite())
# End.
| mit |
yavalvas/yav_com | build/matplotlib/examples/widgets/slider_demo.py | 13 | 1179 | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button, RadioButtons
fig, ax = plt.subplots()
plt.subplots_adjust(left=0.25, bottom=0.25)
t = np.arange(0.0, 1.0, 0.001)
a0 = 5
f0 = 3
s = a0*np.sin(2*np.pi*f0*t)
l, = plt.plot(t,s, lw=2, color='red')
plt.axis([0, 1, -10, 10])
axcolor = 'lightgoldenrodyellow'
axfreq = plt.axes([0.25, 0.1, 0.65, 0.03], axisbg=axcolor)
axamp = plt.axes([0.25, 0.15, 0.65, 0.03], axisbg=axcolor)
sfreq = Slider(axfreq, 'Freq', 0.1, 30.0, valinit=f0)
samp = Slider(axamp, 'Amp', 0.1, 10.0, valinit=a0)
def update(val):
amp = samp.val
freq = sfreq.val
l.set_ydata(amp*np.sin(2*np.pi*freq*t))
fig.canvas.draw_idle()
sfreq.on_changed(update)
samp.on_changed(update)
resetax = plt.axes([0.8, 0.025, 0.1, 0.04])
button = Button(resetax, 'Reset', color=axcolor, hovercolor='0.975')
def reset(event):
sfreq.reset()
samp.reset()
button.on_clicked(reset)
rax = plt.axes([0.025, 0.5, 0.15, 0.15], axisbg=axcolor)
radio = RadioButtons(rax, ('red', 'blue', 'green'), active=0)
def colorfunc(label):
l.set_color(label)
fig.canvas.draw_idle()
radio.on_clicked(colorfunc)
plt.show()
| mit |
karmix/anaconda | tests/gui/inside/storage.py | 13 | 9863 | # Copyright (C) 2014 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Chris Lumens <[email protected]>
from dogtail.predicate import GenericPredicate
from dogtail.utils import doDelay
from . import UITestCase
class BasicStorageTestCase(UITestCase):
def check_select_disks(self, spoke):
# FIXME: This is a really roundabout way of determining whether a disk is
# selected or not. For some reason when a DiskOverview is selected, its icon
# loses the name "Hard Disk". For now, we can use this to check.
def _selected(do):
return len(do.findChildren(GenericPredicate(name="Hard Disk"))) == 0
        # A real disk is any disk attached to the VM that's not the special 10 MiB
# one used for communicating in and out of the VM. Therefore, scan all
# children of the DiskOverview looking for one whose name contains that
# size.
def _real_disk(do):
for child in do.findChildren(GenericPredicate()):
if child.name == "10 MiB":
return False
return True
# There should be some disks displayed on the screen.
overviews = spoke.findChildren(GenericPredicate(roleName="disk overview"))
self.assertGreater(len(overviews), 0, msg="No disks are displayed")
if len(overviews) == 1:
# Only one disk was given to this test case, so anaconda selected it
# by default. Verify. Not sure how this would happen with how
# testing is currently done, but it's good to be prepared.
self.assertTrue(_selected(overviews[0]))
else:
# More than one disk was provided, so anaconda did not select any.
# Let's select all disks and proceed.
for overview in filter(_real_disk, overviews):
self.assertFalse(_selected(overview))
overview.click()
self.assertTrue(_selected(overview))
def check_shopping_cart(self, spoke):
pass
def check_storage_options(self, spoke):
button = self.find("Automatically configure partitioning.", "radio button", node=spoke)
self.assertIsNotNone(button, msg="Autopart button not found")
self.assertTrue(button.checked, msg="Autopart should be selected")
button = self.find("I would like to make additional space available.", "check box", node=spoke)
self.assertIsNotNone(button, msg="Reclaim button not found")
self.assertFalse(button.checked, msg="Reclaim button should not be selected")
button = self.find("Encrypt my data.", "check box", node=spoke)
self.assertIsNotNone(button, msg="Encrypt button not found")
self.assertFalse(button.checked, msg="Encrypt button should not be selected")
def _common_run(self):
# First, we need to click on the storage spoke selector.
self.enter_spoke("INSTALLATION DESTINATION")
# Now verify we are on the right screen.
w = self.check_window_displayed("INSTALLATION DESTINATION")
self.check_help_button(w)
# Given that we attach a second disk to the system (for storing the test
# suite and results), anaconda will not select disks by default. Thus,
# the storage options panel should currently be insensitive.
area = self.find("Storage Options", node=w)
self.assertIsNotNone(area, "Storage Options not found")
self.assertFalse(area.sensitive, msg="Storage options should be insensitive")
# Select disk overviews. In the basic case, this means uninitialized
# disks that we're going to do autopart on.
self.check_select_disks(w)
# And now with disks selected, the storage options should be sensitive.
self.assertTrue(area.sensitive, msg="Storage options should be sensitive")
self.check_shopping_cart(w)
self.check_storage_options(w)
return w
def _run(self):
w = self._common_run()
# And then we click the Done button which should take the user right back to
# the hub. There's no need to display any other dialogs given that this is
# an install against empty disks and no other options were checked.
self.exit_spoke(node=w)
class BasicReclaimTestCase(BasicStorageTestCase):
def check_reclaim_buttons_before(self, dlg):
# Test initial sensitivity of widgets upon entering the reclaim dialog. A
# partition should be selected in the view, the only way out should be via
# the Cancel button, and the only operation available on the selected partition
# should be deleting.
button = self.find("Preserve", "push button", node=dlg)
self.assertIsNotNone(button, msg="Preserve button not found")
self.assertFalse(button.sensitive, msg="Preserve button should be insensitive")
button = self.find("Delete", "push button", node=dlg)
self.assertIsNotNone(button, msg="Delete button not found")
self.assertTrue(button.sensitive, msg="Delete button should be sensitive")
button = self.find("Shrink", "push button", node=dlg)
self.assertIsNotNone(button, msg="Shrink button not found")
self.assertFalse(button.sensitive, msg="Shrink button should be insensitive")
button = self.find("Delete all", "push button", node=dlg)
self.assertIsNotNone(button, msg="Delete all button not found")
self.assertTrue(button.sensitive, msg="Delete all button should be sensitive")
button = self.find("Cancel", "push button", node=dlg)
self.assertIsNotNone(button, msg="Cancel button not found")
self.assertTrue(button.sensitive, msg="Cancel button should be sensitive")
button = self.find("Reclaim space", "push button", node=dlg)
self.assertIsNotNone(button, msg="Reclaim button not found")
self.assertFalse(button.sensitive, msg="Reclaim button should be insensitive")
def check_reclaim_buttons_after(self, dlg):
# Test sensitivity of widgets now that enough space has been freed up on disks
# to continue. The Preserve buttons should be the only available operations,
# Delete all should have been renamed to Preserve All, and there should now be
# two ways out of the dialog.
button = self.find("Preserve", "push button", node=dlg)
self.assertIsNotNone(button, msg="Preserve button not found")
self.assertTrue(button.sensitive, msg="Preserve button should be sensitive")
button = self.find("Delete", "push button", node=dlg)
self.assertIsNotNone(button, msg="Delete button not found")
self.assertFalse(button.sensitive, msg="Delete button should be insensitive")
button = self.find("Shrink", "push button", node=dlg)
self.assertIsNotNone(button, msg="Shrink button not found")
self.assertFalse(button.sensitive, msg="Shrink button should be insensitive")
button = self.find("Preserve all", "push button", node=dlg)
self.assertIsNotNone(button, msg="Preserve all button not found")
self.assertTrue(button.sensitive, msg="Preserve all button should be sensitive")
button = self.find("Cancel", "push button", node=dlg)
self.assertIsNotNone(button, msg="Cancel button not found")
self.assertTrue(button.sensitive, msg="Cancel button should be sensitive")
button = self.find("Reclaim space", "push button", node=dlg)
self.assertIsNotNone(button, msg="Reclaim button not found")
self.assertTrue(button.sensitive, msg="Reclaim button should be sensitive")
def check_reclaim(self, optionsDlg):
self.click_button("Reclaim space", node=optionsDlg)
# Verify we are on the reclaim dialog.
reclaimDlg = self.check_dialog_displayed("Reclaim")
self.check_reclaim_buttons_before(reclaimDlg)
# Click the Delete all button to free up enough space.
self.click_button("Delete all", node=reclaimDlg)
self.check_reclaim_buttons_after(reclaimDlg)
# Click on Reclaim space, which should take us all the way back to the hub.
self.click_button("Reclaim space", node=reclaimDlg)
doDelay(5)
self.check_window_displayed("INSTALLATION SUMMARY")
def _run(self):
w = self._common_run()
# Clicking the Done button should bring up the installation options dialog
# indicating there's not currently enough space to install, but more space
# can be made by going to the reclaim dialog.
self.click_button("_Done", node=w)
optionsDlg = self.check_dialog_displayed("Need Space")
self.check_reclaim(optionsDlg)
class CantReclaimTestCase(BasicStorageTestCase):
def _run(self):
w = self._common_run()
# Clicking the Done button should bring up the installation options dialog
# indicating there's never going to be enough space to install. There's nothing
# to do now but quit.
self.click_button("_Done", node=w)
doDelay(5)
optionsDlg = self.check_dialog_displayed("No Space")
self.click_button("Quit installer", node=optionsDlg)
| gpl-2.0 |
rismalrv/edx-platform | common/lib/xmodule/xmodule/foldit_module.py | 68 | 6538 | import logging
from lxml import etree
from pkg_resources import resource_string
from xmodule.editing_module import EditingDescriptor
from xmodule.x_module import XModule
from xmodule.xml_module import XmlDescriptor
from xblock.fields import Scope, Integer, String
from .fields import Date
log = logging.getLogger(__name__)
class FolditFields(object):
# default to what Spring_7012x uses
required_level_half_credit = Integer(default=3, scope=Scope.settings)
required_sublevel_half_credit = Integer(default=5, scope=Scope.settings)
required_level = Integer(default=4, scope=Scope.settings)
required_sublevel = Integer(default=5, scope=Scope.settings)
due = Date(help="Date that this problem is due by", scope=Scope.settings)
show_basic_score = String(scope=Scope.settings, default='false')
show_leaderboard = String(scope=Scope.settings, default='false')
class FolditModule(FolditFields, XModule):
css = {'scss': [resource_string(__name__, 'css/foldit/leaderboard.scss')]}
def __init__(self, *args, **kwargs):
"""
Example:
<foldit show_basic_score="true"
required_level="4"
required_sublevel="3"
required_level_half_credit="2"
required_sublevel_half_credit="3"
show_leaderboard="false"/>
"""
super(FolditModule, self).__init__(*args, **kwargs)
self.due_time = self.due
def is_complete(self):
"""
Did the user get to the required level before the due date?
"""
# We normally don't want django dependencies in xmodule. foldit is
# special. Import this late to avoid errors with things not yet being
# initialized.
from foldit.models import PuzzleComplete
complete = PuzzleComplete.is_level_complete(
self.system.anonymous_student_id,
self.required_level,
self.required_sublevel,
self.due_time)
return complete
def is_half_complete(self):
"""
Did the user reach the required level for half credit?
Ideally this would be more flexible than just 0, 0.5, or 1 credit. On
the other hand, the xml attributes for specifying more specific
cut-offs and partial grades can get more confusing.
"""
from foldit.models import PuzzleComplete
complete = PuzzleComplete.is_level_complete(
self.system.anonymous_student_id,
self.required_level_half_credit,
self.required_sublevel_half_credit,
self.due_time)
return complete
def completed_puzzles(self):
"""
Return a list of puzzles that this user has completed, as an array of
dicts:
[ {'set': int,
'subset': int,
'created': datetime} ]
The list is sorted by set, then subset
"""
from foldit.models import PuzzleComplete
return sorted(
PuzzleComplete.completed_puzzles(self.system.anonymous_student_id),
key=lambda d: (d['set'], d['subset']))
def puzzle_leaders(self, n=10, courses=None):
"""
Returns a list of n pairs (user, score) corresponding to the top
scores; the pairs are in descending order of score.
"""
from foldit.models import Score
if courses is None:
courses = [self.location.course_key]
        leaders = [(leader['username'], leader['score']) for leader in Score.get_tops_n(n, course_list=courses)]
leaders.sort(key=lambda x: -x[1])
return leaders
def get_html(self):
"""
Render the html for the module.
"""
goal_level = '{0}-{1}'.format(
self.required_level,
self.required_sublevel)
showbasic = (self.show_basic_score.lower() == "true")
showleader = (self.show_leaderboard.lower() == "true")
context = {
'due': self.due,
'success': self.is_complete(),
'goal_level': goal_level,
'completed': self.completed_puzzles(),
'top_scores': self.puzzle_leaders(),
'show_basic': showbasic,
'show_leader': showleader,
'folditbasic': self.get_basicpuzzles_html(),
'folditchallenge': self.get_challenge_html()
}
return self.system.render_template('foldit.html', context)
def get_basicpuzzles_html(self):
"""
Render html for the basic puzzle section.
"""
goal_level = '{0}-{1}'.format(
self.required_level,
self.required_sublevel)
context = {
'due': self.due,
'success': self.is_complete(),
'goal_level': goal_level,
'completed': self.completed_puzzles(),
}
return self.system.render_template('folditbasic.html', context)
def get_challenge_html(self):
"""
Render html for challenge (i.e., the leaderboard)
"""
context = {
'top_scores': self.puzzle_leaders()}
return self.system.render_template('folditchallenge.html', context)
def get_score(self):
"""
0 if required_level_half_credit - required_sublevel_half_credit not
reached.
0.5 if required_level_half_credit and required_sublevel_half_credit
reached.
        1 if required_level and required_sublevel reached.
"""
if self.is_complete():
score = 1
elif self.is_half_complete():
score = 0.5
else:
score = 0
return {'score': score,
'total': self.max_score()}
def max_score(self):
return 1
class FolditDescriptor(FolditFields, XmlDescriptor, EditingDescriptor):
"""
Module for adding Foldit problems to courses
"""
mako_template = "widgets/html-edit.html"
module_class = FolditModule
filename_extension = "xml"
has_score = True
show_in_read_only_mode = True
js = {'coffee': [resource_string(__name__, 'js/src/html/edit.coffee')]}
js_module_name = "HTMLEditingDescriptor"
# The grade changes without any student interaction with the edx website,
# so always need to actually check.
always_recalculate_grades = True
@classmethod
def definition_from_xml(cls, xml_object, system):
return {}, []
def definition_to_xml(self, resource_fs):
xml_object = etree.Element('foldit')
return xml_object
| agpl-3.0 |
neumerance/deploy | .venv/lib/python2.7/site-packages/django/contrib/gis/gdal/srs.py | 219 | 11914 | """
The Spatial Reference class, representing OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
# Getting the error checking routine and exceptions
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils import six
from django.utils.encoding import force_bytes
#### Spatial Reference class. ####
class SpatialReference(GDALBase):
"""
A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
the SpatialReference object "provide[s] services to represent coordinate
systems (projections and datums) and to transform between them."
"""
#### Python 'magic' routines ####
def __init__(self, srs_input=''):
"""
Creates a GDAL OSR Spatial Reference object from the given input.
        The input may be a string of OGC Well Known Text (WKT), an integer
EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
"""
srs_type = 'user'
if isinstance(srs_input, six.string_types):
# Encoding to ASCII if unicode passed in.
if isinstance(srs_input, six.text_type):
srs_input = srs_input.encode('ascii')
try:
                # If SRID is a string, e.g., '4326', then make it acceptable
# as user input.
srid = int(srs_input)
srs_input = 'EPSG:%d' % srid
except ValueError:
pass
elif isinstance(srs_input, six.integer_types):
# EPSG integer code was input.
srs_type = 'epsg'
elif isinstance(srs_input, self.ptr_type):
srs = srs_input
srs_type = 'ogr'
else:
raise TypeError('Invalid SRS type "%s"' % srs_type)
if srs_type == 'ogr':
# Input is already an SRS pointer.
srs = srs_input
else:
# Creating a new SRS pointer, using the string buffer.
buf = c_char_p(b'')
srs = capi.new_srs(buf)
# If the pointer is NULL, throw an exception.
if not srs:
raise SRSException('Could not create spatial reference from: %s' % srs_input)
else:
self.ptr = srs
# Importing from either the user input string or an integer SRID.
if srs_type == 'user':
self.import_user_input(srs_input)
elif srs_type == 'epsg':
self.import_epsg(srs_input)
def __del__(self):
"Destroys this spatial reference."
if self._ptr: capi.release_srs(self._ptr)
def __getitem__(self, target):
"""
Returns the value of the given string attribute node, None if the node
doesn't exist. Can also take a tuple as a parameter, (target, child),
where child is the index of the attribute in the WKT. For example:
        >>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
>>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
>>> print(srs['GEOGCS'])
WGS 84
>>> print(srs['DATUM'])
WGS_1984
>>> print(srs['AUTHORITY'])
EPSG
>>> print(srs['AUTHORITY', 1]) # The authority value
4326
>>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
0
        >>> print(srs['UNIT|AUTHORITY']) # For the units authority, you have to use the pipe symbol.
EPSG
        >>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
9122
"""
if isinstance(target, tuple):
return self.attr_value(*target)
else:
return self.attr_value(target)
def __str__(self):
"The string representation uses 'pretty' WKT."
return self.pretty_wkt
#### SpatialReference Methods ####
def attr_value(self, target, index=0):
"""
The attribute value for the given target node (e.g. 'PROJCS'). The index
keyword specifies an index of the child node to return.
"""
if not isinstance(target, six.string_types) or not isinstance(index, int):
raise TypeError
return capi.get_attr_value(self.ptr, force_bytes(target), index)
def auth_name(self, target):
"Returns the authority name for the given string target node."
return capi.get_auth_name(self.ptr, force_bytes(target))
def auth_code(self, target):
"Returns the authority code for the given string target node."
return capi.get_auth_code(self.ptr, force_bytes(target))
def clone(self):
"Returns a clone of this SpatialReference object."
return SpatialReference(capi.clone_srs(self.ptr))
def from_esri(self):
"Morphs this SpatialReference from ESRI's format to EPSG."
capi.morph_from_esri(self.ptr)
def identify_epsg(self):
"""
This method inspects the WKT of this SpatialReference, and will
add EPSG authority nodes where an EPSG identifier is applicable.
"""
capi.identify_epsg(self.ptr)
def to_esri(self):
"Morphs this SpatialReference to ESRI's format."
capi.morph_to_esri(self.ptr)
def validate(self):
"Checks to see if the given spatial reference is valid."
capi.srs_validate(self.ptr)
#### Name & SRID properties ####
@property
def name(self):
"Returns the name of this Spatial Reference."
if self.projected: return self.attr_value('PROJCS')
elif self.geographic: return self.attr_value('GEOGCS')
elif self.local: return self.attr_value('LOCAL_CS')
else: return None
@property
def srid(self):
"Returns the SRID of top-level authority, or None if undefined."
try:
return int(self.attr_value('AUTHORITY', 1))
except (TypeError, ValueError):
return None
#### Unit Properties ####
@property
def linear_name(self):
"Returns the name of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return name
@property
def linear_units(self):
"Returns the value of the linear units."
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
return units
@property
def angular_name(self):
"Returns the name of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return name
@property
def angular_units(self):
"Returns the value of the angular units."
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
return units
@property
def units(self):
"""
Returns a 2-tuple of the units value and the units name,
        automatically determining whether to return the linear
or angular units.
"""
units, name = None, None
if self.projected or self.local:
units, name = capi.linear_units(self.ptr, byref(c_char_p()))
elif self.geographic:
units, name = capi.angular_units(self.ptr, byref(c_char_p()))
if name is not None:
            name = name.decode()
return (units, name)
#### Spheroid/Ellipsoid Properties ####
@property
def ellipsoid(self):
"""
Returns a tuple of the ellipsoid parameters:
(semimajor axis, semiminor axis, and inverse flattening)
"""
return (self.semi_major, self.semi_minor, self.inverse_flattening)
@property
def semi_major(self):
"Returns the Semi Major Axis for this Spatial Reference."
return capi.semi_major(self.ptr, byref(c_int()))
@property
def semi_minor(self):
"Returns the Semi Minor Axis for this Spatial Reference."
return capi.semi_minor(self.ptr, byref(c_int()))
@property
def inverse_flattening(self):
"Returns the Inverse Flattening for this Spatial Reference."
return capi.invflattening(self.ptr, byref(c_int()))
#### Boolean Properties ####
@property
def geographic(self):
"""
Returns True if this SpatialReference is geographic
(root node is GEOGCS).
"""
return bool(capi.isgeographic(self.ptr))
@property
def local(self):
"Returns True if this SpatialReference is local (root node is LOCAL_CS)."
return bool(capi.islocal(self.ptr))
@property
def projected(self):
"""
Returns True if this SpatialReference is a projected coordinate system
(root node is PROJCS).
"""
return bool(capi.isprojected(self.ptr))
#### Import Routines #####
def import_epsg(self, epsg):
"Imports the Spatial Reference from the EPSG code (an integer)."
capi.from_epsg(self.ptr, epsg)
def import_proj(self, proj):
"Imports the Spatial Reference from a PROJ.4 string."
capi.from_proj(self.ptr, proj)
def import_user_input(self, user_input):
"Imports the Spatial Reference from the given user input string."
capi.from_user_input(self.ptr, force_bytes(user_input))
def import_wkt(self, wkt):
"Imports the Spatial Reference from OGC WKT (string)"
capi.from_wkt(self.ptr, byref(c_char_p(wkt)))
def import_xml(self, xml):
"Imports the Spatial Reference from an XML string."
capi.from_xml(self.ptr, xml)
#### Export Properties ####
@property
def wkt(self):
"Returns the WKT representation of this Spatial Reference."
return capi.to_wkt(self.ptr, byref(c_char_p()))
@property
def pretty_wkt(self, simplify=0):
"Returns the 'pretty' representation of the WKT."
return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
@property
def proj(self):
"Returns the PROJ.4 representation for this Spatial Reference."
return capi.to_proj(self.ptr, byref(c_char_p()))
@property
def proj4(self):
"Alias for proj()."
return self.proj
@property
def xml(self, dialect=''):
"Returns the XML representation of this Spatial Reference."
return capi.to_xml(self.ptr, byref(c_char_p()), dialect)
class CoordTransform(GDALBase):
"The coordinate system transformation object."
def __init__(self, source, target):
"Initializes on a source and target SpatialReference objects."
if not isinstance(source, SpatialReference) or not isinstance(target, SpatialReference):
raise TypeError('source and target must be of type SpatialReference')
self.ptr = capi.new_ct(source._ptr, target._ptr)
self._srs1_name = source.name
self._srs2_name = target.name
def __del__(self):
"Deletes this Coordinate Transformation object."
if self._ptr: capi.destroy_ct(self._ptr)
def __str__(self):
return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
| apache-2.0 |
rambo/asylum | project/members/models.py | 4 | 6401 | # -*- coding: utf-8 -*-
import random
from decimal import Decimal
from access.models import AccessType
from access.utils import resolve_acl
from django.core.exceptions import ValidationError
from django.db import models, transaction
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django_markdown.models import MarkdownField
from reversion import revisions
from asylum.models import AsylumModel
from .handlers import call_saves, get_handler_instance
def generate_unique_randomid():
"""Generate pseudorandom ids until a free one is found"""
candidate = "0x%x" % random.randint(1, 2 ** 32)
while Member.objects.filter(anonymized_id=candidate).count():
candidate = "0x%x" % random.randint(1, 2 ** 32)
return candidate
def generate_unique_memberid():
"""Gives the next highest member id"""
try:
highest_member = Member.objects.exclude(member_id=None).order_by('-member_id')[0]
highest = highest_member.member_id
except IndexError:
highest = 0
candidate = highest + 1
while Member.objects.filter(member_id=candidate).count():
candidate += 1
return candidate
class MemberCommon(AsylumModel):
fname = models.CharField(_("First name"), max_length=200, blank=False)
lname = models.CharField(_("Last name"), max_length=200, blank=False)
city = models.CharField(_("City of residence"), max_length=200, blank=False)
email = models.EmailField(_("Email address"), unique=True, blank=False)
phone = models.CharField(_("Phone number"), max_length=200, blank=True)
nick = models.CharField(_("Nickname"), max_length=200, blank=True)
def __str__(self):
return '"%s, %s" <%s>' % (self.lname, self.fname, self.email)
@property
def name(self):
return "%s %s" % (self.fname, self.lname)
@property
def rname(self):
return "%s, %s" % (self.lname, self.fname)
@property
def access_acl(self):
return resolve_acl(AccessType.objects.filter(pk__in=self.access_granted.select_related('atype').values_list('atype', flat=True)))
class Meta:
abstract = True
ordering = ['lname', 'fname']
class MemberType(AsylumModel):
label = models.CharField(_("Label"), max_length=200, blank=False)
def __str__(self):
return self.label
class Meta:
verbose_name = _('Member Type')
verbose_name_plural = _('Member Types')
revisions.default_revision_manager.register(MemberType)
class Member(MemberCommon):
accepted = models.DateField(_("Date accepted"), default=timezone.now)
mtypes = models.ManyToManyField(MemberType, related_name='+', verbose_name=_("Membership types"), blank=True)
anonymized_id = models.CharField(_("Anonymized id (for use in external databases)"), max_length=24, unique=True, blank=True, null=True, default=generate_unique_randomid)
member_id = models.PositiveIntegerField(_("Member id no"), blank=True, null=True, unique=True, default=generate_unique_memberid)
@property
def credit(self):
ret = self.creditor_transactions.all().aggregate(models.Sum('amount'))['amount__sum']
        if ret is None:
return Decimal(0.0)
return ret
@call_saves('MEMBER_CALLBACKS_HANDLER')
def save(self, *args, **kwargs):
return super().save(*args, **kwargs)
class Meta:
verbose_name = _('Member')
verbose_name_plural = _('Members')
revisions.default_revision_manager.register(Member)
class MembershipApplicationTag(AsylumModel):
label = models.CharField(_("Label"), max_length=200, blank=False)
def __str__(self):
return self.label
class Meta:
verbose_name = _('Membership Application Tag')
verbose_name_plural = _('Membership Application Tags')
revisions.default_revision_manager.register(MembershipApplicationTag)
class MembershipApplication(MemberCommon):
received = models.DateField(default=timezone.now)
tags = models.ManyToManyField(MembershipApplicationTag, related_name='+', verbose_name=_("Application tags"), blank=True)
notes = MarkdownField(verbose_name=_("Notes"), blank=True)
@call_saves('MEMBERAPPLICATION_CALLBACKS_HANDLER')
def save(self, *args, **kwargs):
return super().save(*args, **kwargs)
def validate_unique(self, exclude=None):
if exclude and 'email' in exclude:
return super().validate_unique(exclude)
if Member.objects.filter(email=self.email).count():
# TODO: Figure out the exact format the default form validators use and use that ?
raise ValidationError({'email': ValidationError(_('Member with this email already exists'), code='unique')})
return super().validate_unique(exclude)
def approve(self, set_mtypes):
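        # Promote this application to a full Member inside a single transaction
        # and revision: copy the personal fields across, let the optional
        # callback handler hook in before and after saving, attach the requested
        # member types and any notes, then delete the application itself.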
h = get_handler_instance('MEMBERAPPLICATION_CALLBACKS_HANDLER')
with transaction.atomic(), revisions.create_revision():
m = Member()
m.fname = self.fname
m.lname = self.lname
m.city = self.city
m.email = self.email
m.phone = self.phone
m.nick = self.nick
if h:
h.on_approving(self, m)
m.save()
if set_mtypes:
m.mtypes = set_mtypes
m.save()
if self.notes:
n = MemberNote()
n.notes = self.notes
n.member = m
n.save()
if h:
h.on_approved(self, m)
self.delete()
class Meta:
verbose_name = _('Membership Application')
verbose_name_plural = _('Membership Applications')
revisions.default_revision_manager.register(MembershipApplication)
class MemberNote(AsylumModel):
stamp = models.DateTimeField(_("Datetime"), default=timezone.now, db_index=True)
notes = MarkdownField(verbose_name=_("Notes"), blank=False)
member = models.ForeignKey(Member, verbose_name=_("Member"), blank=True, null=True, on_delete=models.CASCADE, related_name='notes')
class Meta:
verbose_name = _('Note')
verbose_name_plural = _('Notes')
ordering = ['member__lname', 'member__fname', '-stamp']
def __str__(self):
return _("Notes about %s on %s") % (self.member, self.stamp)
revisions.default_revision_manager.register(MemberNote)
| mit |
MoisesTedeschi/python | Scripts-Python/Modulos-Diversos/python-com-scrapy/Lib/site-packages/pip/_vendor/progress/helpers.py | 24 | 2952 | # Copyright (c) 2012 Giorgos Verigakis <[email protected]>
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from __future__ import print_function
HIDE_CURSOR = '\x1b[?25l'
SHOW_CURSOR = '\x1b[?25h'
class WriteMixin(object):
hide_cursor = False
def __init__(self, message=None, **kwargs):
super(WriteMixin, self).__init__(**kwargs)
self._width = 0
if message:
self.message = message
if self.file and self.file.isatty():
if self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
print(self.message, end='', file=self.file)
self.file.flush()
def write(self, s):
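        # Redraw in place on a TTY: backspace over the previously written text,
        # then pad the new string to the widest width written so far so leftover
        # characters from a longer earlier message are overwritten.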
if self.file and self.file.isatty():
b = '\b' * self._width
c = s.ljust(self._width)
print(b + c, end='', file=self.file)
self._width = max(self._width, len(s))
self.file.flush()
def finish(self):
if self.file and self.file.isatty() and self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
class WritelnMixin(object):
hide_cursor = False
def __init__(self, message=None, **kwargs):
super(WritelnMixin, self).__init__(**kwargs)
if message:
self.message = message
if self.file and self.file.isatty() and self.hide_cursor:
print(HIDE_CURSOR, end='', file=self.file)
def clearln(self):
if self.file and self.file.isatty():
print('\r\x1b[K', end='', file=self.file)
def writeln(self, line):
if self.file and self.file.isatty():
self.clearln()
print(line, end='', file=self.file)
self.file.flush()
def finish(self):
if self.file and self.file.isatty():
print(file=self.file)
if self.hide_cursor:
print(SHOW_CURSOR, end='', file=self.file)
from signal import signal, SIGINT
from sys import exit
class SigIntMixin(object):
"""Registers a signal handler that calls finish on SIGINT"""
def __init__(self, *args, **kwargs):
super(SigIntMixin, self).__init__(*args, **kwargs)
signal(SIGINT, self._sigint_handler)
def _sigint_handler(self, signum, frame):
self.finish()
exit(0)
| gpl-3.0 |
SebDieBln/QGIS | python/plugins/processing/algs/taudem/dinfdistdown_multi.py | 12 | 5393 | # -*- coding: utf-8 -*-
"""
***************************************************************************
dinfdistdown_multi.py
---------------------
Date : March 2015
Copyright : (C) 2015 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'March 2015'
__copyright__ = '(C) 2015, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4.QtGui import QIcon
from processing.core.GeoAlgorithm import GeoAlgorithm
from processing.core.ProcessingConfig import ProcessingConfig
from processing.core.GeoAlgorithmExecutionException import \
GeoAlgorithmExecutionException
from processing.core.parameters import ParameterFile
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputDirectory
from TauDEMUtils import TauDEMUtils
class DinfDistDownMulti(GeoAlgorithm):
DINF_FLOW_DIR_GRID = 'DINF_FLOW_DIR_GRID'
PIT_FILLED_GRID = 'PIT_FILLED_GRID'
STREAM_GRID = 'STREAM_GRID'
WEIGHT_PATH_GRID = 'WEIGHT_PATH_GRID'
STAT_METHOD = 'STAT_METHOD'
DIST_METHOD = 'DIST_METHOD'
EDGE_CONTAM = 'EDGE_CONTAM'
DIST_DOWN_GRID = 'DIST_DOWN_GRID'
STATISTICS = ['Minimum', 'Maximum', 'Average']
STAT_DICT = {0: 'min', 1: 'max', 2: 'ave'}
DISTANCE = ['Pythagoras', 'Horizontal', 'Vertical', 'Surface']
DIST_DICT = {
0: 'p',
1: 'h',
2: 'v',
3: 's',
}
def getIcon(self):
return QIcon(os.path.dirname(__file__) + '/../../images/taudem.png')
def defineCharacteristics(self):
self.name, self.i18n_name = self.trAlgorithm('D-Infinity Distance Down (multifile)')
self.cmdName = 'dinfdistdown'
self.group, self.i18n_group = self.trAlgorithm('Specialized Grid Analysis tools')
self.addParameter(ParameterFile(self.DINF_FLOW_DIR_GRID,
self.tr('D-Infinity Flow Direction Grid'), True, False))
self.addParameter(ParameterFile(self.PIT_FILLED_GRID,
self.tr('Pit Filled Elevation Grid'), True, False))
self.addParameter(ParameterFile(self.STREAM_GRID,
self.tr('Stream Raster Grid'), True, False))
self.addParameter(ParameterFile(self.WEIGHT_PATH_GRID,
self.tr('Weight Path Grid'), True, True))
self.addParameter(ParameterSelection(self.STAT_METHOD,
self.tr('Statistical Method'), self.STATISTICS, 2))
self.addParameter(ParameterSelection(self.DIST_METHOD,
self.tr('Distance Method'), self.DISTANCE, 1))
self.addParameter(ParameterBoolean(self.EDGE_CONTAM,
self.tr('Check for edge contamination'), True))
self.addOutput(OutputDirectory(self.DIST_DOWN_GRID,
self.tr('D-Infinity Drop to Stream Grid')))
def processAlgorithm(self, progress):
commands = []
commands.append(os.path.join(TauDEMUtils.mpiexecPath(), 'mpiexec'))
processNum = ProcessingConfig.getSetting(TauDEMUtils.MPI_PROCESSES)
if processNum <= 0:
raise GeoAlgorithmExecutionException(
self.tr('Wrong number of MPI processes used. Please set '
'correct number before running TauDEM algorithms.'))
commands.append('-n')
commands.append(unicode(processNum))
commands.append(os.path.join(TauDEMUtils.taudemMultifilePath(), self.cmdName))
commands.append('-ang')
commands.append(self.getParameterValue(self.DINF_FLOW_DIR_GRID))
commands.append('-fel')
commands.append(self.getParameterValue(self.PIT_FILLED_GRID))
commands.append('-src')
commands.append(self.getParameterValue(self.STREAM_GRID))
wg = self.getParameterValue(self.WEIGHT_PATH_GRID)
if wg is not None:
commands.append('-wg')
commands.append(self.getParameterValue(self.WEIGHT_PATH_GRID))
commands.append('-m')
commands.append(unicode(self.STAT_DICT[self.getParameterValue(
self.STAT_METHOD)]))
commands.append(unicode(self.DIST_DICT[self.getParameterValue(
self.DIST_METHOD)]))
if not self.getParameterValue(self.EDGE_CONTAM):
commands.append('-nc')
commands.append('-dd')
commands.append(self.getOutputValue(self.DIST_DOWN_GRID))
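        # Illustrative shape of the assembled call (paths and values are examples only):
        #   mpiexec -n 4 <taudem_multifile_dir>/dinfdistdown -ang ang.tif -fel fel.tif \
        #       -src src.tif -m ave h -dd dist_down_dir
        # '-wg <grid>' and '-nc' are appended only when the corresponding options are set.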
TauDEMUtils.executeTauDEM(commands, progress)
| gpl-2.0 |
SGCreations/Flask | Work/TriviaMVA/TriviaMVA/env/Lib/site-packages/pip/_vendor/requests/packages/chardet/euctwprober.py | 2994 | 1676 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTWSMModel
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCTWSMModel)
self._mDistributionAnalyzer = EUCTWDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-TW"
| apache-2.0 |
shsingh/ansible | test/units/modules/network/fortios/test_fortios_vpn_ssl_web_user_group_bookmark.py | 21 | 7790 | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_vpn_ssl_web_user_group_bookmark
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible.modules.network.fortios.fortios_vpn_ssl_web_user_group_bookmark.Connection')
return connection_class_mock
fos_instance = FortiOSHandler(connection_mock)
def test_vpn_ssl_web_user_group_bookmark_creation(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_ssl_web_user_group_bookmark': {'name': 'default_name_3'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ssl_web_user_group_bookmark.fortios_vpn_ssl_web(input_data, fos_instance)
expected_data = {'name': 'default_name_3'
}
set_method_mock.assert_called_with('vpn.ssl.web', 'user-group-bookmark', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_vpn_ssl_web_user_group_bookmark_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_ssl_web_user_group_bookmark': {'name': 'default_name_3'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ssl_web_user_group_bookmark.fortios_vpn_ssl_web(input_data, fos_instance)
expected_data = {'name': 'default_name_3'
}
set_method_mock.assert_called_with('vpn.ssl.web', 'user-group-bookmark', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_vpn_ssl_web_user_group_bookmark_removal(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'vpn_ssl_web_user_group_bookmark': {'name': 'default_name_3'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ssl_web_user_group_bookmark.fortios_vpn_ssl_web(input_data, fos_instance)
delete_method_mock.assert_called_with('vpn.ssl.web', 'user-group-bookmark', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_vpn_ssl_web_user_group_bookmark_deletion_fails(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
delete_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
delete_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_method_result)
input_data = {
'username': 'admin',
'state': 'absent',
'vpn_ssl_web_user_group_bookmark': {'name': 'default_name_3'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ssl_web_user_group_bookmark.fortios_vpn_ssl_web(input_data, fos_instance)
delete_method_mock.assert_called_with('vpn.ssl.web', 'user-group-bookmark', mkey=ANY, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_vpn_ssl_web_user_group_bookmark_idempotent(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_ssl_web_user_group_bookmark': {'name': 'default_name_3'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ssl_web_user_group_bookmark.fortios_vpn_ssl_web(input_data, fos_instance)
expected_data = {'name': 'default_name_3'
}
set_method_mock.assert_called_with('vpn.ssl.web', 'user-group-bookmark', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_vpn_ssl_web_user_group_bookmark_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'vpn_ssl_web_user_group_bookmark': {
'random_attribute_not_valid': 'tag', 'name': 'default_name_3'
},
'vdom': 'root'}
is_error, changed, response = fortios_vpn_ssl_web_user_group_bookmark.fortios_vpn_ssl_web(input_data, fos_instance)
expected_data = {'name': 'default_name_3'
}
set_method_mock.assert_called_with('vpn.ssl.web', 'user-group-bookmark', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| gpl-3.0 |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v2_3_0/issue_adhoc_broker.py | 19 | 1697 | from ..broker import Broker
class IssueAdhocBroker(Broker):
controller = "issue_adhocs"
def generate_issue(self, **kwargs):
"""Generates an instance of a custom issue.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param DeviceID: The ID of the Device associated with the issue to be generated.
:type DeviceID: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param IssueTypeID: ID of the custom issue to be generated.
:type IssueTypeID: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param Severity: Severity of the issue that will be generated (Error, Warning, Info)
:type Severity: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param BatchID: The batch id associated with the issue.
:type BatchID: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return IssueID: The IssueID designated to the generated custom issue
:rtype IssueID: String
"""
return self.api_request(self._get_method_fullname("generate_issue"), kwargs)
| apache-2.0 |
jinnykoo/wuyisj.com | src/oscar/apps/basket/forms.py | 26 | 10178 | from django import forms
from django.conf import settings
from django.forms.models import modelformset_factory, BaseModelFormSet
from django.db.models import Sum
from django.utils.translation import ugettext_lazy as _
from oscar.core.loading import get_model
from oscar.forms import widgets
Line = get_model('basket', 'line')
Basket = get_model('basket', 'basket')
Product = get_model('catalogue', 'product')
class BasketLineForm(forms.ModelForm):
save_for_later = forms.BooleanField(
initial=False, required=False, label=_('Save for Later'))
def __init__(self, strategy, *args, **kwargs):
super(BasketLineForm, self).__init__(*args, **kwargs)
self.instance.strategy = strategy
def clean_quantity(self):
qty = self.cleaned_data['quantity']
if qty > 0:
self.check_max_allowed_quantity(qty)
self.check_permission(qty)
return qty
def check_max_allowed_quantity(self, qty):
is_allowed, reason = self.instance.basket.is_quantity_allowed(qty)
if not is_allowed:
raise forms.ValidationError(reason)
def check_permission(self, qty):
policy = self.instance.purchase_info.availability
is_available, reason = policy.is_purchase_permitted(
quantity=qty)
if not is_available:
raise forms.ValidationError(reason)
class Meta:
model = Line
fields = ['quantity']
class BaseBasketLineFormSet(BaseModelFormSet):
def __init__(self, strategy, *args, **kwargs):
self.strategy = strategy
super(BaseBasketLineFormSet, self).__init__(*args, **kwargs)
def _construct_form(self, i, **kwargs):
return super(BaseBasketLineFormSet, self)._construct_form(
i, strategy=self.strategy, **kwargs)
def _should_delete_form(self, form):
"""
Quantity of zero is treated as if the user checked the DELETE checkbox,
which results in the basket line being deleted
"""
if super(BaseBasketLineFormSet, self)._should_delete_form(form):
return True
if self.can_delete and 'quantity' in form.cleaned_data:
return form.cleaned_data['quantity'] == 0
BasketLineFormSet = modelformset_factory(
Line, form=BasketLineForm, formset=BaseBasketLineFormSet, extra=0,
can_delete=True)
class SavedLineForm(forms.ModelForm):
move_to_basket = forms.BooleanField(initial=False, required=False,
label=_('Move to Basket'))
class Meta:
model = Line
fields = ('id', 'move_to_basket')
def __init__(self, strategy, basket, *args, **kwargs):
self.strategy = strategy
self.basket = basket
super(SavedLineForm, self).__init__(*args, **kwargs)
def clean(self):
cleaned_data = super(SavedLineForm, self).clean()
if not cleaned_data['move_to_basket']:
# skip further validation (see issue #666)
return cleaned_data
# Get total quantity of all lines with this product (there's normally
# only one but there can be more if you allow product options).
lines = self.basket.lines.filter(product=self.instance.product)
current_qty = lines.aggregate(Sum('quantity'))['quantity__sum'] or 0
desired_qty = current_qty + self.instance.quantity
result = self.strategy.fetch_for_product(self.instance.product)
is_available, reason = result.availability.is_purchase_permitted(
quantity=desired_qty)
if not is_available:
raise forms.ValidationError(reason)
return cleaned_data
class BaseSavedLineFormSet(BaseModelFormSet):
def __init__(self, strategy, basket, *args, **kwargs):
self.strategy = strategy
self.basket = basket
super(BaseSavedLineFormSet, self).__init__(*args, **kwargs)
def _construct_form(self, i, **kwargs):
return super(BaseSavedLineFormSet, self)._construct_form(
i, strategy=self.strategy, basket=self.basket, **kwargs)
SavedLineFormSet = modelformset_factory(Line, form=SavedLineForm,
formset=BaseSavedLineFormSet, extra=0,
can_delete=True)
class BasketVoucherForm(forms.Form):
code = forms.CharField(max_length=128, label=_('Code'))
def __init__(self, *args, **kwargs):
super(BasketVoucherForm, self).__init__(*args, **kwargs)
def clean_code(self):
return self.cleaned_data['code'].strip().upper()
class AddToBasketForm(forms.Form):
quantity = forms.IntegerField(initial=1, min_value=1, label=_('Quantity'))
def __init__(self, basket, product, *args, **kwargs):
# Note, the product passed in here isn't necessarily the product being
# added to the basket. For child products, it is the *parent* product
# that gets passed to the form. An optional product_id param is passed
# to indicate the ID of the child product being added to the basket.
self.basket = basket
self.parent_product = product
super(AddToBasketForm, self).__init__(*args, **kwargs)
# Dynamically build fields
if product.is_parent:
self._create_parent_product_fields(product)
self._create_product_fields(product)
# Dynamic form building methods
def _create_parent_product_fields(self, product):
"""
Adds the fields for a "group"-type product (eg, a parent product with a
list of children.
Currently requires that a stock record exists for the children
"""
choices = []
disabled_values = []
for child in product.children.all():
# Build a description of the child, including any pertinent
# attributes
attr_summary = child.attribute_summary
if attr_summary:
summary = attr_summary
else:
summary = child.get_title()
# Check if it is available to buy
info = self.basket.strategy.fetch_for_product(child)
if not info.availability.is_available_to_buy:
disabled_values.append(child.id)
choices.append((child.id, summary))
self.fields['child_id'] = forms.ChoiceField(
choices=tuple(choices), label=_("Variant"),
widget=widgets.AdvancedSelect(disabled_values=disabled_values))
def _create_product_fields(self, product):
"""
Add the product option fields.
"""
for option in product.options:
self._add_option_field(product, option)
def _add_option_field(self, product, option):
"""
Creates the appropriate form field for the product option.
This is designed to be overridden so that specific widgets can be used
for certain types of options.
"""
kwargs = {'required': option.is_required}
self.fields[option.code] = forms.CharField(**kwargs)
# Cleaning
def clean_child_id(self):
try:
child = self.parent_product.children.get(
id=self.cleaned_data['child_id'])
except Product.DoesNotExist:
raise forms.ValidationError(
_("Please select a valid product"))
# To avoid duplicate SQL queries, we cache a copy of the loaded child
# product as we're going to need it later.
self.child_product = child
return self.cleaned_data['child_id']
def clean_quantity(self):
# Check that the proposed new line quantity is sensible
qty = self.cleaned_data['quantity']
basket_threshold = settings.OSCAR_MAX_BASKET_QUANTITY_THRESHOLD
if basket_threshold:
total_basket_quantity = self.basket.num_items
max_allowed = basket_threshold - total_basket_quantity
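            # e.g. with a threshold of 10 and 8 items already in the basket,
            # at most 2 more items can be accepted from this form.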
if qty > max_allowed:
raise forms.ValidationError(
_("Due to technical limitations we are not able to ship"
" more than %(threshold)d items in one order. Your"
" basket currently has %(basket)d items.")
% {'threshold': basket_threshold,
'basket': total_basket_quantity})
return qty
@property
def product(self):
"""
The actual product being added to the basket
"""
# Note, the child product attribute is saved in the clean_child_id
# method
return getattr(self, 'child_product', self.parent_product)
def clean(self):
info = self.basket.strategy.fetch_for_product(self.product)
# Check currencies are sensible
if (self.basket.currency and
info.price.currency != self.basket.currency):
raise forms.ValidationError(
_("This product cannot be added to the basket as its currency "
"isn't the same as other products in your basket"))
# Check user has permission to add the desired quantity to their
# basket.
current_qty = self.basket.product_quantity(self.product)
desired_qty = current_qty + self.cleaned_data.get('quantity', 1)
is_permitted, reason = info.availability.is_purchase_permitted(
desired_qty)
if not is_permitted:
raise forms.ValidationError(reason)
return self.cleaned_data
# Helpers
def cleaned_options(self):
"""
Return submitted options in a clean format
"""
options = []
for option in self.parent_product.options:
if option.code in self.cleaned_data:
options.append({
'option': option,
'value': self.cleaned_data[option.code]})
return options
class SimpleAddToBasketForm(AddToBasketForm):
"""
Simplified version of the add to basket form where the quantity is
defaulted to 1 and rendered in a hidden widget
"""
quantity = forms.IntegerField(
initial=1, min_value=1, widget=forms.HiddenInput, label=_('Quantity'))
| bsd-3-clause |
OXPHOS/shogun | examples/undocumented/python/streaming_vw_createcache.py | 5 | 1432 | #!/usr/bin/env python
from shogun import StreamingVwFile
from shogun import StreamingVwCacheFile
from shogun import T_SVMLIGHT
from shogun import StreamingVwFeatures
from shogun import VowpalWabbit
parameter_list=[['../data/fm_train_sparsereal.dat']]
def streaming_vw_createcache (fname):
# First creates a binary cache from an ascii data file.
# and then trains using the StreamingVwCacheFile as input
# Open the input file as a StreamingVwFile
input_file = StreamingVwFile(fname)
# Default file name will be vw_cache.dat.cache
input_file.set_write_to_cache(True)
# Tell VW that the file is in SVMLight format
# Supported types are T_DENSE, T_SVMLIGHT and T_VW
input_file.set_parser_type(T_SVMLIGHT)
## Create a StreamingVwFeatures object, `True' indicating the examples are labelled
#features = StreamingVwFeatures(input_file, True, 1024)
## Create a VW object from the features
#vw = VowpalWabbit(features)
#vw.set_no_training(True)
## Train (in this case does nothing but run over all examples)
#vw.train()
##Finally Train using the generated cache file
## Open the input cache file as a StreamingVwCacheFile
#input_file = StreamingVwCacheFile("vw_cache.dat.cache");
## The rest is exactly as for normal input
#features = StreamingVwFeatures(input_file, True, 1024);
#vw = VowpalWabbit(features)
#vw.train()
##return vw
if __name__ == "__main__":
streaming_vw_createcache(*parameter_list[0])
| gpl-3.0 |
dagwieers/ansible | lib/ansible/modules/web_infrastructure/sophos_utm/utm_proxy_frontend.py | 36 | 8794 | #!/usr/bin/python
# Copyright: (c) 2018, Johannes Brunswicker <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module: utm_proxy_frontend
author:
- Johannes Brunswicker (@MatrixCrawler)
short_description: create, update or destroy reverse_proxy frontend entry in Sophos UTM
description:
- Create, update or destroy a reverse_proxy frontend entry in Sophos UTM.
- This module needs to have the REST Ability of the UTM to be activated.
version_added: "2.8"
options:
name:
description:
- The name of the object. Will be used to identify the entry
required: true
add_content_type_header :
description:
- Whether to add the content type header or not
type: bool
default: False
address:
description:
- The reference name of the network/interface_address object.
default: REF_DefaultInternalAddress
allowed_networks:
description:
- A list of reference names for the allowed networks.
default: ['REF_NetworkAny']
certificate:
description:
- The reference name of the ca/host_key_cert object.
default: ""
comment:
description:
- An optional comment to add to the object
default: ""
disable_compression:
description:
- Whether to enable the compression
type: bool
default: False
domain:
description:
- A list of domain names for the frontend object
exceptions:
description:
- A list of exception ref names (reverse_proxy/exception)
default: []
htmlrewrite:
description:
- Whether to enable html rewrite or not
type: bool
default: False
htmlrewrite_cookies:
description:
- Whether to enable html rewrite cookie or not
type: bool
default: False
implicitredirect:
description:
- Whether to enable implicit redirection or not
type: bool
default: False
lbmethod:
description:
- Which loadbalancer method should be used
choices:
- ""
- bybusyness
- bytraffic
- byrequests
default: bybusyness
locations:
description:
- A list of location ref names (reverse_proxy/location)
default: []
port:
description:
- The frontend http port
default: 80
preservehost:
description:
- Whether to preserve host header
type: bool
default: False
profile:
description:
- The reference string of the reverse_proxy/profile
default: ""
status:
description:
- Whether to activate the frontend entry or not
type: bool
default: True
type:
description:
- Which protocol should be used
choices:
- http
- https
default: http
xheaders:
description:
- Whether to pass the host header or not
type: bool
default: False
extends_documentation_fragment:
- utm
"""
EXAMPLES = """
- name: Create utm proxy_frontend
utm_proxy_frontend:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestFrontendEntry
host: REF_OBJECT_STRING
state: present
- name: Remove utm proxy_frontend
utm_proxy_frontend:
utm_host: sophos.host.name
utm_token: abcdefghijklmno1234
name: TestFrontendEntry
state: absent
"""
RETURN = """
result:
description: The utm object that was created
returned: success
type: complex
contains:
_ref:
description: The reference name of the object
type: string
_locked:
description: Whether or not the object is currently locked
type: boolean
_type:
description: The type of the object
type: string
name:
description: The name of the object
type: string
add_content_type_header:
description: Whether to add the content type header
type: bool
address:
description: The reference name of the address
type: string
allowed_networks:
description: List of reference names of networks associated
type: list
certificate:
description: Reference name of certificate (ca/host_key_cert)
type: string
comment:
description: The comment string
type: string
disable_compression:
description: State of compression support
type: bool
domain:
description: List of hostnames
type: list
exceptions:
description: List of associated proxy exceptions
type: list
htmlrewrite:
description: State of html rewrite
type: bool
htmlrewrite_cookies:
description: Whether the html rewrite cookie will be set
type: bool
implicitredirect:
description: Whether to use implicit redirection
type: bool
lbmethod:
description: The method of loadbalancer to use
type: string
locations:
description: The reference names of reverse_proxy/locations associated with the object
type: list
port:
description: The port of the frontend connection
type: int
preservehost:
description: Preserve host header
type: bool
profile:
description: The associated reverse_proxy/profile
type: string
status:
description: Whether the frontend object is active or not
type: bool
type:
description: The connection type
type: string
xheaders:
description: The xheaders state
type: bool
"""
from ansible.module_utils.utm_utils import UTM, UTMModule
from ansible.module_utils._text import to_native
def main():
endpoint = "reverse_proxy/frontend"
key_to_check_for_changes = ["add_content_type_header", "address", "allowed_networks", "certificate",
"comment", "disable_compression", "domain", "exceptions", "htmlrewrite",
"htmlrewrite_cookies", "implicitredirect", "lbmethod", "locations",
"port", "preservehost", "profile", "status", "type", "xheaders"]
module = UTMModule(
argument_spec=dict(
name=dict(type='str', required=True),
add_content_type_header=dict(type='bool', required=False, default=False),
address=dict(type='str', required=False, default="REF_DefaultInternalAddress"),
allowed_networks=dict(type='list', elements='str', required=False, default=["REF_NetworkAny"]),
certificate=dict(type='str', required=False, default=""),
comment=dict(type='str', required=False, default=""),
disable_compression=dict(type='bool', required=False, default=False),
domain=dict(type='list', elements='str', required=False),
exceptions=dict(type='list', elements='str', required=False, default=[]),
htmlrewrite=dict(type='bool', required=False, default=False),
htmlrewrite_cookies=dict(type='bool', required=False, default=False),
implicitredirect=dict(type='bool', required=False, default=False),
lbmethod=dict(type='str', required=False, default="bybusyness",
choices=['bybusyness', 'bytraffic', 'byrequests', '']),
locations=dict(type='list', elements='str', required=False, default=[]),
port=dict(type='int', required=False, default=80),
preservehost=dict(type='bool', required=False, default=False),
profile=dict(type='str', required=False, default=""),
status=dict(type='bool', required=False, default=True),
type=dict(type='str', required=False, default="http", choices=['http', 'https']),
xheaders=dict(type='bool', required=False, default=False),
)
)
try:
UTM(module, endpoint, key_to_check_for_changes).execute()
except Exception as e:
module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
bintlabs/pygal-nostatics | pygal/css/style_css.py | 1 | 2851 | # -*- coding: utf-8 -*-
# This file is part of pygal
#
# A python svg graph plotting library
# Copyright © 2012-2014 Kozea
#
# This library is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# This library is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with pygal. If not, see <http://www.gnu.org/licenses/>.
contents = """
{{ id }}{
background-color: {{ style.background }};
}
{{ id }}path,
{{ id }}line,
{{ id }}rect,
{{ id }}circle {
-webkit-transition: {{ style.transition }};
-moz-transition: {{ style.transition }};
transition: {{ style.transition }};
}
{{ id }}.graph > .background {
fill: {{ style.background }};
}
{{ id }}.plot > .background {
fill: {{ style.plot_background }};
}
{{ id }}.graph {
fill: {{ style.foreground }};
}
{{ id }}text.no_data {
fill: {{ style.foreground_light }};
}
{{ id }}.title {
fill: {{ style.foreground_light }};
}
{{ id }}.legends .legend text {
fill: {{ style.foreground }};
}
{{ id }}.legends .legend:hover text {
fill: {{ style.foreground_light }};
}
{{ id }}.axis .line {
stroke: {{ style.foreground_light }};
}
{{ id }}.axis .guide.line {
stroke: {{ style.foreground_dark }};
}
{{ id }}.axis .major.line {
stroke: {{ style.foreground }};
}
{{ id }}.axis text.major {
stroke: {{ style.foreground_light }};
fill: {{ style.foreground_light }};
}
{{ id }}.axis.y .guides:hover .guide.line,
{{ id }}.line-graph .axis.x .guides:hover .guide.line,
{{ id }}.stackedline-graph .axis.x .guides:hover .guide.line,
{{ id }}.xy-graph .axis.x .guides:hover .guide.line {
stroke: {{ style.foreground_light }};
}
{{ id }}.axis .guides:hover text {
fill: {{ style.foreground_light }};
}
{{ id }}.reactive {
fill-opacity: {{ style.opacity }};
}
{{ id }}.reactive.active,
{{ id }}.active .reactive {
fill-opacity: {{ style.opacity_hover }};
}
{{ id }}.series text {
fill: {{ style.foreground_light }};
}
{{ id }}.tooltip rect {
fill: {{ style.plot_background }};
stroke: {{ style.foreground_light }};
}
{{ id }}.tooltip text {
fill: {{ style.foreground_light }};
}
{{ id }}.map-element {
fill: {{ style.foreground }};
stroke: {{ style.foreground_dark }} !important;
opacity: .9;
stroke-width: 3;
-webkit-transition: 250ms;
-moz-transition: 250ms;
-o-transition: 250ms;
transition: 250ms;
}
{{ id }}.map-element:hover {
opacity: 1;
stroke-width: 10;
}
{{ colors }}
"""
| lgpl-3.0 |
GitAngel/django | django/db/models/aggregates.py | 161 | 6624 | """
Classes to represent the definitions of aggregate functions.
"""
from django.core.exceptions import FieldError
from django.db.models.expressions import Func, Value
from django.db.models.fields import FloatField, IntegerField
__all__ = [
'Aggregate', 'Avg', 'Count', 'Max', 'Min', 'StdDev', 'Sum', 'Variance',
]
class Aggregate(Func):
contains_aggregate = True
name = None
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
# Aggregates are not allowed in UPDATE queries, so ignore for_save
c = super(Aggregate, self).resolve_expression(query, allow_joins, reuse, summarize)
if not summarize:
expressions = c.get_source_expressions()
for index, expr in enumerate(expressions):
if expr.contains_aggregate:
before_resolved = self.get_source_expressions()[index]
name = before_resolved.name if hasattr(before_resolved, 'name') else repr(before_resolved)
raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate" % (c.name, name, name))
c._patch_aggregate(query) # backward-compatibility support
return c
@property
def input_field(self):
return self.source_expressions[0]
@property
def default_alias(self):
expressions = self.get_source_expressions()
if len(expressions) == 1 and hasattr(expressions[0], 'name'):
return '%s__%s' % (expressions[0].name, self.name.lower())
raise TypeError("Complex expressions require an alias")
def get_group_by_cols(self):
return []
def _patch_aggregate(self, query):
"""
Helper method for patching 3rd party aggregates that do not yet support
the new way of subclassing. This method will be removed in Django 1.10.
add_to_query(query, alias, col, source, is_summary) will be defined on
legacy aggregates which, in turn, instantiates the SQL implementation of
the aggregate. In all the cases found, the general implementation of
add_to_query looks like:
def add_to_query(self, query, alias, col, source, is_summary):
klass = SQLImplementationAggregate
aggregate = klass(col, source=source, is_summary=is_summary, **self.extra)
query.aggregates[alias] = aggregate
By supplying a known alias, we can get the SQLAggregate out of the
aggregates dict, and use the sql_function and sql_template attributes
to patch *this* aggregate.
"""
if not hasattr(self, 'add_to_query') or self.function is not None:
return
placeholder_alias = "_XXXXXXXX_"
self.add_to_query(query, placeholder_alias, None, None, None)
sql_aggregate = query.aggregates.pop(placeholder_alias)
if 'sql_function' not in self.extra and hasattr(sql_aggregate, 'sql_function'):
self.extra['function'] = sql_aggregate.sql_function
if hasattr(sql_aggregate, 'sql_template'):
self.extra['template'] = sql_aggregate.sql_template
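# Illustrative sketch (not part of the original module): third-party code can add
# its own aggregate by subclassing Aggregate and setting `function`/`name`, in the
# same way Max/Min are defined below. The `Median` name, the SQL function and the
# template here are made up for the example and depend on the database backend:
#
#     class Median(Aggregate):
#         function = 'PERCENTILE_CONT'   # hypothetical; PostgreSQL-style syntax
#         name = 'Median'
#         template = '%(function)s(0.5) WITHIN GROUP (ORDER BY %(expressions)s)'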
class Avg(Aggregate):
function = 'AVG'
name = 'Avg'
def __init__(self, expression, **extra):
output_field = extra.pop('output_field', FloatField())
super(Avg, self).__init__(expression, output_field=output_field, **extra)
def as_oracle(self, compiler, connection):
if self.output_field.get_internal_type() == 'DurationField':
expression = self.get_source_expressions()[0]
from django.db.backends.oracle.functions import IntervalToSeconds, SecondsToInterval
return compiler.compile(
SecondsToInterval(Avg(IntervalToSeconds(expression)))
)
return super(Avg, self).as_sql(compiler, connection)
class Count(Aggregate):
function = 'COUNT'
name = 'Count'
template = '%(function)s(%(distinct)s%(expressions)s)'
def __init__(self, expression, distinct=False, **extra):
if expression == '*':
expression = Value(expression)
super(Count, self).__init__(
expression, distinct='DISTINCT ' if distinct else '', output_field=IntegerField(), **extra)
def __repr__(self):
return "{}({}, distinct={})".format(
self.__class__.__name__,
self.arg_joiner.join(str(arg) for arg in self.source_expressions),
'False' if self.extra['distinct'] == '' else 'True',
)
def convert_value(self, value, expression, connection, context):
if value is None:
return 0
return int(value)
class Max(Aggregate):
function = 'MAX'
name = 'Max'
class Min(Aggregate):
function = 'MIN'
name = 'Min'
class StdDev(Aggregate):
name = 'StdDev'
def __init__(self, expression, sample=False, **extra):
self.function = 'STDDEV_SAMP' if sample else 'STDDEV_POP'
super(StdDev, self).__init__(expression, output_field=FloatField(), **extra)
def __repr__(self):
return "{}({}, sample={})".format(
self.__class__.__name__,
self.arg_joiner.join(str(arg) for arg in self.source_expressions),
'False' if self.function == 'STDDEV_POP' else 'True',
)
def convert_value(self, value, expression, connection, context):
if value is None:
return value
return float(value)
class Sum(Aggregate):
function = 'SUM'
name = 'Sum'
def as_oracle(self, compiler, connection):
if self.output_field.get_internal_type() == 'DurationField':
expression = self.get_source_expressions()[0]
from django.db.backends.oracle.functions import IntervalToSeconds, SecondsToInterval
return compiler.compile(
SecondsToInterval(Sum(IntervalToSeconds(expression)))
)
return super(Sum, self).as_sql(compiler, connection)
class Variance(Aggregate):
name = 'Variance'
def __init__(self, expression, sample=False, **extra):
self.function = 'VAR_SAMP' if sample else 'VAR_POP'
super(Variance, self).__init__(expression, output_field=FloatField(), **extra)
def __repr__(self):
return "{}({}, sample={})".format(
self.__class__.__name__,
self.arg_joiner.join(str(arg) for arg in self.source_expressions),
'False' if self.function == 'VAR_POP' else 'True',
)
def convert_value(self, value, expression, connection, context):
if value is None:
return value
return float(value)
| bsd-3-clause |
andmaj/unrank-bottleneck-bench | sdsl_linear/gtest-1.6.0/scripts/pump.py | 603 | 23316 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
various number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sys
TOKEN_TABLE = [
(re.compile(r'\$var\s+'), '$var'),
(re.compile(r'\$elif\s+'), '$elif'),
(re.compile(r'\$else\s+'), '$else'),
(re.compile(r'\$for\s+'), '$for'),
(re.compile(r'\$if\s+'), '$if'),
(re.compile(r'\$range\s+'), '$range'),
(re.compile(r'\$[_A-Za-z]\w*'), '$id'),
(re.compile(r'\$\(\$\)'), '$($)'),
(re.compile(r'\$'), '$'),
(re.compile(r'\[\[\n?'), '[['),
(re.compile(r'\]\]\n?'), ']]'),
]
class Cursor:
"""Represents a position (line and column) in a text file."""
def __init__(self, line=-1, column=-1):
self.line = line
self.column = column
def __eq__(self, rhs):
return self.line == rhs.line and self.column == rhs.column
def __ne__(self, rhs):
return not self == rhs
def __lt__(self, rhs):
return self.line < rhs.line or (
self.line == rhs.line and self.column < rhs.column)
def __le__(self, rhs):
return self < rhs or self == rhs
def __gt__(self, rhs):
return rhs < self
def __ge__(self, rhs):
return rhs <= self
def __str__(self):
if self == Eof():
return 'EOF'
else:
return '%s(%s)' % (self.line + 1, self.column)
def __add__(self, offset):
return Cursor(self.line, self.column + offset)
def __sub__(self, offset):
return Cursor(self.line, self.column - offset)
def Clone(self):
"""Returns a copy of self."""
return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
"""Returns the special cursor to denote the end-of-file."""
return Cursor(-1, -1)
class Token:
"""Represents a token in a Pump source file."""
def __init__(self, start=None, end=None, value=None, token_type=None):
if start is None:
self.start = Eof()
else:
self.start = start
if end is None:
self.end = Eof()
else:
self.end = end
self.value = value
self.token_type = token_type
def __str__(self):
return 'Token @%s: \'%s\' type=%s' % (
self.start, self.value, self.token_type)
def Clone(self):
"""Returns a copy of self."""
return Token(self.start.Clone(), self.end.Clone(), self.value,
self.token_type)
def StartsWith(lines, pos, string):
"""Returns True iff the given position in lines starts with 'string'."""
return lines[pos.line][pos.column:].startswith(string)
def FindFirstInLine(line, token_table):
best_match_start = -1
for (regex, token_type) in token_table:
m = regex.search(line)
if m:
# We found regex in lines
if best_match_start < 0 or m.start() < best_match_start:
best_match_start = m.start()
best_match_length = m.end() - m.start()
best_match_token_type = token_type
if best_match_start < 0:
return None
return (best_match_start, best_match_length, best_match_token_type)
def FindFirst(lines, token_table, cursor):
"""Finds the first occurrence of any string in strings in lines."""
start = cursor.Clone()
cur_line_number = cursor.line
for line in lines[start.line:]:
if cur_line_number == start.line:
line = line[start.column:]
m = FindFirstInLine(line, token_table)
if m:
# We found a regex in line.
(start_column, length, token_type) = m
if cur_line_number == start.line:
start_column += start.column
found_start = Cursor(cur_line_number, start_column)
found_end = found_start + length
return MakeToken(lines, found_start, found_end, token_type)
cur_line_number += 1
# We failed to find str in lines
return None
def SubString(lines, start, end):
"""Returns a substring in lines."""
if end == Eof():
end = Cursor(len(lines) - 1, len(lines[-1]))
if start >= end:
return ''
if start.line == end.line:
return lines[start.line][start.column:end.column]
result_lines = ([lines[start.line][start.column:]] +
lines[start.line + 1:end.line] +
[lines[end.line][:end.column]])
return ''.join(result_lines)
def StripMetaComments(str):
"""Strip meta comments from each line in the given string."""
# First, completely remove lines containing nothing but a meta
# comment, including the trailing \n.
str = re.sub(r'^\s*\$\$.*\n', '', str)
# Then, remove meta comments from contentful lines.
return re.sub(r'\s*\$\$.*', '', str)
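# Illustrative example (not from the original source):
#   StripMetaComments('x = 1  $$ trailing meta comment\n$$ whole-line meta comment\ny = 2\n')
# evaluates to 'x = 1\ny = 2\n' -- the trailing comment is stripped from the first
# line, and the whole-line meta comment is removed along with one adjoining newline.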
def MakeToken(lines, start, end, token_type):
"""Creates a new instance of Token."""
return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = regex.search(line)
if m and not m.start():
return MakeToken(lines, pos, pos + m.end(), token_type)
else:
print 'ERROR: %s expected at %s.' % (token_type, pos)
sys.exit(1)
ID_REGEX = re.compile(r'[_A-Za-z]\w*')
EQ_REGEX = re.compile(r'=')
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')
def Skip(lines, pos, regex):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m and not m.start():
return pos + m.end()
else:
return pos
def SkipUntil(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m:
return pos + m.start()
else:
print ('ERROR: %s expected on line %s after column %s.' %
(token_type, pos.line + 1, pos.column))
sys.exit(1)
def ParseExpTokenInParens(lines, pos):
def ParseInParens(pos):
pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
pos = Skip(lines, pos, r'\(')
pos = Parse(pos)
pos = Skip(lines, pos, r'\)')
return pos
def Parse(pos):
pos = SkipUntil(lines, pos, r'\(|\)', ')')
if SubString(lines, pos, pos + 1) == '(':
pos = Parse(pos + 1)
pos = Skip(lines, pos, r'\)')
return Parse(pos)
else:
return pos
start = pos.Clone()
pos = ParseInParens(pos)
return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
if token.value.endswith('\n'):
return Token(token.start, token.end, token.value[:-1], token.token_type)
else:
return token
def TokenizeLines(lines, pos):
while True:
found = FindFirst(lines, TOKEN_TABLE, pos)
if not found:
yield MakeToken(lines, pos, Eof(), 'code')
return
if found.start == pos:
prev_token = None
prev_token_rstripped = None
else:
prev_token = MakeToken(lines, pos, found.start, 'code')
prev_token_rstripped = RStripNewLineFromToken(prev_token)
if found.token_type == '$var':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
yield eq_token
pos = Skip(lines, eq_token.end, r'\s*')
if SubString(lines, pos, pos + 2) != '[[':
exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
yield exp_token
pos = Cursor(exp_token.end.line + 1, 0)
elif found.token_type == '$for':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
elif found.token_type == '$range':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
yield MakeToken(lines, pos, dots_pos, 'exp')
yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
pos = dots_pos + 2
new_pos = Cursor(pos.line + 1, 0)
yield MakeToken(lines, pos, new_pos, 'exp')
pos = new_pos
elif found.token_type == '$':
if prev_token:
yield prev_token
yield found
exp_token = ParseExpTokenInParens(lines, found.end)
yield exp_token
pos = exp_token.end
elif (found.token_type == ']]' or found.token_type == '$if' or
found.token_type == '$elif' or found.token_type == '$else'):
if prev_token_rstripped:
yield prev_token_rstripped
yield found
pos = found.end
else:
if prev_token:
yield prev_token
yield found
pos = found.end
def Tokenize(s):
"""A generator that yields the tokens in the given string."""
if s != '':
lines = s.splitlines(True)
for token in TokenizeLines(lines, Cursor(0, 0)):
yield token
class CodeNode:
def __init__(self, atomic_code_list=None):
self.atomic_code = atomic_code_list
class VarNode:
def __init__(self, identifier=None, atomic_code=None):
self.identifier = identifier
self.atomic_code = atomic_code
class RangeNode:
def __init__(self, identifier=None, exp1=None, exp2=None):
self.identifier = identifier
self.exp1 = exp1
self.exp2 = exp2
class ForNode:
def __init__(self, identifier=None, sep=None, code=None):
self.identifier = identifier
self.sep = sep
self.code = code
class ElseNode:
def __init__(self, else_branch=None):
self.else_branch = else_branch
class IfNode:
def __init__(self, exp=None, then_branch=None, else_branch=None):
self.exp = exp
self.then_branch = then_branch
self.else_branch = else_branch
class RawCodeNode:
def __init__(self, token=None):
self.raw_code = token
class LiteralDollarNode:
def __init__(self, token):
self.token = token
class ExpNode:
def __init__(self, token, python_exp):
self.token = token
self.python_exp = python_exp
def PopFront(a_list):
head = a_list[0]
a_list[:1] = []
return head
def PushFront(a_list, elem):
a_list[:0] = [elem]
def PopToken(a_list, token_type=None):
token = PopFront(a_list)
if token_type is not None and token.token_type != token_type:
print 'ERROR: %s expected at %s' % (token_type, token.start)
print 'ERROR: %s found instead' % (token,)
sys.exit(1)
return token
def PeekToken(a_list):
if not a_list:
return None
return a_list[0]
def ParseExpNode(token):
python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
return ExpNode(token, python_exp)
def ParseElseNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
next = PeekToken(tokens)
if not next:
return None
if next.token_type == '$else':
Pop('$else')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
elif next.token_type == '$elif':
Pop('$elif')
exp = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
inner_else_node = ParseElseNode(tokens)
return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
elif not next.value.strip():
Pop('code')
return ParseElseNode(tokens)
else:
return None
def ParseAtomicCodeNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
head = PopFront(tokens)
t = head.token_type
if t == 'code':
return RawCodeNode(head)
elif t == '$var':
id_token = Pop('id')
Pop('=')
next = PeekToken(tokens)
if next.token_type == 'exp':
exp_token = Pop()
return VarNode(id_token, ParseExpNode(exp_token))
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return VarNode(id_token, code_node)
elif t == '$for':
id_token = Pop('id')
next_token = PeekToken(tokens)
if next_token.token_type == 'code':
sep_token = next_token
Pop('code')
else:
sep_token = None
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return ForNode(id_token, sep_token, code_node)
elif t == '$if':
exp_token = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
else_node = ParseElseNode(tokens)
return IfNode(ParseExpNode(exp_token), code_node, else_node)
elif t == '$range':
id_token = Pop('id')
exp1_token = Pop('exp')
Pop('..')
exp2_token = Pop('exp')
return RangeNode(id_token, ParseExpNode(exp1_token),
ParseExpNode(exp2_token))
elif t == '$id':
return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
elif t == '$($)':
return LiteralDollarNode(head)
elif t == '$':
exp_token = Pop('exp')
return ParseExpNode(exp_token)
elif t == '[[':
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
else:
PushFront(tokens, head)
return None
def ParseCodeNode(tokens):
atomic_code_list = []
while True:
if not tokens:
break
atomic_code_node = ParseAtomicCodeNode(tokens)
if atomic_code_node:
atomic_code_list.append(atomic_code_node)
else:
break
return CodeNode(atomic_code_list)
def ParseToAST(pump_src_text):
"""Convert the given Pump source text into an AST."""
tokens = list(Tokenize(pump_src_text))
code_node = ParseCodeNode(tokens)
return code_node
class Env:
def __init__(self):
self.variables = []
self.ranges = []
def Clone(self):
clone = Env()
clone.variables = self.variables[:]
clone.ranges = self.ranges[:]
return clone
def PushVariable(self, var, value):
# If value looks like an int, store it as an int.
try:
int_value = int(value)
if ('%s' % int_value) == value:
value = int_value
except Exception:
pass
self.variables[:0] = [(var, value)]
def PopVariable(self):
self.variables[:1] = []
def PushRange(self, var, lower, upper):
self.ranges[:0] = [(var, lower, upper)]
def PopRange(self):
self.ranges[:1] = []
def GetValue(self, identifier):
for (var, value) in self.variables:
if identifier == var:
return value
print 'ERROR: meta variable %s is undefined.' % (identifier,)
sys.exit(1)
def EvalExp(self, exp):
try:
result = eval(exp.python_exp)
except Exception, e:
print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
print ('ERROR: failed to evaluate meta expression %s at %s' %
(exp.python_exp, exp.token.start))
sys.exit(1)
return result
def GetRange(self, identifier):
for (var, lower, upper) in self.ranges:
if identifier == var:
return (lower, upper)
print 'ERROR: range %s is undefined.' % (identifier,)
sys.exit(1)
class Output:
def __init__(self):
self.string = ''
def GetLastLine(self):
index = self.string.rfind('\n')
if index < 0:
return ''
return self.string[index + 1:]
def Append(self, s):
self.string += s
def RunAtomicCode(env, node, output):
if isinstance(node, VarNode):
identifier = node.identifier.value.strip()
result = Output()
RunAtomicCode(env.Clone(), node.atomic_code, result)
value = result.string
env.PushVariable(identifier, value)
elif isinstance(node, RangeNode):
identifier = node.identifier.value.strip()
lower = int(env.EvalExp(node.exp1))
upper = int(env.EvalExp(node.exp2))
env.PushRange(identifier, lower, upper)
elif isinstance(node, ForNode):
identifier = node.identifier.value.strip()
if node.sep is None:
sep = ''
else:
sep = node.sep.value
(lower, upper) = env.GetRange(identifier)
for i in range(lower, upper + 1):
new_env = env.Clone()
new_env.PushVariable(identifier, i)
RunCode(new_env, node.code, output)
if i != upper:
output.Append(sep)
elif isinstance(node, RawCodeNode):
output.Append(node.raw_code.value)
elif isinstance(node, IfNode):
cond = env.EvalExp(node.exp)
if cond:
RunCode(env.Clone(), node.then_branch, output)
elif node.else_branch is not None:
RunCode(env.Clone(), node.else_branch, output)
elif isinstance(node, ExpNode):
value = env.EvalExp(node)
output.Append('%s' % (value,))
elif isinstance(node, LiteralDollarNode):
output.Append('$')
elif isinstance(node, CodeNode):
RunCode(env.Clone(), node, output)
else:
print 'BAD'
print node
sys.exit(1)
def RunCode(env, code_node, output):
for atomic_code in code_node.atomic_code:
RunAtomicCode(env, atomic_code, output)
def IsComment(cur_line):
return '//' in cur_line
def IsInPreprocessorDirevative(prev_lines, cur_line):
if cur_line.lstrip().startswith('#'):
return True
return prev_lines != [] and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
loc = line.find('//')
before_comment = line[:loc].rstrip()
if before_comment == '':
indent = loc
else:
output.append(before_comment)
indent = len(before_comment) - len(before_comment.lstrip())
prefix = indent*' ' + '// '
max_len = 80 - len(prefix)
comment = line[loc + 2:].strip()
segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
cur_line = ''
for seg in segs:
if len((cur_line + seg).rstrip()) < max_len:
cur_line += seg
else:
if cur_line.strip() != '':
output.append(prefix + cur_line.rstrip())
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
indent = len(line) - len(line.lstrip())
prefix = indent*' ' # Prefix of the current line
max_len = 80 - indent - len(line_concat) # Maximum length of the current line
new_prefix = prefix + 4*' ' # Prefix of a continuation line
new_max_len = max_len - 4 # Maximum length of a continuation line
# Prefers to wrap a line after a ',' or ';'.
segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
cur_line = '' # The current line without leading spaces.
for seg in segs:
# If the line is still too long, wrap at a space.
while cur_line == '' and len(seg.strip()) > max_len:
seg = seg.lstrip()
split_at = seg.rfind(' ', 0, max_len)
output.append(prefix + seg[:split_at].strip() + line_concat)
seg = seg[split_at + 1:]
prefix = new_prefix
max_len = new_max_len
if len((cur_line + seg).rstrip()) < max_len:
cur_line = (cur_line + seg).lstrip()
else:
output.append(prefix + cur_line.rstrip() + line_concat)
prefix = new_prefix
max_len = new_max_len
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapPreprocessorDirevative(line, output):
WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
WrapCode(line, '', output)
def IsHeaderGuardOrInclude(line):
return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or
re.match(r'^#include\s', line))
def WrapLongLine(line, output):
line = line.rstrip()
if len(line) <= 80:
output.append(line)
elif IsComment(line):
if IsHeaderGuardOrInclude(line):
# The style guide made an exception to allow long header guard lines
# and includes.
output.append(line)
else:
WrapComment(line, output)
elif IsInPreprocessorDirevative(output, line):
if IsHeaderGuardOrInclude(line):
# The style guide made an exception to allow long header guard lines
# and includes.
output.append(line)
else:
WrapPreprocessorDirevative(line, output)
else:
WrapPlainCode(line, output)
def BeautifyCode(string):
lines = string.splitlines()
output = []
for line in lines:
WrapLongLine(line, output)
output2 = [line.rstrip() for line in output]
return '\n'.join(output2) + '\n'
def ConvertFromPumpSource(src_text):
"""Return the text generated from the given Pump source text."""
ast = ParseToAST(StripMetaComments(src_text))
output = Output()
RunCode(Env(), ast, output)
return BeautifyCode(output.string)
def main(argv):
if len(argv) == 1:
print __doc__
sys.exit(1)
file_path = argv[-1]
output_str = ConvertFromPumpSource(file(file_path, 'r').read())
if file_path.endswith('.pump'):
output_file_path = file_path[:-5]
else:
output_file_path = '-'
if output_file_path == '-':
print output_str,
else:
output_file = file(output_file_path, 'w')
output_file.write('// This file was GENERATED by command:\n')
output_file.write('// %s %s\n' %
(os.path.basename(__file__), os.path.basename(file_path)))
output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
output_file.write(output_str)
output_file.close()
if __name__ == '__main__':
main(sys.argv)
| gpl-3.0 |
tonicbupt/flask-navigator | tests/test_navbar.py | 2 | 1514 | from pytest import fixture, raises
from flask.ext.navigation.navbar import NavigationBar
from flask.ext.navigation.item import Item, ItemReference
@fixture
def navbar():
navbar = NavigationBar('mybar', [
Item(u'Home', 'home'),
Item(u'News', 'news'),
])
return navbar
def test_attrs(navbar):
assert navbar.name == 'mybar'
assert len(navbar.items) == 2
def test_iterable(navbar):
iterable = iter(navbar)
item_1st = next(iterable)
assert item_1st.label == u'Home'
assert item_1st.endpoint == 'home'
item_2nd = next(iterable)
assert item_2nd.label == u'News'
assert item_2nd.endpoint == 'news'
with raises(StopIteration):
next(iterable)
item_reentry = next(iter(navbar)) # test for reentry iterable
assert item_reentry.label == u'Home'
assert item_reentry.endpoint == 'home'
def test_initializer(navbar):
@navbar.initializer
def initialize_more_items(nav):
return nav
assert navbar.initializers[0] is initialize_more_items
def test_alias_item():
navbar = NavigationBar('mybar', [
Item(u'Home', 'home'),
Item(u'News', 'news', args={'page': 1}),
], alias={
'foo': ItemReference('home'),
'bar': ItemReference('news', {'page': 1}),
'egg': ItemReference('news', {'page': 2}),
})
assert navbar.alias_item('foo').label == u'Home'
assert navbar.alias_item('bar').label == u'News'
with raises(KeyError):
navbar.alias_item('egg')
| mit |
ResearchSoftwareInstitute/MyHPOM | hs_file_types/models/base.py | 1 | 39179 | import os
import copy
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.forms.models import model_to_dict
from django.contrib.postgres.fields import HStoreField, ArrayField
from dominate.tags import div, legend, table, tr, tbody, thead, td, th, \
span, a, form, button, label, textarea, h4, input, ul, li, p
from lxml import etree
from hs_core.hydroshare.utils import get_resource_file_name_and_extension, current_site_url
from hs_core.models import ResourceFile, AbstractMetaDataElement, Coverage, CoreMetaData
class AbstractFileMetaData(models.Model):
""" base class for MyHPOM file type metadata """
# one temporal coverage and one spatial coverage
coverages = GenericRelation(Coverage)
# key/value metadata
extra_metadata = HStoreField(default={})
# keywords
keywords = ArrayField(models.CharField(max_length=100, null=True, blank=True), default=[])
# to track if any metadata element has been modified to trigger file update
is_dirty = models.BooleanField(default=False)
class Meta:
abstract = True
@classmethod
def get_metadata_model_classes(cls):
return {'coverage': Coverage}
def get_metadata_elements(self):
"""returns a list of all metadata elements (instances of AbstractMetaDataElement)
associated with this file type metadata object.
"""
return list(self.coverages.all())
def delete_all_elements(self):
self.coverages.all().delete()
self.extra_metadata = {}
self.keywords = []
self.save()
def get_html(self, include_extra_metadata=True, **kwargs):
"""Generates html for displaying all metadata elements associated with this logical file.
Subclass must override to include additional html for additional metadata it supports.
:param include_extra_metadata: a flag to control if necessary html for displaying key/value
metadata will be included
"""
root_div = div()
if self.logical_file.dataset_name:
root_div.add(self.get_dataset_name_html())
if self.keywords:
root_div.add(self.get_keywords_html())
if self.extra_metadata and include_extra_metadata:
root_div.add(self.get_key_value_metadata_html())
return root_div.render()
def get_dataset_name_html(self):
"""generates html for viewing dataset name (title)"""
if self.logical_file.dataset_name:
dataset_name_div = div(cls="col-xs-12 content-block")
with dataset_name_div:
legend("Title")
p(self.logical_file.dataset_name)
return dataset_name_div
def get_keywords_html(self):
"""generates html for viewing keywords"""
keywords_div = div()
if self.keywords:
keywords_div = div(cls="col-sm-12 content-block")
with keywords_div:
legend('Keywords')
with div(cls="tags"):
with ul(id="list-keywords-file-type", cls="tag-list custom-well"):
for kw in self.keywords:
with li():
a(kw, cls="tag")
return keywords_div
def get_key_value_metadata_html(self):
"""generates html for viewing key/vale extra metadata"""
extra_metadata_div = div()
if self.extra_metadata:
extra_metadata_div = div(cls="col-sm-12 content-block")
with extra_metadata_div:
legend('Extended Metadata')
with table(cls="hs-table table dataTable no-footer", style="width: 100%"):
with thead():
with tr(cls="header-row"):
th("Key")
th("Value")
with tbody():
for k, v in self.extra_metadata.iteritems():
with tr(data_key=k):
td(k)
td(v)
return extra_metadata_div
def get_html_forms(self, dataset_name_form=True, temporal_coverage=True, **kwargs):
"""generates html forms for all the metadata elements associated with this logical file
type
:param dataset_name_form: If True then a form for editing dataset_name (title) attribute is
included
:param temporal_coverage: if True then form elements for editing temporal coverage are
included
"""
root_div = div()
with root_div:
if dataset_name_form:
self.get_dataset_name_form()
self.get_keywords_html_form()
self.get_extra_metadata_html_form()
if temporal_coverage:
self.get_temporal_coverage_html_form()
return root_div
def get_keywords_html_form(self):
keywords_div = div(cls="col-sm-12 content-block", id="filetype-keywords")
action = "/hydroshare/hsapi/_internal/{0}/{1}/add-file-keyword-metadata/"
action = action.format(self.logical_file.__class__.__name__, self.logical_file.id)
delete_action = "/hydroshare/hsapi/_internal/{0}/{1}/delete-file-keyword-metadata/"
delete_action = delete_action.format(self.logical_file.__class__.__name__,
self.logical_file.id)
with keywords_div:
legend("Keywords")
with form(id="id-keywords-filetype", action=action, method="post",
enctype="multipart/form-data"):
input(id="id-delete-keyword-filetype-action", type="hidden",
value=delete_action)
with div(cls="tags"):
with div(id="add-keyword-wrapper", cls="input-group"):
input(id="txt-keyword-filetype", cls="form-control",
placeholder="keyword",
type="text", name="keywords")
with span(cls="input-group-btn"):
a("Add", id="btn-add-keyword-filetype", cls="btn btn-success",
type="button")
with ul(id="lst-tags-filetype", cls="custom-well tag-list"):
for kw in self.keywords:
with li(cls="tag"):
span(kw)
with a():
span(cls="glyphicon glyphicon-remove-circle icon-remove")
p("Duplicate. Keywords not added.", id="id-keywords-filetype-msg",
cls="text-danger small", style="display: none;")
def get_spatial_coverage_form(self, allow_edit=False):
return Coverage.get_spatial_html_form(resource=None, element=self.spatial_coverage,
allow_edit=allow_edit, file_type=True)
def get_temporal_coverage_form(self, allow_edit=True):
return Coverage.get_temporal_html_form(resource=None, element=self.temporal_coverage,
file_type=True, allow_edit=allow_edit)
def get_extra_metadata_html_form(self):
def get_add_keyvalue_button():
add_key_value_btn = a(cls="btn btn-success", type="button", data_toggle="modal",
data_target="#add-keyvalue-filetype-modal",
style="margin-bottom:20px;")
with add_key_value_btn:
with span(cls="glyphicon glyphicon-plus"):
span("Add Key/Value", cls="button-label")
return add_key_value_btn
if self.extra_metadata:
root_div_extra = div(cls="col-xs-12", id="filetype-extra-metadata")
with root_div_extra:
legend('Extended Metadata')
get_add_keyvalue_button()
with table(cls="hs-table table dataTable no-footer",
style="width: 100%"):
with thead():
with tr(cls="header-row"):
th("Key")
th("Value")
th("Edit/Remove")
with tbody():
counter = 0
for k, v in self.extra_metadata.iteritems():
counter += 1
with tr(data_key=k):
td(k)
td(v)
with td():
span(data_toggle="modal", data_placement="auto", title="Edit",
cls="btn-edit-icon glyphicon glyphicon-pencil "
"icon-blue table-icon",
data_target="#edit-keyvalue-filetype-modal"
"-{}".format(counter))
span(data_toggle="modal", data_placement="auto",
title="Remove",
cls="btn-remove-icon glyphicon glyphicon-trash "
"btn-remove table-icon",
data_target="#delete-keyvalue-filetype-modal"
"-{}".format(counter))
self._get_add_key_value_modal_form()
self._get_edit_key_value_modal_forms()
self._get_delete_key_value_modal_forms()
return root_div_extra
else:
root_div_extra = div(id="filetype-extra-metadata", cls="col-xs-12 content-block")
with root_div_extra:
legend('Extended Metadata')
get_add_keyvalue_button()
self._get_add_key_value_modal_form()
return root_div_extra
def get_temporal_coverage_html_form(self):
# Note: When using this form layout the context variable 'temp_form' must be
# set prior to calling the template.render(context)
root_div = div(cls="col-lg-6 col-xs-12", id="temporal-coverage-filetype")
with root_div:
with form(id="id-coverage_temporal-file-type", action="{{ temp_form.action }}",
method="post", enctype="multipart/form-data"):
div("{% crispy temp_form %}")
with div(cls="row", style="margin-top:10px;"):
with div(cls="col-md-offset-10 col-xs-offset-6 "
"col-md-2 col-xs-6"):
button("Save changes", type="button",
cls="btn btn-primary pull-right",
style="display: none;")
return root_div
def has_all_required_elements(self):
return True
@classmethod
def get_supported_element_names(cls):
return ['Coverage']
def get_required_missing_elements(self):
return []
@property
def has_metadata(self):
if not self.coverages.all() and not self.extra_metadata \
and not self.logical_file.dataset_name:
return False
return True
@property
def spatial_coverage(self):
return self.coverages.exclude(type='period').first()
@property
def temporal_coverage(self):
return self.coverages.filter(type='period').first()
def add_to_xml_container(self, container):
"""Generates xml+rdf representation of all the metadata elements associated with this
logical file type instance. Subclass must override this if it has additional metadata
elements."""
NAMESPACES = CoreMetaData.NAMESPACES
dataset_container = etree.SubElement(
container, '{%s}Dataset' % NAMESPACES['hsterms'])
rdf_Description = etree.SubElement(dataset_container, '{%s}Description' % NAMESPACES['rdf'])
dc_datatype = etree.SubElement(rdf_Description, '{%s}type' % NAMESPACES['dc'])
data_type = current_site_url() + "/terms/" + self.logical_file.data_type
dc_datatype.set('{%s}resource' % NAMESPACES['rdf'], data_type)
if self.logical_file.dataset_name:
dc_datatitle = etree.SubElement(rdf_Description, '{%s}title' % NAMESPACES['dc'])
dc_datatitle.text = self.logical_file.dataset_name
# add fileType node
for res_file in self.logical_file.files.all():
hsterms_datafile = etree.SubElement(rdf_Description,
'{%s}dataFile' % NAMESPACES['hsterms'])
rdf_dataFile_Description = etree.SubElement(hsterms_datafile,
'{%s}Description' % NAMESPACES['rdf'])
file_uri = u'{hs_url}/resource/{res_id}/data/contents/{file_name}'.format(
hs_url=current_site_url(),
res_id=self.logical_file.resource.short_id,
file_name=res_file.short_path)
rdf_dataFile_Description.set('{%s}about' % NAMESPACES['rdf'], file_uri)
dc_title = etree.SubElement(rdf_dataFile_Description,
'{%s}title' % NAMESPACES['dc'])
file_name = get_resource_file_name_and_extension(res_file)[1]
dc_title.text = file_name
dc_format = etree.SubElement(rdf_dataFile_Description, '{%s}format' % NAMESPACES['dc'])
dc_format.text = res_file.mime_type
self.add_keywords_to_xml_container(rdf_Description)
self.add_extra_metadata_to_xml_container(rdf_Description)
for coverage in self.coverages.all():
coverage.add_to_xml_container(rdf_Description)
return rdf_Description
def add_extra_metadata_to_xml_container(self, container):
"""Generates xml+rdf representation of the all the key/value metadata associated
with an instance of the logical file type"""
for key, value in self.extra_metadata.iteritems():
hsterms_key_value = etree.SubElement(
container, '{%s}extendedMetadata' % CoreMetaData.NAMESPACES['hsterms'])
hsterms_key_value_rdf_Description = etree.SubElement(
hsterms_key_value, '{%s}Description' % CoreMetaData.NAMESPACES['rdf'])
hsterms_key = etree.SubElement(hsterms_key_value_rdf_Description,
'{%s}key' % CoreMetaData.NAMESPACES['hsterms'])
hsterms_key.text = key
hsterms_value = etree.SubElement(hsterms_key_value_rdf_Description,
'{%s}value' % CoreMetaData.NAMESPACES['hsterms'])
hsterms_value.text = value
def add_keywords_to_xml_container(self, container):
"""Generates xml+rdf representation of the all the keywords associated
with an instance of the logical file type"""
for kw in self.keywords:
dc_subject = etree.SubElement(container, '{%s}subject' % CoreMetaData.NAMESPACES['dc'])
dc_subject.text = kw
def create_element(self, element_model_name, **kwargs):
# had to import here to avoid circular import
from hs_file_types.utils import update_resource_coverage_element
model_type = self._get_metadata_element_model_type(element_model_name)
kwargs['content_object'] = self
element = model_type.model_class().create(**kwargs)
if element_model_name.lower() == "coverage":
resource = element.metadata.logical_file.resource
# resource will be None in case of coverage element being
# created as part of copying a resource that supports logical file
# types
if resource is not None:
update_resource_coverage_element(resource)
return element
def update_element(self, element_model_name, element_id, **kwargs):
# had to import here to avoid circular import
from hs_file_types.utils import update_resource_coverage_element
model_type = self._get_metadata_element_model_type(element_model_name)
kwargs['content_object'] = self
model_type.model_class().update(element_id, **kwargs)
self.is_dirty = True
self.save()
if element_model_name.lower() == "coverage":
element = model_type.model_class().objects.get(id=element_id)
resource = element.metadata.logical_file.resource
update_resource_coverage_element(resource)
def delete_element(self, element_model_name, element_id):
model_type = self._get_metadata_element_model_type(element_model_name)
model_type.model_class().remove(element_id)
self.is_dirty = True
self.save()
def _get_metadata_element_model_type(self, element_model_name):
element_model_name = element_model_name.lower()
if not self._is_valid_element(element_model_name):
raise ValidationError("Metadata element type:%s is not one of the "
"supported metadata elements for %s."
% element_model_name, type(self))
unsupported_element_error = "Metadata element type:%s is not supported." \
% element_model_name
try:
model_type = ContentType.objects.get(app_label=self.model_app_label,
model=element_model_name)
except ObjectDoesNotExist:
try:
model_type = ContentType.objects.get(app_label='hs_core',
model=element_model_name)
except ObjectDoesNotExist:
raise ValidationError(unsupported_element_error)
if not issubclass(model_type.model_class(), AbstractMetaDataElement):
raise ValidationError(unsupported_element_error)
return model_type
def _is_valid_element(self, element_name):
allowed_elements = [el.lower() for el in self.get_supported_element_names()]
return element_name.lower() in allowed_elements
@classmethod
def validate_element_data(cls, request, element_name):
"""Subclass must implement this function to validate data for for the
specified metadata element (element_name)"""
raise NotImplementedError
def get_dataset_name_form(self):
form_action = "/hydroshare/hsapi/_internal/{0}/{1}/update-filetype-dataset-name/"
form_action = form_action.format(self.logical_file.__class__.__name__, self.logical_file.id)
root_div = div(cls="col-xs-12")
dataset_name = self.logical_file.dataset_name if self.logical_file.dataset_name else ""
with root_div:
with form(action=form_action, id="filetype-dataset-name",
method="post", enctype="multipart/form-data"):
div("{% csrf_token %}")
with div(cls="form-group"):
with div(cls="control-group"):
legend('Title')
with div(cls="controls"):
input(value=dataset_name,
cls="form-control input-sm textinput textInput",
id="file_dataset_name", maxlength="250",
name="dataset_name", type="text")
with div(cls="row", style="margin-top:10px;"):
with div(cls="col-md-offset-10 col-xs-offset-6 col-md-2 col-xs-6"):
button("Save changes", cls="btn btn-primary pull-right btn-form-submit",
style="display: none;", type="button")
return root_div
def _get_add_key_value_modal_form(self):
form_action = "/hydroshare/hsapi/_internal/{0}/{1}/update-file-keyvalue-metadata/"
form_action = form_action.format(self.logical_file.__class__.__name__, self.logical_file.id)
modal_div = div(cls="modal fade", id="add-keyvalue-filetype-modal", tabindex="-1",
role="dialog", aria_labelledby="add-key-value-metadata",
aria_hidden="true")
with modal_div:
with div(cls="modal-dialog", role="document"):
with div(cls="modal-content"):
with form(action=form_action, id="add-keyvalue-filetype-metadata",
method="post", enctype="multipart/form-data"):
div("{% csrf_token %}")
with div(cls="modal-header"):
button("x", type="button", cls="close", data_dismiss="modal",
aria_hidden="true")
h4("Add Key/Value Metadata", cls="modal-title",
id="add-key-value-metadata")
with div(cls="modal-body"):
with div(cls="form-group"):
with div(cls="control-group"):
label("Key", cls="control-label requiredField",
fr="file_extra_meta_name")
with div(cls="controls"):
input(cls="form-control input-sm textinput textInput",
id="file_extra_meta_name", maxlength="100",
name="name", type="text")
with div(cls="control-group"):
label("Value", cls="control-label requiredField",
fr="file_extra_meta_value")
with div(cls="controls"):
textarea(cls="form-control input-sm textarea",
cols="40", rows="10",
id="file_extra_meta_value",
style="resize: vertical;",
name="value", type="text")
with div(cls="modal-footer"):
button("Cancel", type="button", cls="btn btn-default",
data_dismiss="modal")
button("OK", type="button", cls="btn btn-primary",
id="btn-confirm-add-metadata") # TODO: TESTING
return modal_div
def _get_edit_key_value_modal_forms(self):
# TODO: See if can use one modal dialog to edit any pair of key/value
form_action = "/hydroshare/hsapi/_internal/{0}/{1}/update-file-keyvalue-metadata/"
form_action = form_action.format(self.logical_file.__class__.__name__, self.logical_file.id)
counter = 0
root_div = div(id="edit-keyvalue-filetype-modals")
with root_div:
for k, v in self.extra_metadata.iteritems():
counter += 1
modal_div = div(cls="modal fade",
id="edit-keyvalue-filetype-modal-{}".format(counter),
tabindex="-1",
role="dialog", aria_labelledby="edit-key-value-metadata",
aria_hidden="true")
with modal_div:
with div(cls="modal-dialog", role="document"):
with div(cls="modal-content"):
form_id = "edit-keyvalue-filetype-metadata-{}".format(counter)
with form(action=form_action,
id=form_id, data_counter="{}".format(counter),
method="post", enctype="multipart/form-data"):
div("{% csrf_token %}")
with div(cls="modal-header"):
button("x", type="button", cls="close", data_dismiss="modal",
aria_hidden="true")
h4("Update Key/Value Metadata", cls="modal-title",
id="edit-key-value-metadata")
with div(cls="modal-body"):
with div(cls="form-group"):
with div(cls="control-group"):
label("Key(Original)",
cls="control-label requiredField",
fr="file_extra_meta_key_original")
with div(cls="controls"):
input(value=k, readonly="readonly",
cls="form-control input-sm textinput "
"textInput",
id="file_extra_meta_key_original",
maxlength="100",
name="key_original", type="text")
with div(cls="control-group"):
label("Key", cls="control-label requiredField",
fr="file_extra_meta_key")
with div(cls="controls"):
input(value=k,
cls="form-control input-sm textinput "
"textInput",
id="file_extra_meta_key", maxlength="100",
name="key", type="text")
with div(cls="control-group"):
label("Value", cls="control-label requiredField",
fr="file_extra_meta_value")
with div(cls="controls"):
textarea(v,
cls="form-control input-sm textarea",
cols="40", rows="10",
id="file_extra_meta_value",
style="resize: vertical;",
name="value", type="text")
with div(cls="modal-footer"):
button("Cancel", type="button", cls="btn btn-default",
data_dismiss="modal")
button("OK", id="btn-confirm-edit-key-value",
type="button", cls="btn btn-primary")
return root_div
def _get_delete_key_value_modal_forms(self):
form_action = "/hydroshare/hsapi/_internal/{0}/{1}/delete-file-keyvalue-metadata/"
form_action = form_action.format(self.logical_file.__class__.__name__, self.logical_file.id)
counter = 0
root_div = div(id="delete-keyvalue-filetype-modals")
with root_div:
for k, v in self.extra_metadata.iteritems():
counter += 1
modal_div = div(cls="modal fade",
id="delete-keyvalue-filetype-modal-{}".format(counter),
tabindex="-1",
role="dialog", aria_labelledby="delete-key-value-metadata",
aria_hidden="true")
with modal_div:
with div(cls="modal-dialog", role="document"):
with div(cls="modal-content"):
form_id = "delete-keyvalue-filetype-metadata-{}".format(counter)
with form(action=form_action,
id=form_id,
method="post", enctype="multipart/form-data"):
div("{% csrf_token %}")
with div(cls="modal-header"):
button("x", type="button", cls="close", data_dismiss="modal",
aria_hidden="true")
h4("Confirm to Delete Key/Value Metadata", cls="modal-title",
id="delete-key-value-metadata")
with div(cls="modal-body"):
with div(cls="form-group"):
with div(cls="control-group"):
label("Key", cls="control-label requiredField",
fr="file_extra_meta_name")
with div(cls="controls"):
input(cls="form-control input-sm textinput "
"textInput", value=k,
id="file_extra_meta_key", maxlength="100",
name="key", type="text", readonly="readonly")
with div(cls="control-group"):
label("Value", cls="control-label requiredField",
fr="file_extra_meta_value")
with div(cls="controls"):
textarea(v, cls="form-control input-sm textarea",
cols="40", rows="10",
id="file_extra_meta_value",
style="resize: vertical;",
name="value", type="text",
readonly="readonly")
with div(cls="modal-footer"):
button("Cancel", type="button", cls="btn btn-default",
data_dismiss="modal")
button("Delete", type="button", cls="btn btn-danger",
id="btn-delete-key-value") # TODO: TESTING
return root_div
class AbstractLogicalFile(models.Model):
""" base class for MyHPOM file types """
# files associated with this logical file group
files = GenericRelation(ResourceFile, content_type_field='logical_file_content_type',
object_id_field='logical_file_object_id')
# the dataset name will allow us to identify a logical file group on user interface
dataset_name = models.CharField(max_length=255, null=True, blank=True)
# this will be used for dc:type in resourcemetadata.xml
# each specific logical type needs to reset this field
# also this data type needs to be defined in in terms.html page
data_type = "Generic"
class Meta:
abstract = True
@classmethod
def get_allowed_uploaded_file_types(cls):
# any file can be part of this logical file group - subclass needs to override this
return [".*"]
@classmethod
def get_allowed_storage_file_types(cls):
# can store any file types in this logical file group - subclass needs to override this
return [".*"]
@classmethod
def type_name(cls):
return cls.__name__
@property
def has_metadata(self):
return hasattr(self, 'metadata')
@property
def size(self):
# get total size (in bytes) of all files in this file type
return sum([f.size for f in self.files.all()])
@property
def resource(self):
res_file = self.files.all().first()
if res_file is not None:
return res_file.resource
else:
return None
@property
def supports_resource_file_move(self):
"""allows a resource file that is part of this logical file type to be moved"""
return True
@property
def supports_resource_file_add(self):
"""allows a resource file to be added"""
return True
@property
def supports_resource_file_rename(self):
"""allows a resource file that is part of this logical file type to be renamed"""
return True
@property
def supports_zip(self):
"""allows a folder containing resource file(s) that are part of this logical file type
to be zipped"""
return True
@property
def supports_delete_folder_on_zip(self):
"""allows the original folder to be deleted upon zipping of that folder"""
return True
@property
def supports_unzip(self):
"""allows a zip file that is part of this logical file type to get unzipped"""
return True
def add_resource_file(self, res_file):
"""Makes a ResourceFile (res_file) object part of this logical file object. If res_file
is already associated with any other logical file object, this function does not do
anything to that logical object. The caller needs to take necessary action for the
previously associated logical file object. If res_file is already part of this
logical file, it raises ValidationError.
:param res_file an instance of ResourceFile
"""
if res_file in self.files.all():
raise ValidationError("Resource file is already part of this logical file.")
res_file.logical_file_content_object = self
res_file.save()
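# Illustrative usage sketch (hypothetical objects, not from the original code):
#
#     logical_file = GenericLogicalFile.create()
#     logical_file.add_resource_file(some_resource_file)   # associates the file
#     logical_file.add_resource_file(some_resource_file)   # raises ValidationError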
# TODO: unit test this
def reset_to_generic(self, user):
"""
This sets all files in this logical file group to GenericLogicalFile type
:param user: user who is re-setting to generic file type
:return:
"""
from .generic import GenericLogicalFile
for res_file in self.files.all():
if res_file.has_logical_file:
res_file.logical_file.logical_delete(user=user, delete_res_files=False)
logical_file = GenericLogicalFile.create()
res_file.logical_file_content_object = logical_file
res_file.save()
def get_copy(self):
"""creates a copy of this logical file object with associated metadata needed to support
resource copy.
Note: This copied logical file however does not have any association with resource files
"""
copy_of_logical_file = type(self).create()
copy_of_logical_file.dataset_name = self.dataset_name
copy_of_logical_file.metadata.extra_metadata = copy.deepcopy(self.metadata.extra_metadata)
copy_of_logical_file.metadata.keywords = self.metadata.keywords
copy_of_logical_file.metadata.save()
copy_of_logical_file.save()
# copy the metadata elements
elements_to_copy = self.metadata.get_metadata_elements()
for element in elements_to_copy:
element_args = model_to_dict(element)
element_args.pop('content_type')
element_args.pop('id')
element_args.pop('object_id')
copy_of_logical_file.metadata.create_element(element.term, **element_args)
return copy_of_logical_file
@classmethod
def compute_file_type_folder(cls, resource, file_folder, file_name):
"""
Computes the new folder path where the file type files will be stored
:param resource: an instance of BaseResource
:param file_folder: current file folder of the file which is being set to a specific file
type
:param file_name: name of the file (without extension) which is being set to a specific
file type
:return: computed new folder path
"""
current_folder_path = 'data/contents'
if file_folder is not None:
current_folder_path = os.path.join(current_folder_path, file_folder)
new_folder_path = os.path.join(current_folder_path, file_name)
# To avoid folder creation failure when there is already matching
# directory path, first check that the folder does not exist
# If folder path exists then change the folder name by adding a number
# to the end
istorage = resource.get_irods_storage()
counter = 0
while istorage.exists(os.path.join(resource.short_id, new_folder_path)):
new_file_name = file_name + "_{}".format(counter)
new_folder_path = os.path.join(current_folder_path, new_file_name)
counter += 1
return new_folder_path
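# Illustrative example (values made up): for file_folder='my_folder' and
# file_name='logan', the computed path is 'data/contents/my_folder/logan';
# if that folder already exists in iRODS storage, the name is suffixed with a
# counter, e.g. 'data/contents/my_folder/logan_0'.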
def logical_delete(self, user, delete_res_files=True):
"""
Deletes the logical file as well as all resource files associated with this logical file.
This function is primarily used by the system to delete logical file object and associated
metadata as part of deleting a resource file object. Any time a request is made to
delete a specific resource file object, if the requested file is part of a
logical file then all files in the same logical file group will be deleted. If custom logic
requires deleting logical file object (LFO) then instead of using LFO.delete(), you must
use LFO.logical_delete()
:param delete_res_files: If True all resource files that are part of this logical file will
be deleted
"""
from hs_core.hydroshare.resource import delete_resource_file
# delete all resource files associated with this instance of logical file
if delete_res_files:
for f in self.files.all():
delete_resource_file(f.resource.short_id, f.id, user,
delete_logical_file=False)
# delete logical file first then delete the associated metadata file object
# deleting the logical file object will not automatically delete the associated
# metadata file object
metadata = self.metadata if self.has_metadata else None
super(AbstractLogicalFile, self).delete()
if metadata is not None:
# this should also delete on all metadata elements that have generic relations with
# the metadata object
metadata.delete()
| bsd-3-clause |
forrestv/pyderiv | deriv.py | 1 | 5500 | # This file is part of pyderiv. http://forre.st/pyderiv
#
# pyderiv is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pyderiv is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pyderiv. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import math
import itertools
import _deriv
class Variable(object):
def __init__(self, v):
self.v = v
def __add__(self, other):
if not isinstance(other, (Variable, int, long, float)): return NotImplemented
if isinstance(other, Variable): other = other.v
return Variable(_deriv._add(self.v, other))
__radd__ = __add__
def __sub__(self, other):
if not isinstance(other, (Variable, int, long, float)): return NotImplemented
if isinstance(other, Variable): other = other.v
return Variable(_deriv._sub(self.v, other))
def __rsub__(self, other):
if not isinstance(other, (Variable, int, long, float)): return NotImplemented
if isinstance(other, Variable): other = other.v
return Variable(_deriv._sub(other, self.v))
def __mul__(self, other):
if not isinstance(other, (Variable, int, long, float)): return NotImplemented
if isinstance(other, Variable): other = other.v
return Variable(_deriv._mul(self.v, other))
__rmul__ = __mul__
def __div__(self, other):
if not isinstance(other, (Variable, int, long, float)): return NotImplemented
if isinstance(other, Variable): other = other.v
return Variable(_deriv._div(self.v, other))
__truediv__ = __div__
def __rdiv__(self, other):
if not isinstance(other, (Variable, int, long, float)): return NotImplemented
if isinstance(other, Variable): other = other.v
return Variable(_deriv._div(other, self.v))
__rtruediv__ = __rdiv__
def __pow__(self, other):
if not isinstance(other, (Variable, int, long, float)): return NotImplemented
if isinstance(other, Variable): other = other.v
return Variable(_deriv._pow(self.v, other))
def __rpow__(self, other):
if not isinstance(other, (Variable, int, long, float)): return NotImplemented
if isinstance(other, Variable): other = other.v
return Variable(_deriv._pow(other, self.v))
def __repr__(self):
return "Variable(%r)" % (self.v,)
id_generator = itertools.count()
def varying(value, d_count=1, noise=False):
assert d_count >= 0
if d_count == 0:
return value
o = id_generator.next()
if noise:
o = ~o
v = (value, {o: 1})
p = v
for i in xrange(d_count - 1):
p[1][o] = (p[1][o], {o: 0})
p = p[1][o]
return Variable(v)
def sin(self):
if isinstance(self, Variable):
return Variable(_deriv._sin(self.v))
else:
return math.sin(self)
def cos(self):
if isinstance(self, Variable):
return Variable(_deriv._cos(self.v))
else:
return math.cos(self)
def log(self):
if isinstance(self, Variable):
return Variable(_deriv.log(self.v))
else:
return math.log(self)
def v(self):
if not isinstance(self, Variable):
return self
return _deriv._v(self.v)
def d(self, *others):
if not isinstance(self, Variable):
return 0
for other in others:
assert isinstance(other, Variable)
assert len(other.v[1]) == 1
self = Variable(_deriv._d(self.v).get(other.v[1].keys()[0], 0))
return self
def get_matrix(output, input, n=1):
return [[v(d(y, *(x,)*n)) for x in input] for y in output]
def matrix_wrapper(n):
def b(f):
def a(args, *extra_args, **extra_kwargs):
args2 = [varying(x) for x in args]
result2 = f(args2, *extra_args, **extra_kwargs)
result = result2[0]
return (([v(x) for x in result], get_matrix(result, args2, n)),) + tuple(result2[1:])
return a
return b
jacobian_decorator = matrix_wrapper(1)
hessian_decorator = matrix_wrapper(2)
if __name__ == "__main__":
w, x, y, z = [varying(n) for n in [3, 4, 5, 6]]
#q = w / x + y * z + 1 / w
q = \
(w + 4) + (4 + w) + (w + x) + (x + w) + \
(w - 4) + (4 - w) + (w - x) + (x - w) + \
(w * 4) + (4 * w) + (w * x) + (x * w) + \
(w / 4) + (4 / w) + (w / x) + (x / w) + \
(w **4) + (4 **w) + (w **x) + (x **w) + \
0
#q = w ** x
print q
print "q", v(q)
print "dq/dw", v(d(q, w))
print "dq/dx", v(d(q, x))
print "dq/dy", v(d(q, y))
print "dq/dz", v(d(q, z))
print "ddq/dw", v(d(q, w, w))
print "ddq/dx", v(d(q, x, x))
print "ddq/dy", v(d(q, y, y))
print "ddq/dz", v(d(q, z, z))
def f((a, b, c)):
return ((a + b + c, a * b * c, a * b + c, a + b * c, a * a + b * b + c * c), 5)
j = jacobian_decorator(f)
h = hessian_decorator(f)
print f((1., 2., 3.))
print j((1., 2., 3.))
print h((1., 2., 3.))
| gpl-3.0 |
David-Amaro/bank-payment | __unported__/account_banking/migrations/6.1.0.1.81/post-set-statement-line-state.py | 14 | 1439 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2011 Therp BV (<http://therp.nl>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
""" r81: introduction of bank statement line state
"""
__name__ = ("account.bank.statement.line:: set new field 'state' to "
"confirmed for all statement lines belonging to confirmed "
"statements")
def migrate(cr, version):
cr.execute("UPDATE account_bank_statement_line as sl "
" SET state = 'confirmed'"
" FROM account_bank_statement as s "
" WHERE sl.statement_id = s.id "
" AND s.state = 'confirm' "
)
| agpl-3.0 |
chrta/canfestival-3-ct | objdictgen/eds_utils.py | 2 | 40678 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#This file is part of CanFestival, a library implementing CanOpen Stack.
#
#Copyright (C): Edouard TISSERANT, Francis DUPIN and Laurent BESSARD
#
#See COPYING file for copyrights details.
#
#This library is free software; you can redistribute it and/or
#modify it under the terms of the GNU Lesser General Public
#License as published by the Free Software Foundation; either
#version 2.1 of the License, or (at your option) any later version.
#
#This library is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
#Lesser General Public License for more details.
#
#You should have received a copy of the GNU Lesser General Public
#License along with this library; if not, write to the Free Software
#Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
import node
from node import nosub, var, array, rec, plurivar, pluriarray, plurirec
try:
set
except NameError:
from sets import Set as set
from types import *
from time import *
import os,re
# Regular expression for finding index section names
index_model = re.compile('([0-9A-F]{1,4}$)')
# Regular expression for finding subindex section names
subindex_model = re.compile('([0-9A-F]{1,4})SUB([0-9A-F]{1,2}$)')
# Regular expression for finding index section names
index_objectlinks_model = re.compile('([0-9A-F]{1,4}OBJECTLINKS$)')
# Regular expression for finding NodeXPresent keynames
nodepresent_model = re.compile('NODE([0-9]{1,3})PRESENT$')
# Regular expression for finding NodeXName keynames
nodename_model = re.compile('NODE([0-9]{1,3})NAME$')
# Regular expression for finding NodeXDCFName keynames
nodedcfname_model = re.compile('NODE([0-9]{1,3})DCFNAME$')
# Dictionary for quickly translate boolean into integer value
BOOL_TRANSLATE = {True : "1", False : "0"}
# Dictionary for quickly translate eds access value into canfestival access value
ACCESS_TRANSLATE = {"RO" : "ro", "WO" : "wo", "RW" : "rw", "RWR" : "rw", "RWW" : "rw", "CONST" : "ro"}
# Function for verifying data values
is_integer = lambda x: type(x) in (IntType, LongType)
is_string = lambda x: type(x) in (StringType, UnicodeType)
is_boolean = lambda x: x in (0, 1)
# Define checking of value for each attribute
ENTRY_ATTRIBUTES = {"SUBNUMBER" : is_integer,
"PARAMETERNAME" : is_string,
"OBJECTTYPE" : lambda x: x in (2, 7, 8, 9),
"DATATYPE" : is_integer,
"LOWLIMIT" : is_integer,
"HIGHLIMIT" : is_integer,
"ACCESSTYPE" : lambda x: x.upper() in ACCESS_TRANSLATE.keys(),
"DEFAULTVALUE" : lambda x: True,
"PDOMAPPING" : is_boolean,
"OBJFLAGS" : is_integer,
"PARAMETERVALUE" : lambda x: True,
"UPLOADFILE" : is_string,
"DOWNLOADFILE" : is_string}
# Define entry parameters by entry ObjectType number
ENTRY_TYPES = {2 : {"name" : " DOMAIN",
"require" : ["PARAMETERNAME", "OBJECTTYPE"],
"optional" : ["DATATYPE", "ACCESSTYPE", "DEFAULTVALUE", "OBJFLAGS"]},
7 : {"name" : " VAR",
"require" : ["PARAMETERNAME", "DATATYPE", "ACCESSTYPE"],
"optional" : ["OBJECTTYPE", "DEFAULTVALUE", "PDOMAPPING", "LOWLIMIT", "HIGHLIMIT", "OBJFLAGS", "PARAMETERVALUE"]},
8 : {"name" : "n ARRAY",
"require" : ["PARAMETERNAME", "OBJECTTYPE", "SUBNUMBER"],
"optional" : ["OBJFLAGS"]},
9 : {"name" : " RECORD",
"require" : ["PARAMETERNAME", "OBJECTTYPE", "SUBNUMBER"],
"optional" : ["OBJFLAGS"]}}
# Function that searches the Node mappings for the information about an index or a subindex
# and returns its default value
def GetDefaultValue(Node, index, subIndex = None):
infos = Node.GetEntryInfos(index)
if infos["struct"] & node.OD_MultipleSubindexes:
# First case entry is a record
if infos["struct"] & node.OD_IdenticalSubindexes:
subentry_infos = Node.GetSubentryInfos(index, 1)
# Second case entry is an array
else:
subentry_infos = Node.GetSubentryInfos(index, subIndex)
# If a default value is defined for this subindex, returns it
if "default" in subentry_infos:
return subentry_infos["default"]
# If not, returns the default value for the subindex type
else:
return Node.GetTypeDefaultValue(subentry_infos["type"])
# Third case entry is a var
else:
subentry_infos = Node.GetSubentryInfos(index, 0)
# If a default value is defined for this subindex, returns it
if "default" in subentry_infos:
return subentry_infos["default"]
# If not, returns the default value for the subindex type
else:
return Node.GetTypeDefaultValue(subentry_infos["type"])
return None
#-------------------------------------------------------------------------------
# Parse file
#-------------------------------------------------------------------------------
# List of section names that are not index and subindex and that we can meet in
# an EDS file
SECTION_KEYNAMES = ["FILEINFO", "DEVICEINFO", "DUMMYUSAGE", "COMMENTS",
"MANDATORYOBJECTS", "OPTIONALOBJECTS", "MANUFACTUREROBJECTS",
"STANDARDDATATYPES", "SUPPORTEDMODULES"]
# Function that extract sections from a file and returns a dictionary of the informations
def ExtractSections(file):
return [(blocktuple[0], # EntryName : Assignements dict
blocktuple[-1].splitlines()) # all the lines
for blocktuple in [ # Split the eds files into
block.split("]", 1) # (EntryName,Assignements) tuple
for block in # for each blocks staring with '['
("\n"+file).split("\n[")]
if blocktuple[0].isalnum()] # if EntryName exists
# Function that parses a CPJ file and returns a list of network topology dictionaries
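# For illustration only, a minimal [TOPOLOGY] section of the kind this parser
# accepts (network name and node ids below are just examples) looks roughly like:
#
#   [TOPOLOGY]
#   NetName=TestNetwork
#   Nodes=0x01
#   Node1Present=0x01
#   Node1Name=slave_node
#   Node1DCFName=slave_node.eds
#   EDSBaseName=eds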
def ParseCPJFile(filepath):
networks = []
# Read file text
cpj_file = open(filepath,'r').read()
sections = ExtractSections(cpj_file)
# Parse assignments for each section
for section_name, assignments in sections:
# Verify that section name is TOPOLOGY
if section_name.upper() in "TOPOLOGY":
# Reset values for topology
topology = {"Name" : "", "Nodes" : {}}
for assignment in assignments:
# Escape any comment
if assignment.startswith(";"):
pass
# Verify that line is a valid assignment
elif assignment.find('=') > 0:
# Split assignment into the two values keyname and value
keyname, value = assignment.split("=", 1)
# keyname must be immediately followed by the "=" sign, so we
# verify that there is no whitespace into keyname
if keyname.isalnum():
# value can be preceded and followed by whitespaces, so we escape them
value = value.strip()
# First case, value starts with "0x" or "-0x", then it's an hexadecimal value
if value.startswith("0x") or value.startswith("-0x"):
try:
computed_value = int(value, 16)
except:
raise SyntaxError, _("\"%s\" is not a valid value for attribute \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
elif value.isdigit() or value.startswith("-") and value[1:].isdigit():
# Second case, value is a number and starts with "0" or "-0", then it's an octal value
if value.startswith("0") or value.startswith("-0"):
computed_value = int(value, 8)
# Third case, value is a number and don't start with "0", then it's a decimal value
else:
computed_value = int(value)
# In any other case, we keep string value
else:
computed_value = value
# Search if the section name match any cpj expression
nodepresent_result = nodepresent_model.match(keyname.upper())
nodename_result = nodename_model.match(keyname.upper())
nodedcfname_result = nodedcfname_model.match(keyname.upper())
if keyname.upper() == "NETNAME":
if not is_string(computed_value):
raise SyntaxError, _("Invalid value \"%s\" for keyname \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
topology["Name"] = computed_value
elif keyname.upper() == "NODES":
if not is_integer(computed_value):
raise SyntaxError, _("Invalid value \"%s\" for keyname \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
topology["Number"] = computed_value
elif keyname.upper() == "EDSBASENAME":
if not is_string(computed_value):
raise SyntaxError, _("Invalid value \"%s\" for keyname \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
topology["Path"] = computed_value
elif nodepresent_result:
if not is_boolean(computed_value):
raise SyntaxError, _("Invalid value \"%s\" for keyname \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
nodeid = int(nodepresent_result.groups()[0])
if nodeid not in topology["Nodes"].keys():
topology["Nodes"][nodeid] = {}
topology["Nodes"][nodeid]["Present"] = computed_value
elif nodename_result:
if not is_string(value):
raise SyntaxError, _("Invalid value \"%s\" for keyname \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
nodeid = int(nodename_result.groups()[0])
if nodeid not in topology["Nodes"].keys():
topology["Nodes"][nodeid] = {}
topology["Nodes"][nodeid]["Name"] = computed_value
elif nodedcfname_result:
if not is_string(computed_value):
raise SyntaxError, _("Invalid value \"%s\" for keyname \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
nodeid = int(nodedcfname_result.groups()[0])
if nodeid not in topology["Nodes"].keys():
topology["Nodes"][nodeid] = {}
topology["Nodes"][nodeid]["DCFName"] = computed_value
else:
raise SyntaxError, _("Keyname \"%s\" not recognised for section \"[%s]\"")%(keyname, section_name)
# All lines that are not empty and are neither a comment neither not a valid assignment
elif assignment.strip() != "":
raise SyntaxError, _("\"%s\" is not a valid CPJ line")%assignment.strip()
if "Number" not in topology.keys():
raise SyntaxError, _("\"Nodes\" keyname in \"[%s]\" section is missing")%section_name
if topology["Number"] != len(topology["Nodes"]):
raise SyntaxError, _("\"Nodes\" value not corresponding to number of nodes defined")
for nodeid, node in topology["Nodes"].items():
if "Present" not in node.keys():
raise SyntaxError, _("\"Node%dPresent\" keyname in \"[%s]\" section is missing")%(nodeid, section_name)
networks.append(topology)
# In other case, there is a syntax problem into CPJ file
else:
raise SyntaxError, _("Section \"[%s]\" is unrecognized")%section_name
return networks
# Function that parses an EDS file and returns a dictionary of the information
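# For illustration only, the index and subindex sections this parser expects
# (index number, names and values below are just examples) look roughly like:
#
#   [2001]
#   ParameterName=Example Array
#   ObjectType=0x8
#   SubNumber=2
#
#   [2001sub0]
#   ParameterName=Number of Entries
#   ObjectType=0x7
#   DataType=0x0005
#   AccessType=ro
#   DefaultValue=1
#   PDOMapping=0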
def ParseEDSFile(filepath):
eds_dict = {}
# Read file text
eds_file = open(filepath,'r').read()
sections = ExtractSections(eds_file)
# Parse assignments for each section
for section_name, assignments in sections:
# Reset values of entry
values = {}
# Search if the section name match an index or subindex expression
index_result = index_model.match(section_name.upper())
subindex_result = subindex_model.match(section_name.upper())
index_objectlinks_result = index_objectlinks_model.match(section_name.upper())
# Compilation of the EDS information dictionary
is_entry = False
# First case, section name is in SECTION_KEYNAMES
if section_name.upper() in SECTION_KEYNAMES:
# Verify that entry is not already defined
if section_name.upper() not in eds_dict:
eds_dict[section_name.upper()] = values
else:
raise SyntaxError, _("\"[%s]\" section is defined two times")%section_name
# Second case, section name is an index name
elif index_result:
# Extract index number
index = int(index_result.groups()[0], 16)
# If index hasn't been referenced before, we add an entry into the dictionary
if index not in eds_dict:
eds_dict[index] = values
eds_dict[index]["subindexes"] = {}
elif eds_dict[index].keys() == ["subindexes"]:
values["subindexes"] = eds_dict[index]["subindexes"]
eds_dict[index] = values
else:
raise SyntaxError, _("\"[%s]\" section is defined two times")%section_name
is_entry = True
# Third case, section name is a subindex name
elif subindex_result:
# Extract index and subindex number
index, subindex = [int(value, 16) for value in subindex_result.groups()]
# If index hasn't been referenced before, we add an entry into the dictionary
# that will be updated later
if index not in eds_dict:
eds_dict[index] = {"subindexes" : {}}
if subindex not in eds_dict[index]["subindexes"]:
eds_dict[index]["subindexes"][subindex] = values
else:
raise SyntaxError, _("\"[%s]\" section is defined two times")%section_name
is_entry = True
# Third case, section name is a subindex name
elif index_objectlinks_result:
pass
# In any other case, there is a syntax problem into EDS file
else:
raise SyntaxError, _("Section \"[%s]\" is unrecognized")%section_name
for assignment in assignments:
# Escape any comment
if assignment.startswith(";"):
pass
# Verify that line is a valid assignment
elif assignment.find('=') > 0:
# Split assignment into the two values keyname and value
keyname, value = assignment.split("=", 1)
# keyname must be immediately followed by the "=" sign, so we
# verify that there is no whitespace into keyname
if keyname.isalnum():
# value can be preceded and followed by whitespaces, so we escape them
value = value.strip().replace(" ", "")
# First case, value starts with "$NODEID", then it's a formula
if value.upper().startswith("$NODEID"):
try:
test = int(value.upper().replace("$NODEID+", ""), 16)
computed_value = "\"%s\""%value
except:
raise SyntaxError, _("\"%s\" is not a valid formula for attribute \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
# Second case, value starts with "0x", then it's an hexadecimal value
elif value.startswith("0x") or value.startswith("-0x"):
try:
computed_value = int(value, 16)
except:
raise SyntaxError, _("\"%s\" is not a valid value for attribute \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
elif value.isdigit() or value.startswith("-") and value[1:].isdigit():
# Third case, value is a number and starts with "0", then it's an octal value
if value.startswith("0") or value.startswith("-0"):
computed_value = int(value, 8)
# Forth case, value is a number and don't start with "0", then it's a decimal value
else:
computed_value = int(value)
# In any other case, we keep string value
else:
computed_value = value
# Add value to values dictionary
if computed_value != "":
# If entry is an index or a subindex
if is_entry:
# Verify that keyname is a possible attribute
if keyname.upper() not in ENTRY_ATTRIBUTES:
raise SyntaxError, _("Keyname \"%s\" not recognised for section \"[%s]\"")%(keyname, section_name)
# Verify that value is valid
elif not ENTRY_ATTRIBUTES[keyname.upper()](computed_value):
raise SyntaxError, _("Invalid value \"%s\" for keyname \"%s\" of section \"[%s]\"")%(value, keyname, section_name)
else:
values[keyname.upper()] = computed_value
else:
values[keyname.upper()] = computed_value
# All lines that are not empty and are neither a comment neither not a valid assignment
elif assignment.strip() != "":
raise SyntaxError, _("\"%s\" is not a valid EDS line")%assignment.strip()
# If entry is an index or a subindex
if is_entry:
# Verify that entry has an ObjectType
values["OBJECTTYPE"] = values.get("OBJECTTYPE", 7)
# Extract parameters defined
keys = set(values.keys())
keys.discard("subindexes")
# Extract possible parameters and parameters required
possible = set(ENTRY_TYPES[values["OBJECTTYPE"]]["require"] +
ENTRY_TYPES[values["OBJECTTYPE"]]["optional"])
required = set(ENTRY_TYPES[values["OBJECTTYPE"]]["require"])
# Verify that parameters defined contains all the parameters required
if not keys.issuperset(required):
missing = required.difference(keys)._data.keys()
if len(missing) > 1:
attributes = _("Attributes %s are")%_(", ").join(["\"%s\""%attribute for attribute in missing])
else:
attributes = _("Attribute \"%s\" is")%missing[0]
raise SyntaxError, _("Error on section \"[%s]\":\n%s required for a %s entry")%(section_name, attributes, ENTRY_TYPES[values["OBJECTTYPE"]]["name"])
# Verify that parameters defined are all in the possible parameters
if not keys.issubset(possible):
unsupported = keys.difference(possible)._data.keys()
if len(unsupported) > 1:
attributes = _("Attributes %s are")%_(", ").join(["\"%s\""%attribute for attribute in unsupported])
else:
attributes = _("Attribute \"%s\" is")%unsupported[0]
raise SyntaxError, _("Error on section \"[%s]\":\n%s unsupported for a %s entry")%(section_name, attributes, ENTRY_TYPES[values["OBJECTTYPE"]]["name"])
VerifyValue(values, section_name, "ParameterValue")
VerifyValue(values, section_name, "DefaultValue")
return eds_dict
def VerifyValue(values, section_name, param):
if param.upper() in values:
try:
if values["DATATYPE"] in (0x09, 0x0A, 0x0B, 0x0F):
values[param.upper()] = str(values[param.upper()])
elif values["DATATYPE"] in (0x08, 0x11):
values[param.upper()] = float(values[param.upper()])
elif values["DATATYPE"] == 0x01:
values[param.upper()] = {0 : False, 1 : True}[values[param.upper()]]
else:
if not isinstance(values[param.upper()], (IntType, LongType)) and values[param.upper()].upper().find("$NODEID") == -1:
raise
except:
raise SyntaxError, _("Error on section \"[%s]\":\n%s incompatible with DataType")%(section_name, param)
# Function that writes an EDS file after generating its content
def WriteFile(filepath, content):
# Open file in write mode
cfile = open(filepath,"w")
# Write content
cfile.write(content)
# Close file
cfile.close()
# Function that generates the EDS file content for the current node in the manager
def GenerateFileContent(Node, filepath):
# Dictionary of each index contents
indexContents = {}
# Extract local time
current_time = localtime()
# Extract node informations
nodename = Node.GetNodeName()
nodeid = Node.GetNodeID()
nodetype = Node.GetNodeType()
description = Node.GetNodeDescription()
    # Retrieving the list of defined indexes
entries = Node.GetIndexes()
# Generate FileInfo section
fileContent = "[FileInfo]\n"
fileContent += "FileName=%s\n"%os.path.split(filepath)[-1]
fileContent += "FileVersion=1\n"
fileContent += "FileRevision=1\n"
fileContent += "EDSVersion=4.0\n"
fileContent += "Description=%s\n"%description
fileContent += "CreationTime=%s"%strftime("%I:%M", current_time)
    # %p option of strftime doesn't seem to work, so generate AM/PM by hand
if strftime("%I", current_time) == strftime("%H", current_time):
fileContent += "AM\n"
else:
fileContent += "PM\n"
fileContent += "CreationDate=%s\n"%strftime("%m-%d-%Y", current_time)
fileContent += "CreatedBy=CANFestival\n"
fileContent += "ModificationTime=%s"%strftime("%I:%M", current_time)
    # %p option of strftime doesn't seem to work, so generate AM/PM by hand
if strftime("%I", current_time) == strftime("%H", current_time):
fileContent += "AM\n"
else:
fileContent += "PM\n"
fileContent += "ModificationDate=%s\n"%strftime("%m-%d-%Y", current_time)
fileContent += "ModifiedBy=CANFestival\n"
# Generate DeviceInfo section
fileContent += "\n[DeviceInfo]\n"
fileContent += "VendorName=CANFestival\n"
# Use information typed by user in Identity entry
fileContent += "VendorNumber=0x%8.8X\n"%Node.GetEntry(0x1018, 1)
fileContent += "ProductName=%s\n"%nodename
fileContent += "ProductNumber=0x%8.8X\n"%Node.GetEntry(0x1018, 2)
fileContent += "RevisionNumber=0x%8.8X\n"%Node.GetEntry(0x1018, 3)
    # CANFestival supports all baudrates as long as the chosen driver supports them
fileContent += "BaudRate_10=1\n"
fileContent += "BaudRate_20=1\n"
fileContent += "BaudRate_50=1\n"
fileContent += "BaudRate_125=1\n"
fileContent += "BaudRate_250=1\n"
fileContent += "BaudRate_500=1\n"
fileContent += "BaudRate_800=1\n"
fileContent += "BaudRate_1000=1\n"
    # Select BootUp type from the information given by the user
fileContent += "SimpleBootUpMaster=%s\n"%BOOL_TRANSLATE[nodetype == "master"]
fileContent += "SimpleBootUpSlave=%s\n"%BOOL_TRANSLATE[nodetype == "slave"]
# CANFestival characteristics
fileContent += "Granularity=8\n"
fileContent += "DynamicChannelsSupported=0\n"
fileContent += "CompactPDO=0\n"
fileContent += "GroupMessaging=0\n"
    # Calculate receive and transmit PDO numbers from the available entries
fileContent += "NrOfRXPDO=%d\n"%len([idx for idx in entries if 0x1400 <= idx <= 0x15FF])
fileContent += "NrOfTXPDO=%d\n"%len([idx for idx in entries if 0x1800 <= idx <= 0x19FF])
    # LSS not supported as long as DS-302 is not fully implemented
fileContent += "LSS_Supported=0\n"
# Generate Dummy Usage section
fileContent += "\n[DummyUsage]\n"
fileContent += "Dummy0001=0\n"
fileContent += "Dummy0002=1\n"
fileContent += "Dummy0003=1\n"
fileContent += "Dummy0004=1\n"
fileContent += "Dummy0005=1\n"
fileContent += "Dummy0006=1\n"
fileContent += "Dummy0007=1\n"
# Generate Comments section
fileContent += "\n[Comments]\n"
fileContent += "Lines=0\n"
    # Lists of entries by type (Mandatory, Optional or Manufacturer)
mandatories = []
optionals = []
manufacturers = []
# Remove all unused PDO
## for entry in entries[:]:
## if 0x1600 <= entry < 0x1800 or 0x1A00 <= entry < 0x1C00:
## subentry_value = Node.GetEntry(entry, 1)
## if subentry_value is None or subentry_value == 0:
## entries.remove(entry)
## entries.remove(entry - 0x200)
# For each entry, we generate the entry section or sections if there is subindexes
for entry in entries:
# Extract infos and values for the entry
entry_infos = Node.GetEntryInfos(entry)
values = Node.GetEntry(entry, compute = False)
# Define section name
text = "\n[%X]\n"%entry
# If there is only one value, it's a VAR entry
if type(values) != ListType:
# Extract the informations of the first subindex
subentry_infos = Node.GetSubentryInfos(entry, 0)
# Generate EDS informations for the entry
text += "ParameterName=%s\n"%subentry_infos["name"]
text += "ObjectType=0x7\n"
text += "DataType=0x%4.4X\n"%subentry_infos["type"]
text += "AccessType=%s\n"%subentry_infos["access"]
if subentry_infos["type"] == 1:
text += "DefaultValue=%s\n"%BOOL_TRANSLATE[values]
else:
text += "DefaultValue=%s\n"%values
text += "PDOMapping=%s\n"%BOOL_TRANSLATE[subentry_infos["pdo"]]
else:
# Generate EDS informations for the entry
text += "ParameterName=%s\n"%entry_infos["name"]
if entry_infos["struct"] & node.OD_IdenticalSubindexes:
text += "ObjectType=0x9\n"
else:
text += "ObjectType=0x8\n"
# Generate EDS informations for subindexes of the entry in a separate text
subtext = ""
# Reset number of subindex defined
nb_subentry = 0
for subentry, value in enumerate(values):
# Extract the informations of each subindex
subentry_infos = Node.GetSubentryInfos(entry, subentry)
# If entry is not for the compatibility, generate informations for subindex
if subentry_infos["name"] != "Compatibility Entry":
subtext += "\n[%Xsub%X]\n"%(entry, subentry)
subtext += "ParameterName=%s\n"%subentry_infos["name"]
subtext += "ObjectType=0x7\n"
subtext += "DataType=0x%4.4X\n"%subentry_infos["type"]
subtext += "AccessType=%s\n"%subentry_infos["access"]
if subentry_infos["type"] == 1:
subtext += "DefaultValue=%s\n"%BOOL_TRANSLATE[value]
else:
subtext += "DefaultValue=%s\n"%value
subtext += "PDOMapping=%s\n"%BOOL_TRANSLATE[subentry_infos["pdo"]]
# Increment number of subindex defined
nb_subentry += 1
# Write number of subindex defined for the entry
text += "SubNumber=%d\n"%nb_subentry
# Write subindex definitions
text += subtext
# Then we add the entry in the right list
# First case, entry is between 0x2000 and 0x5FFF, then it's a manufacturer entry
if 0x2000 <= entry <= 0x5FFF:
manufacturers.append(entry)
# Second case, entry is required, then it's a mandatory entry
elif entry_infos["need"]:
mandatories.append(entry)
# In any other case, it's an optional entry
else:
optionals.append(entry)
        # Save text of the entry in the dictionary of contents
indexContents[entry] = text
# Before generate File Content we sort the entry list
manufacturers.sort()
mandatories.sort()
optionals.sort()
# Generate Definition of mandatory objects
fileContent += "\n[MandatoryObjects]\n"
fileContent += "SupportedObjects=%d\n"%len(mandatories)
for idx, entry in enumerate(mandatories):
fileContent += "%d=0x%4.4X\n"%(idx + 1, entry)
# Write mandatory entries
for entry in mandatories:
fileContent += indexContents[entry]
# Generate Definition of optional objects
fileContent += "\n[OptionalObjects]\n"
fileContent += "SupportedObjects=%d\n"%len(optionals)
for idx, entry in enumerate(optionals):
fileContent += "%d=0x%4.4X\n"%(idx + 1, entry)
# Write optional entries
for entry in optionals:
fileContent += indexContents[entry]
# Generate Definition of manufacturer objects
fileContent += "\n[ManufacturerObjects]\n"
fileContent += "SupportedObjects=%d\n"%len(manufacturers)
for idx, entry in enumerate(manufacturers):
fileContent += "%d=0x%4.4X\n"%(idx + 1, entry)
# Write manufacturer entries
for entry in manufacturers:
fileContent += indexContents[entry]
# Return File Content
return fileContent
# Function that generates EDS file from current node edited
def GenerateEDSFile(filepath, node):
try:
# Generate file content
content = GenerateFileContent(node, filepath)
# Write file
WriteFile(filepath, content)
return None
except ValueError, message:
return _("Unable to generate EDS file\n%s")%message
# Function that generates the CPJ file content for the nodelist
def GenerateCPJContent(nodelist):
nodes = nodelist.SlaveNodes.keys()
nodes.sort()
fileContent = "[TOPOLOGY]\n"
fileContent += "NetName=%s\n"%nodelist.GetNetworkName()
fileContent += "Nodes=0x%2.2X\n"%len(nodes)
for nodeid in nodes:
fileContent += "Node%dPresent=0x01\n"%nodeid
fileContent += "Node%dName=%s\n"%(nodeid, nodelist.SlaveNodes[nodeid]["Name"])
fileContent += "Node%dDCFName=%s\n"%(nodeid, nodelist.SlaveNodes[nodeid]["EDS"])
fileContent += "EDSBaseName=eds\n"
return fileContent
# Function that generates Node from an EDS file
def GenerateNode(filepath, nodeID = 0):
# Create a new node
Node = node.Node(id = nodeID)
try:
# Parse file and extract dictionary of EDS entry
eds_dict = ParseEDSFile(filepath)
# Extract Profile Number from Device Type entry
ProfileNb = eds_dict[0x1000].get("DEFAULTVALUE", 0) & 0x0000ffff
# If profile is not DS-301 or DS-302
if ProfileNb not in [0, 301, 302]:
# Compile Profile name and path to .prf file
ProfileName = "DS-%d"%ProfileNb
ProfilePath = os.path.join(os.path.split(__file__)[0], "config/%s.prf"%ProfileName)
# Verify that profile is available
if os.path.isfile(ProfilePath):
try:
# Load Profile
execfile(ProfilePath)
Node.SetProfileName(ProfileName)
Node.SetProfile(Mapping)
Node.SetSpecificMenu(AddMenuEntries)
except:
pass
# Read all entries in the EDS dictionary
for entry, values in eds_dict.iteritems():
# All sections with a name in keynames are escaped
if entry in SECTION_KEYNAMES:
pass
else:
# Extract informations for the entry
entry_infos = Node.GetEntryInfos(entry)
# If no informations are available, then we write them
if not entry_infos:
# First case, entry is a DOMAIN or VAR
if values["OBJECTTYPE"] in [2, 7]:
if values["OBJECTTYPE"] == 2:
values["DATATYPE"] = values.get("DATATYPE", 0xF)
if values["DATATYPE"] != 0xF:
raise SyntaxError, _("Domain entry 0x%4.4X DataType must be 0xF(DOMAIN) if defined")%entry
# Add mapping for entry
Node.AddMappingEntry(entry, name = values["PARAMETERNAME"], struct = 1)
# Add mapping for first subindex
Node.AddMappingEntry(entry, 0, values = {"name" : values["PARAMETERNAME"],
"type" : values["DATATYPE"],
"access" : ACCESS_TRANSLATE[values["ACCESSTYPE"].upper()],
"pdo" : values.get("PDOMAPPING", 0) == 1})
# Second case, entry is an ARRAY or RECORD
elif values["OBJECTTYPE"] in [8, 9]:
# Extract maximum subindex number defined
max_subindex = max(values["subindexes"].keys())
# Add mapping for entry
Node.AddMappingEntry(entry, name = values["PARAMETERNAME"], struct = 3)
# Add mapping for first subindex
Node.AddMappingEntry(entry, 0, values = {"name" : "Number of Entries", "type" : 0x05, "access" : "ro", "pdo" : False})
# Add mapping for other subindexes
for subindex in xrange(1, int(max_subindex) + 1):
# if subindex is defined
if subindex in values["subindexes"]:
Node.AddMappingEntry(entry, subindex, values = {"name" : values["subindexes"][subindex]["PARAMETERNAME"],
"type" : values["subindexes"][subindex]["DATATYPE"],
"access" : ACCESS_TRANSLATE[values["subindexes"][subindex]["ACCESSTYPE"].upper()],
"pdo" : values["subindexes"][subindex].get("PDOMAPPING", 0) == 1})
# if not, we add a mapping for compatibility
else:
Node.AddMappingEntry(entry, subindex, values = {"name" : "Compatibility Entry", "type" : 0x05, "access" : "rw", "pdo" : False})
## # Third case, entry is an RECORD
## elif values["OBJECTTYPE"] == 9:
## # Verify that the first subindex is defined
## if 0 not in values["subindexes"]:
## raise SyntaxError, "Error on entry 0x%4.4X:\nSubindex 0 must be defined for a RECORD entry"%entry
## # Add mapping for entry
## Node.AddMappingEntry(entry, name = values["PARAMETERNAME"], struct = 7)
## # Add mapping for first subindex
## Node.AddMappingEntry(entry, 0, values = {"name" : "Number of Entries", "type" : 0x05, "access" : "ro", "pdo" : False})
## # Verify that second subindex is defined
## if 1 in values["subindexes"]:
## Node.AddMappingEntry(entry, 1, values = {"name" : values["PARAMETERNAME"] + " %d[(sub)]",
## "type" : values["subindexes"][1]["DATATYPE"],
## "access" : ACCESS_TRANSLATE[values["subindexes"][1]["ACCESSTYPE"].upper()],
## "pdo" : values["subindexes"][1].get("PDOMAPPING", 0) == 1,
## "nbmax" : 0xFE})
## else:
## raise SyntaxError, "Error on entry 0x%4.4X:\nA RECORD entry must have at least 2 subindexes"%entry
# Define entry for the new node
# First case, entry is a DOMAIN or VAR
if values["OBJECTTYPE"] in [2, 7]:
# Take default value if it is defined
if "PARAMETERVALUE" in values:
value = values["PARAMETERVALUE"]
elif "DEFAULTVALUE" in values:
value = values["DEFAULTVALUE"]
# Find default value for value type of the entry
else:
value = GetDefaultValue(Node, entry)
Node.AddEntry(entry, 0, value)
# Second case, entry is an ARRAY or a RECORD
elif values["OBJECTTYPE"] in [8, 9]:
# Verify that "Subnumber" attribute is defined and has a valid value
if "SUBNUMBER" in values and values["SUBNUMBER"] > 0:
# Extract maximum subindex number defined
max_subindex = max(values["subindexes"].keys())
Node.AddEntry(entry, value = [])
# Define value for all subindexes except the first
for subindex in xrange(1, int(max_subindex) + 1):
# Take default value if it is defined and entry is defined
if subindex in values["subindexes"] and "PARAMETERVALUE" in values["subindexes"][subindex]:
value = values["subindexes"][subindex]["PARAMETERVALUE"]
elif subindex in values["subindexes"] and "DEFAULTVALUE" in values["subindexes"][subindex]:
value = values["subindexes"][subindex]["DEFAULTVALUE"]
# Find default value for value type of the subindex
else:
value = GetDefaultValue(Node, entry, subindex)
Node.AddEntry(entry, subindex, value)
else:
raise SyntaxError, _("Array or Record entry 0x%4.4X must have a \"SubNumber\" attribute")%entry
return Node
except SyntaxError, message:
return _("Unable to import EDS file\n%s")%message
#-------------------------------------------------------------------------------
# Main Function
#-------------------------------------------------------------------------------
if __name__ == '__main__':
print ParseEDSFile("examples/PEAK MicroMod.eds")
| lgpl-2.1 |
ZirkCoin/ZirkCoin | share/qt/extract_strings_qt.py | 1294 | 1784 | #!/usr/bin/python
'''
Extract _("...") strings for translation and convert to Qt4 stringdefs so that
they can be picked up by Qt linguist.
'''
from subprocess import Popen, PIPE
import glob
OUT_CPP="src/qt/bitcoinstrings.cpp"
EMPTY=['""']
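# For illustration only, parse_po() below consumes xgettext output of roughly
# this shape (the string content is just an example; continuation lines start
# with a double quote):
#
#   msgid ""
#   "Usage: bitcoind [options]"
#   msgstr ""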
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
files = glob.glob('src/*.cpp') + glob.glob('src/*.h')
# xgettext -n --keyword=_ $FILES
child = Popen(['xgettext','--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out)
f = open(OUT_CPP, 'w')
f.write("""#include <QtGlobal>
// Automatically generated by extract_strings.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *bitcoin_strings[] = {')
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("bitcoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};')
f.close()
| mit |
iwarobots/aerodynamics | src/test_section.py | 1 | 1447 | #!/usr/bin/env python
from __future__ import absolute_import, division
from common import Model
# TODO: Refinement needed.
class TestSection(Model):
def __init__(self,
in_mach,
in_area,
in_p,
in_t,
p01,
t01,
z_len,
t_len):
self._in_mach = in_mach
self._in_area = in_area
self._in_p = in_p
self._in_t = in_t
self._p01 = p01
self._t01 = t01
self._z_len = z_len
self._t_len = t_len
@property
def t_len(self):
return self._t_len
@property
def in_mach(self):
return self._in_mach
@property
def in_area(self):
return self._in_area
@property
def in_p(self):
return self._in_p
@property
def in_t(self):
return self._in_t
@property
def p01(self):
return self._p01
@property
def p02(self):
return self.p01
def x2a(self, x):
return self._in_area
def x2y(self, x):
return self.x2a(x) / self._z_len / 2
def x2m(self, x):
return self.in_mach
def x2p(self, x):
return self.in_p / self.p01
@property
def t01(self):
return self._t01
def x2t(self, x):
return self.in_t / self.t01
def x2rho(self, x):
return self.x2p(x) / (self.x2t(x)) ** -1
| apache-2.0 |
CeltonMcGrath/TACTIC | 3rd_party/CherryPy/cherrypy/tutorial/tut06_default_method.py | 9 | 2118 | """
Tutorial - The default method
Request handler objects can implement a method called "default" that
is called when no other suitable method/object could be found.
Essentially, if CherryPy2 can't find a matching request handler object
for the given request URI, it will use the default method of the object
located deepest on the URI path.
Using this mechanism you can easily simulate virtual URI structures
by parsing the extra URI string, which you can access through
cherrypy.request.virtualPath.
The application in this tutorial simulates an URI structure looking
like /users/<username>. Since the <username> bit will not be found (as
there are no matching methods), it is handled by the default method.
"""
import cherrypy
class UsersPage:
def index(self):
# Since this is just a stupid little example, we'll simply
# display a list of links to random, made-up users. In a real
# application, this could be generated from a database result set.
return '''
<a href="./remi">Remi Delon</a><br/>
<a href="./hendrik">Hendrik Mans</a><br/>
<a href="./lorenzo">Lorenzo Lamas</a><br/>
'''
index.exposed = True
def default(self, user):
# Here we react depending on the virtualPath -- the part of the
# path that could not be mapped to an object method. In a real
# application, we would probably do some database lookups here
# instead of the silly if/elif/else construct.
if user == 'remi':
out = "Remi Delon, CherryPy lead developer"
elif user == 'hendrik':
out = "Hendrik Mans, CherryPy co-developer & crazy German"
elif user == 'lorenzo':
out = "Lorenzo Lamas, famous actor and singer!"
else:
out = "Unknown user. :-("
return '%s (<a href="./">back</a>)' % out
default.exposed = True
cherrypy.tree.mount(UsersPage())
if __name__ == '__main__':
import os.path
thisdir = os.path.dirname(__file__)
cherrypy.quickstart(config=os.path.join(thisdir, 'tutorial.conf'))
| epl-1.0 |
Jumpscale/jumpscale_core8 | lib/JumpScale/baselib/atyourservice81/Service.py | 1 | 30866 | from JumpScale import j
import capnp
import time
class Service:
def __init__(self, aysrepo, actor=None, model=None, name="", args={}, path=None):
"""
init from a template or from a model
"""
self.model = None
self._schema = None
self._path = ""
self._schema = None
self.name = name
self.aysrepo = aysrepo
self.logger = j.atyourservice.logger
if actor is not None:
try:
self._initFromActor(actor, args=args, name=name)
except:
# cleanup if init fails
self.delete()
raise
elif model is not None:
self.model = model
elif path is not None:
self.loadFromFS(path)
else:
raise j.exceptions.Input(
message="template or model needs to be specified when creating an actor", level=1, source="", tags="", msgpub="")
@property
def path(self):
if self._path == "":
relpath = self.model.dbobj.gitRepo.path
assert self.model.dbobj.gitRepo.url == self.aysrepo.git.remoteUrl
self._path = j.sal.fs.joinPaths(self.aysrepo.path, relpath)
return self._path
def _initFromActor(self, actor, name, args={}):
self.logger.info("init service %s from %s" % (name, actor.model.name))
if j.data.types.string.check(actor):
raise j.exceptions.RuntimeError("no longer supported, pass actor")
if actor is None:
raise j.exceptions.RuntimeError("service actor cannot be None")
self.model = self.aysrepo.db.services.new()
dbobj = self.model.dbobj
dbobj.name = name
dbobj.actorName = actor.model.dbobj.name
dbobj.actorKey = actor.model.key
dbobj.state = "new"
dbobj.dataSchema = actor.model.dbobj.serviceDataSchema
skey = "%s!%s" % (self.model.role, self.model.dbobj.name)
dbobj.gitRepo.url = self.aysrepo.git.remoteUrl
dbobj.gitRepo.path = j.sal.fs.joinPaths("services", skey)
# actions
actions = dbobj.init("actions", len(actor.model.dbobj.actions))
counter = 0
for action in actor.model.dbobj.actions:
actionnew = actions[counter]
actionnew.state = "new"
actionnew.actionKey = action.actionKey
actionnew.name = action.name
actionnew.log = action.log
actionnew.period = action.period
counter += 1
# set default value for argument not specified in blueprint
template = self.aysrepo.templateGet(actor.model.name)
for k, v in template.schemaHrd.items.items():
if k not in args:
args[k] = v.default
# input will always happen in process
args2 = self.input(args=args)
# print("%s:%s" % (self, args2))
if args2 is not None and j.data.types.dict.check(args2):
args = args2
if not j.data.types.dict.check(args):
raise j.exceptions.Input(message="result from input needs to be dict,service:%s" % self,
level=1, source="", tags="", msgpub="")
self._validate_service_args(args)
dbobj.data = j.data.capnp.getBinaryData(j.data.capnp.getObj(dbobj.dataSchema, args=args, name='Schema'))
# parents/producers
parent = self._initParent(actor, args)
if parent is not None:
fullpath = j.sal.fs.joinPaths(parent.path, skey)
newpath = j.sal.fs.pathRemoveDirPart(fullpath, self.aysrepo.path)
if j.sal.fs.exists(dbobj.gitRepo.path):
j.sal.fs.moveDir(dbobj.gitRepo.path, newpath)
dbobj.gitRepo.path = newpath
self._initProducers(actor, args)
self.save()
self.init()
# make sure we have the last version of the model if something changed during init
self.reload()
# need to do this manually cause execution of input method is a bit special.
self.model.actions['input'].state = 'ok'
self.saveAll()
def _validate_service_args(self, args):
"""
        validate the arguments passed to the service during initialization to be sure we don't pass undefined arguments.
"""
errors = []
schema = j.data.capnp.getSchemaFromText(self.model.dbobj.dataSchema)
for field in args:
normalizedfieldname = j.data.hrd.sanitize_key(field)
if normalizedfieldname not in schema.schema.fieldnames:
errors.append('- Invalid parameter [{field}] passed while creating {service}.\n'.format(
field=field,
service="%s!%s" % (self.model.role, self.model.dbobj.name)))
if errors:
msg = "The arguments passed to the service contains the following errors: \n" + "\n".join(errors)
msg += '\nDataSchema : {}'.format(self.model.dbobj.dataSchema)
raise j.exceptions.Input(msg)
def _initParent(self, actor, args):
if actor.model.dbobj.parent.actorRole is not "":
parent_role = actor.model.dbobj.parent.actorRole
# try to get the instance name from the args. Look for full actor name ('node.ssh') or just role (node)
# if none of the two is available in the args, don't use instance name and
# expect the parent service to be unique in the repo
parent_name = args.get(actor.model.dbobj.parent.argKey, args.get(parent_role, ''))
res = self.aysrepo.servicesFind(name=parent_name, actor='%s(\..*)?' % parent_role)
res = [s for s in res if s.model.role == parent_role]
if len(res) == 0:
if actor.model.dbobj.parent.optional:
return None
if actor.model.dbobj.parent.auto is False:
raise j.exceptions.Input(message="could not find parent:%s for %s, found 0" %
(parent_name, self), level=1, source="", tags="", msgpub="")
else:
auto_actor = self.aysrepo.actorGet(parent_role)
instance = j.data.idgenerator.generateIncrID('parent_%s' % parent_role)
res.append(auto_actor.serviceCreate(instance="auto_%d" % instance, args={}))
elif len(res) > 1:
raise j.exceptions.Input(message="could not find parent:%s for %s, found more than 1." %
(parent_name, self), level=1, source="", tags="", msgpub="")
parentobj = res[0]
self.model.dbobj.parent.actorName = parentobj.model.dbobj.actorName
self.model.dbobj.parent.key = parentobj.model.key
self.model.dbobj.parent.serviceName = parentobj.name
return parentobj
return None
def _initProducers(self, actor, args):
"""
Initialize the producers of an actor.
actor: is the actor to init its producers.
args: passed arguments in the blueprint (i.e {'ssh1':'main', 'sshlist':[]} )
"""
# for every producer model in the producers, we get the user set services `argKey` to be consumed in the blueprint itself.
# calculate the difference of the available services and the user set
# calculate the min required services and see if we should create new ones if auto is set
# create the services required till the minServices is reached.
        # then add each one to our producers and add ourselves to their consumers list.
# maintain the parent relationship (parent is always a producer and we are always a consumer of the parent.)
for producer_model in actor.model.dbobj.producers:
producer_role = producer_model.actorRole
usersetservices = []
passedservicesnames = args.get(producer_model.argKey, args.get(producer_role, ""))
if not j.data.types.list.check(passedservicesnames):
passedservicesnames = [passedservicesnames]
for svname in passedservicesnames:
if svname:
foundservices = self.aysrepo.servicesFind(name=svname, actor="%s(\..*)?" % producer_model.actorRole)
usersetservices.extend(foundservices)
available_services = self.aysrepo.servicesFind(actor=producer_role)
available_services = list(set(available_services)-set(usersetservices))
extraservices = len(usersetservices) - producer_model.maxServices
if extraservices > 0:
raise j.exceptions.Input(message="Specified services [%s] are more than maximum services: [%s]"%(str(usersetservices), str(producer_model.maxServices)),
level=1, source="", tags="", msgpub="")
tocreate = producer_model.minServices-len(available_services)-len(usersetservices)
if tocreate > 0:
if producer_model.auto:
for idx in range(tocreate):
auto_actor = self.aysrepo.actorGet(producer_role)
available_services.append(auto_actor.serviceCreate(instance="auto_%s" % idx, args={}))
else:
raise j.exceptions.Input(message="Minimum number of services required is %s and only %s are provided. [Hint: Maybe you want to set auto to auto create the missing services?]" % (producer_model.minServices, len(usersetservices)),
level=1, source="", tags="", msgpub="")
for idx, producer_obj in enumerate(usersetservices + available_services):
if idx >= len(usersetservices) and idx >= producer_model.minServices:
break
self.model.producerAdd(
actorName=producer_obj.model.dbobj.actorName,
serviceName=producer_obj.model.dbobj.name,
key=producer_obj.model.key)
# add ourself to the consumers list of the producer
producer_obj.model.consumerAdd(
actorName=self.model.dbobj.actorName,
serviceName=self.model.dbobj.name,
key=self.model.key)
if self.parent is not None:
# add parent to the producers list.
self.model.producerAdd(
actorName=self.parent.model.dbobj.actorName,
serviceName=self.parent.model.dbobj.name,
key=self.parent.model.key)
# add ourself to the consumers list of the parent
self.parent.model.consumerAdd(
actorName=self.model.dbobj.actorName,
serviceName=self.model.dbobj.name,
key=self.model.key)
def _check_args(self, actor, args):
""" Checks whether if args are the same as in instance model """
data = j.data.serializer.json.loads(self.model.dataJSON)
for key, value in args.items():
sanitized_key = j.data.hrd.sanitize_key(key)
if sanitized_key in data and data[sanitized_key] != value:
self.processChange(actor=actor, changeCategory="dataschema", args=args)
break
def loadFromFS(self, path):
"""
get content from fs and load in object
        only for DR purposes, normally loaded from the key-value store
"""
self.logger.debug("load service from FS: %s" % path)
if self.model is None:
self.model = self.aysrepo.db.services.new()
model_json = j.data.serializer.json.load(j.sal.fs.joinPaths(path, "service.json"))
# for now we don't reload the actions codes.
# when using distributed DB, the actions code could still be available
actions_bak = model_json.pop('actions')
self.model.dbobj = self.aysrepo.db.services.capnp_schema.new_message(**model_json)
data_json = j.data.serializer.json.load(j.sal.fs.joinPaths(path, "data.json"))
self.model.dbobj.data = j.data.capnp.getBinaryData(j.data.capnp.getObj(self.model.dbobj.dataSchema, args=data_json))
# data_obj = j.data.capnp.getObj(self.model.dbobj.dataSchema, data_json)
# self.model._data = data_obj
# actions
# relink actions from the actor to be sure we have good keys
actor = self.aysrepo.actorGet(name=self.model.dbobj.actorName)
actions = self.model.dbobj.init("actions", len(actor.model.dbobj.actions))
counter = 0
for action in actor.model.dbobj.actions:
for backup_action in actions_bak:
if action.name == backup_action['name']:
break
actionnew = actions[counter]
actionnew.state = backup_action['state']
actionnew.actionKey = action.actionKey
actionnew.name = action.name
actionnew.log = backup_action['log']
actionnew.period = backup_action['period']
counter += 1
self.saveAll()
def saveToFS(self):
j.sal.fs.createDir(self.path)
path2 = j.sal.fs.joinPaths(self.path, "service.json")
j.sal.fs.writeFile(path2, self.model.dictJson, append=False)
path3 = j.sal.fs.joinPaths(self.path, "data.json")
j.sal.fs.writeFile(path3, self.model.dataJSON)
path4 = j.sal.fs.joinPaths(self.path, "schema.capnp")
j.sal.fs.writeFile(path4, self.model.dbobj.dataSchema)
def save(self):
self.model.save()
def saveAll(self):
self.model.save()
self.saveToFS()
def reload(self):
self.model._data = None
self.model.load(self.model.key)
def delete(self):
"""
        delete this service completely.
remove it from db and from filesystem
all the children of this service are going to be deleted too
"""
# TODO should probably warn user relation may be broken
for prod_model in self.model.producers:
prod_model.consumerRemove(self)
for cons_model in self.model.consumers:
cons_model.producerRemove(self)
for service in self.children:
service.delete()
self.model.delete()
j.sal.fs.removeDirTree(self.path)
@property
def parent(self):
if self.model.parent is not None:
return self.model.parent.objectGet(self.aysrepo)
return None
@property
def parents(self):
chain = []
parent = self.parent
while parent is not None:
chain.append(parent)
parent = parent.parent
return chain
@property
def children(self):
res = []
for service in self.aysrepo.services:
if service.parent == self:
res.append(service)
return res
@property
def producers(self):
producers = {}
for prod_model in self.model.producers:
if prod_model.role not in producers:
producers[prod_model.role] = []
result = self.aysrepo.servicesFind(name=prod_model.dbobj.name, actor=prod_model.dbobj.actorName)
producers[prod_model.role].extend(result)
return producers
@property
def consumers(self):
consumers = {}
for prod_model in self.model.consumers:
if prod_model.role not in consumers:
consumers[prod_model.role] = []
result = self.aysrepo.servicesFind(name=prod_model.dbobj.name, actor=prod_model.dbobj.actorName)
consumers[prod_model.role].extend(result)
return consumers
def isConsumedBy(self, service):
consumers_keys = [model.key for model in self.model.consumers]
return service.model.key in consumers_keys
def findConsumersRecursive(self, target=None, out=set()):
"""
@return set of services that consumes target, recursivlely
"""
if target is None:
target = self
for service in target.consumers:
out.add(service)
self.findConsumersRecursive(service, out)
return out
def getProducersRecursive(self, producers=set(), callers=set(), action="", producerRoles="*"):
for role, producers_list in self.producers.items():
for producer in producers_list:
if action == "" or action in producer.model.actionsState.keys():
if producerRoles == "*" or producer.model.role in producerRoles:
producers.add(producer)
producers = producer.getProducersRecursive(producers=producers, callers=callers, action=action, producerRoles=producerRoles)
return producers.symmetric_difference(callers)
def printProducersRecursive(self, prefix=""):
for role, producers2 in self.producers.items():
# print ("%s%s"%(prefix,role))
for producer in producers2:
print("%s- %s" % (prefix, producer))
producer.printProducersRecursive(prefix + " ")
def getConsumersRecursive(self, consumers=set(), callers=set(), action="", consumerRole="*"):
for role, consumers_list in self.consumers.items():
for consumer in consumers_list:
if action == "" or action in consumer.model.actionsState.keys():
if consumerRole == "*" or consumer.model.role in consmersRole:
consumers.add(consumer)
consumers = consumer.getConsumersRecursive(
consumers=consumers, callers=callers, action=action, consumerRole=consumerRole)
return consumers.symmetric_difference(callers)
def getConsumersWaiting(self, action='uninstall', consumersChanged=set(), scope=None):
for consumer in self.getConsumersRecursive(set(), set()):
# check that the action exists, no need to wait for other actions,
            # apart from when init or install are not done
if consumer.model.actionsState['init'] != "ok":
consumersChanged.add(consumer)
if consumer.model.actionsState['install'] != "ok":
consumersChanged.add(consumer)
if action not in consumer.model.actionsState.keys():
continue
if consumer.model.actionsState[action] != "ok":
consumersChanged.add(consumer)
if scope is not None:
consumersChanged = consumersChanged.intersection(scope)
return consumersChanged
def consume(self, service):
"""
        consume another service dynamically
"""
if service in self.producers:
return
self.model.producerAdd(
actorName=service.model.dbobj.actorName,
serviceName=service.name,
key=service.model.key)
# add ourself to the consumers list of the producer
service.model.consumerAdd(
actorName=self.model.dbobj.actorName,
serviceName=self.model.dbobj.name,
key=self.model.key)
self.saveAll()
service.saveAll()
@property
def executor(self):
return self._getExecutor()
def _getExecutor(self):
executor = None
tocheck = [self]
tocheck.extend(self.parents)
for service in tocheck:
if 'getExecutor' in service.model.actionsState.keys():
job = service.getJob('getExecutor')
executor = job.method(job)
return executor
return j.tools.executor.getLocal()
def processChange(self, actor, changeCategory, args={}):
"""
template action change
categories :
- dataschema
- ui
- config
- action_new_actionname
- action_mod_actionname
"""
# TODO: implement different pre-define action for each category
# self.logger.debug('process change for %s (%s)' % (self, changeCategory)
if changeCategory == 'dataschema':
# We use the args passed without change
pass
elif changeCategory == 'ui':
# TODO
pass
elif changeCategory == 'config':
# update the recurrin and event actions
# then set the lastrun to the value it was before update
recurring_lastrun = {}
event_lastrun = {}
for event in self.model.actionsEvent.values():
event_lastrun[event.action] = event.lastRun
for recurring in self.model.actionsRecurring.values():
recurring_lastrun[recurring.action] = recurring.lastRun
self._initRecurringActions(actor)
self._initEventActions(actor)
for action, lastRun in event_lastrun.items():
self.model.actionsEvent[action].lastRun = lastRun
for action, lastRun in recurring_lastrun.items():
self.model.actionsRecurring[action].lastRun = lastRun
elif changeCategory.find('action_new') != -1:
action_name = changeCategory.split('action_new_')[1]
actor_action_pointer = actor.model.actions[action_name]
self.model.actionAdd(key=actor_action_pointer.actionKey, name=action_name)
elif changeCategory.find('action_mod') != -1:
# update state and pointer of the action pointer in service model
action_name = changeCategory.split('action_mod_')[1]
action_actor_pointer = actor.model.actions[action_name]
service_action_pointer = self.model.actions[action_name]
service_action_pointer.state = 'changed'
service_action_pointer.actionKey = action_actor_pointer.actionKey
# update the lastModDate of the action object
action = j.core.jobcontroller.db.actions.get(key=service_action_pointer.actionKey)
action.dbobj.lastModDate = j.data.time.epoch
action.save()
elif changeCategory.find('action_del') != -1:
action_name = action_name = changeCategory.split('action_del_')[1]
self.model.actionDelete(action_name)
# save the change for the service
self.saveAll()
# execute the processChange method if it exists
if 'processChange' in self.model.actions.keys():
args.update({'changeCategory': changeCategory})
job = self.getJob("processChange", args=args)
args = job.executeInProcess()
job.model.save()
def input(self, args={}):
job = self.getJob("input", args=args)
job._service = self
job.saveService = False # this is done to make sure we don't save the service at this point !!!
args = job.executeInProcess()
job.model.actorName = self.model.dbobj.actorName
job.model.save()
return args
def init(self):
job = self.getJob(actionName="init")
job.executeInProcess()
job.model.save()
return job
def checkActions(self, actions):
"""
will walk over all actions, and make sure the default are well set.
"""
from IPython import embed
print("DEBUG NOW checkactions")
embed()
raise RuntimeError("stop debug here")
def scheduleAction(self, action, args={}, period=None, log=True, force=False):
"""
        Change the state of an action so it is marked as needing to be executed
if the period is specified, also create a recurring period for the action
"""
self.logger.info('schedule action %s on %s' % (action, self))
if action not in self.model.actions:
raise j.exceptions.Input(
"Trying to schedule action %s on %s. but this action doesn't exist" % (action, self))
action_model = self.model.actions[action]
if action_model.state == 'disabled':
raise j.exceptions.Input("Trying to schedule action %s on %s. but this action is disabled" % (action, self))
if period is not None and period != '':
# convert period to seconds
if j.data.types.string.check(period):
period = j.data.types.duration.convertToSeconds(period)
elif j.data.types.int.check(period) or j.data.types.float.check(period):
period = int(period)
# save period into actionCode model
action_model.period = period
if not force and action_model.state == 'ok':
self.logger.info("action %s already in ok state, don't schedule again" % action_model.name)
else:
action_model.state = 'scheduled'
self.saveAll()
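    # Illustrative usage sketch (added for clarity; the action names and the
    # '1h' duration string are hypothetical, not part of this module): 'period'
    # may be a duration string or a number of seconds, as handled above.
    #
    #   service.scheduleAction('monitor', period='1h')  # mark for execution and recur hourly
    #   service.scheduleAction('install', force=True)   # re-schedule even if the state is already 'ok'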
def executeAction(self, action, args={}, inprocess=False):
if action[-1] == "_":
return self.executeActionService(action)
else:
return self.executeActionJob(action, args, inprocess=inprocess)
def executeActionService(self, action, args={}):
# execute an action in process without creating a job
        # useful for methods called very often.
action_id = self.model.actions[action].actionKey
action_model = j.core.jobcontroller.db.actions.get(action_id)
        # re-indent the stored action source so it can be embedded in a function body
        action_with_lines = ("\n %s \n" % action_model.code)
        indented_action = '\n '.join(action_with_lines.splitlines())
        # build a function definition around the action code, define it with exec,
        # then look the function up by name and call it directly
        complete_action = "def %s(%s): %s" % (action, action_model.argsText, indented_action)
        exec(complete_action)
        res = eval(action)(service=self, args=args)
return res
def executeActionJob(self, actionName, args={}, inprocess=False):
self.logger.debug('execute action %s on %s' % (actionName, self))
job = self.getJob(actionName=actionName, args=args)
# inprocess means we don't want to create subprocesses for this job
        # used mainly for actions called from other actions.
if inprocess:
job.model.dbobj.debug = True
now = j.data.time.epoch
p = job.execute()
if job.model.dbobj.debug is True:
return job
while not p.isDone():
time.sleep(0.5)
p.sync()
if p.new_stdout != "":
self.logger.info(p.new_stdout)
# just to make sure process is cleared
p.wait()
        # if the action is a recurring action, save the last execution time in the model
if actionName in self.model.actionsRecurring:
self.model.actionsRecurring[actionName].lastRun = now
service_action_obj = self.model.actions[actionName]
if p.state != 'success':
job.model.dbobj.state = 'error'
service_action_obj.state = 'error'
# processError creates the logs entry in job object
job._processError(p.error)
# print error
log = job.model.dbobj.logs[-1]
print(job.str_error(log.log))
else:
job.model.dbobj.state = 'ok'
service_action_obj.state = 'ok'
log_enable = j.core.jobcontroller.db.actions.get(service_action_obj.actionKey).dbobj.log
if log_enable:
if p.stdout != '':
job.model.log(msg=p.stdout, level=5, category='out')
if p.stderr != '':
job.model.log(msg=p.stderr, level=5, category='err')
self.logger.info("job {} done sucessfuly".format(str(job)))
job.model.save()
job.service.saveAll()
return job
def getJob(self, actionName, args={}):
action = self.model.actions[actionName]
jobobj = j.core.jobcontroller.db.jobs.new()
jobobj.dbobj.repoKey = self.aysrepo.model.key
jobobj.dbobj.actionKey = action.actionKey
jobobj.dbobj.actionName = action.name
jobobj.dbobj.actorName = self.model.dbobj.actorName
jobobj.dbobj.serviceName = self.model.dbobj.name
jobobj.dbobj.serviceKey = self.model.key
jobobj.dbobj.state = "new"
jobobj.dbobj.lastModDate = j.data.time.epoch
jobobj.args = args
job = j.core.jobcontroller.newJobFromModel(jobobj)
return job
def _build_actions_chain(self, action):
"""
        this method returns a list of actions that need to happen before the action
        passed as argument can start
"""
ds = list()
self.model._build_actions_chain(ds=ds)
ds.reverse()
return ds
def __eq__(self, service):
if not service:
return False
return service.model.key == self.model.key
def __hash__(self):
return hash(self.model.key)
def __repr__(self):
return "service:%s!%s" % (self.model.role, self.model.dbobj.name)
def __str__(self):
return self.__repr__()
def _getDisabledProducers(self):
disabled = []
for producers_list in self.producers.values():
for producer in producers_list:
if producer.model.dbobj.state == 'disabled':
disabled.append(producer)
return disabled
# def disable(self):
# for consumer in self.getConsumers():
# candidates = self.aysrepo.findServices(role=self.model.role, first=False)
# if len(candidates) > 1:
# # Other candidates available. Should link consumer to new
# # candidate
# candidates.remove(self)
# candidate = candidates[0]
# producers = consumer.hrd.getList('producer.%s' % self.role, [])
# producers.remove(self.key)
# producers.append(candidate.key)
# consumer.hrd.set('producer.%s' % self.role, producers)
# else:
# # No other candidates already installed. Disable consumer as
# # well.
# consumer.disable()
#
# self.log("disable instance")
# self.model.hrd.set('disabled', True)
#
# def _canBeEnabled(self):
# for role, producers in list(self.producers.items()):
# for producer in producers:
# if producer.state.hrd.getBool('disabled', False):
# return False
# return True
#
# def enable(self):
# # Check that all dependencies are enabled
#
# if not self._canBeEnabled():
# self.log(
# "%s cannot be enabled because one or more of its producers is disabled" % self)
# return
#
# self.model.hrd.set('disabled', False)
# self.log("Enable instance")
# for consumer in self._getConsumers(include_disabled=True):
# consumer.enable()
# consumer.start()
#
| apache-2.0 |
dstufft/ooni-backend | oonib/otime.py | 1 | 2318 | import time
from oonib import errors as e
from datetime import datetime
def utcDateNow():
"""
Returns the datetime object of the current UTC time.
"""
return datetime.utcnow()
def utcTimeNow():
"""
Returns seconds since epoch in UTC time, it's of type float.
"""
return time.mktime(time.gmtime())
def dateToTime(date):
"""
Takes as input a datetime object and outputs the seconds since epoch.
"""
return time.mktime(date.timetuple())
def prettyDateNow():
"""
Returns a good looking string for the local time.
"""
return datetime.now().ctime()
def utcPrettyDateNow():
"""
Returns a good looking string for utc time.
"""
return datetime.utcnow().ctime()
def timeToPrettyDate(time_val):
return time.ctime(time_val)
def fromTimestamp(s):
"""
Converts a string that is output from the timestamp function back to a
datetime object
Args:
s (str): a ISO8601 formatted string.
ex. 1912-06-23T101234Z"
Note: we currently only support parsing strings that are generated from the
timestamp function and have no intention in supporting the full
standard.
"""
try:
date_part, time_part = s.split('T')
hours, minutes, seconds = time_part[:2], time_part[2:4], time_part[4:6]
year, month, day = date_part.split('-')
except:
raise e.InvalidTimestampFormat(s)
return datetime(int(year),
int(month),
int(day),
int(hours),
int(minutes),
int(seconds))
def timestamp(t=None):
"""
The timestamp for ooni reports follows ISO 8601 in
UTC time format.
We do not inlcude ':' and include seconds.
Example:
if the current date is "10:12:34 AM, June 23 1912" (datetime(1912, 6,
23, 10, 12, 34))
the timestamp will be:
"1912-06-23T101234Z"
Args:
t (datetime): a datetime object representing the
time to be represented (*MUST* be expressed
in UTC).
If not specified will default to the current time
in UTC.
"""
if t is None:
t = datetime.utcnow()
ISO8601 = "%Y-%m-%dT%H%M%SZ"
return t.strftime(ISO8601)
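# Illustrative round-trip (added for clarity; values are taken from the
# docstrings above):
#
#   >>> timestamp(datetime(1912, 6, 23, 10, 12, 34))
#   '1912-06-23T101234Z'
#   >>> fromTimestamp('1912-06-23T101234Z')
#   datetime.datetime(1912, 6, 23, 10, 12, 34)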
| bsd-2-clause |
Alwnikrotikz/cortex-vfx | python/IECoreMaya/ParameterUI.py | 12 | 14067 | ##########################################################################
#
# Copyright (c) 2007-2011, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import maya.cmds as cmds
import maya.mel
import maya.OpenMaya
import IECore
import IECoreMaya
## Base class for objects which are able to create an Attribute Editor widget for a single IECore.Parameter
# held on an IECoreMaya.ParameterisedHolder node.
# \todo Separate control drawing from labelling and layout, so these classes just create the right
# hand side of what they're doing at the moment. Then we can use them in different layouts like spreadsheets
# and wotnot.
class ParameterUI( IECoreMaya.UIElement ) :
textColumnWidthIndex = 145
singleWidgetWidthIndex = 70
sliderWidgetWidthIndex = 2 * 70
handlers = {}
## The parameterisedHolderNode is an MObject specifying the node holding the specified IECore.Parameter.
# Derived class __init__ implementations must create a layout to hold all their contents and pass this
# in the topLevelUI parameter (as for all UIElement derived classes).
# \todo Document the meaning of the various keyword arguments - perhaps the names of these should be
# prefixed with the name of the class which implements each argument so as to make it easier to find
# the documentation too.
def __init__( self, parameterisedHolderNode, parameter, topLevelUI, **kw ) :
IECoreMaya.UIElement.__init__( self, topLevelUI )
self.__node = maya.OpenMaya.MObjectHandle( parameterisedHolderNode )
self.parameter = parameter #IECore.Parameter
self.__labelWithNodeName = kw.get( "labelWithNodeName", False )
self.__longParameterName = kw.get( "longParameterName", parameter.name )
## Derived classes should override this method. The override should first call the base class method and
# then reconnect all created widgets to the new node/parameter. The node and parameter arguments are as
# for the __init__ function.
def replace( self, node, parameter ) :
self.__node = maya.OpenMaya.MObjectHandle( node )
self.parameter = parameter
## Returns the Maya node associated with this UI in the form of an OpenMaya.MObject
def node( self ) :
if not self.__node.isValid() :
raise RuntimeError, "IECoreMaya.ParameterUI.node(): The requested node is not valid"
return self.__node.object()
	## Returns an unambiguous full path for the Maya node associated with this UI.
def nodeName( self ) :
fnPH = IECoreMaya.FnParameterisedHolder( self.node() )
return fnPH.fullPathName()
## Returns the Maya plug associated with this UI in the form an OpenMaya.MPlug
def plug( self ) :
fnPH = IECoreMaya.FnParameterisedHolder( self.node() )
return fnPH.parameterPlug( self.parameter )
## Returns an unambiguous full path to the plug this ui represents.
def plugName( self ) :
fnPH = IECoreMaya.FnParameterisedHolder( self.node() )
plug = fnPH.parameterPlug( self.parameter )
return str( fnPH.fullPathName() + "." + plug.partialName() )
def layout( self ) :
return self._topLevelUI()
## Computes a nice label for the ui.
def label( self ):
if self.__labelWithNodeName :
n = self.nodeName() + "." + self.__longParameterName
if not self.__longParameterName :
# Top-level parameter comes through into here without a name
n = self.nodeName() + ".parameters"
return IECoreMaya.mel( "interToUI(\"" + n + "\")" ).value
else :
return IECoreMaya.mel( "interToUI(\"" + self.parameter.name + "\")" ).value
## Computes a wrapped annotation/tooltip for the ui
def description( self ):
extended = "%s\n\n%s" % ( self.plugName().split(".")[1], self.parameter.description )
return IECore.StringUtil.wrap( extended, 48 )
@staticmethod
def _defaultDragCallback( dragControl, x, y, modifiers, **kw ):
# Pass the dictionary of arguments as a string so that it can be captured and eval'ed in the drop callback
return [ 'ParameterUI', repr( kw ) ]
def addDragCallback( self, ctrl, **kw ) :
maya.cmds.control(
ctrl,
edit = True,
dragCallback = IECore.curry( ParameterUI._defaultDragCallback, nodeName = self.nodeName(), layoutName = self.layout(), **kw )
)
## Can be called by derived classes to add a useful popup menu to the specified ui element. This
# will replace any existing popup menus that are already there.
## \todo Understand and document the available keyword arguments. I think the only one is "attributeName",
# which is used to allow the name of specific elements of compound plugs to be specified to improve the box
# and vector uis. That needs rethinking in any case, as we shouldn't be storing attribute names anywhere as it
# makes us vulnerable to the names changing behind our backs.
def _addPopupMenu( self, parentUI, **kw ) :
existingMenus = maya.cmds.control( parentUI, query=True, popupMenuArray=True )
if existingMenus :
for m in existingMenus :
maya.cmds.deleteUI( m, menu=True )
IECoreMaya.createMenu( definition = IECore.curry( self.__popupMenuDefinition, **kw ), parent = parentUI, useInterToUI=False )
if "button1" in kw and kw["button1"] :
IECoreMaya.createMenu( definition = IECore.curry( self.__popupMenuDefinition, **kw ), parent = parentUI, button = 1, useInterToUI=False )
## Returns an IECore.MenuDefinition used to create a popup menu for the ParameterUI. This may
# be overridden by derived classes to add their own menu items. In this case they should first
# call the base class implementation before adding their items to the result.
def _popupMenuDefinition( self, **kw ) :
definition = IECore.MenuDefinition()
if cmds.getAttr( kw['attributeName'], lock = True) == 0:
settable = maya.cmds.getAttr( kw["attributeName"], settable=True )
if settable :
# make menu items for all presets and for the default value
for k in self.parameter.presetNames() :
definition.append( "/" + k, { "command" : IECore.curry( self.__selectValue, selection = k ) } )
if len( self.parameter.presetNames() ) > 0 :
definition.append( "/PresetDivider", { "divider" : True } )
definition.append( "/Default", { "command" : IECore.curry( self.__selectValue, selection = self.parameter.defaultValue ) } )
definition.append( "/ValueDivider", { "divider" : True } )
attrType = cmds.getAttr( kw["attributeName"], type=True )
if attrType in ( "float", "long" ) :
if cmds.getAttr( kw['attributeName'], keyable=True) and settable :
definition.append( "/Set Key", { "command" : IECore.curry( self.__setKey, **kw ) } )
expressions = cmds.listConnections(
kw['attributeName'],
d = False,
s = True,
type = "expression"
)
if not expressions :
hasConnections = self.__appendConnectionMenuDefinitions( definition, **kw )
if not hasConnections and settable :
definition.append( "/Create New Expression...", { "command" : IECore.curry( self.__expressionEditor, **kw ) } )
else:
definition.append( "/Edit Expression...", { "command" : IECore.curry( self.__expressionEditor, **kw ) } )
definition.append( "/Delete Expression", { "command" : IECore.curry( self.__deleteNode, nodeName = expressions[0] ) } )
else :
self.__appendConnectionMenuDefinitions( definition, **kw )
definition.append( "/ConnectionDivider", { "divider" : True } )
definition.append( "/Lock Attribute", { "command" : IECore.curry( self.__lock, **kw ) } )
else :
definition.append( "/Unlock Attribute", { "command" : IECore.curry( self.__unlock, **kw ) } )
return definition
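	# Illustrative sketch (hypothetical subclass, not part of this file) of the
	# override pattern described above : call the base class implementation
	# first, then append extra items to the returned IECore.MenuDefinition.
	#
	#	def _popupMenuDefinition( self, **kw ) :
	#
	#		definition = ParameterUI._popupMenuDefinition( self, **kw )
	#		definition.append( "/My Item", { "command" : self.__myItemCommand } )
	#		return definition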
def __appendConnectionMenuDefinitions( self, definition, **kw ) :
connections = cmds.listConnections(
kw['attributeName'],
d = False,
s = True,
plugs = True,
connections = True,
skipConversionNodes = True
)
definition.append( "/Connection Editor...", { "command" : IECore.curry( self.__connectionEditor ) } )
if connections :
definition.append( "/Open AE...",
{ "command" : IECore.curry( self.__showEditor, attributeName = connections[1] ) }
)
definition.append( "/Break Connection",
{
"command" : IECore.curry(
self.__disconnect,
source = connections[1],
destination = connections[0],
refreshAE = self.nodeName()
)
}
)
return True
else:
return False
def __popupMenuDefinition( self, **kw ) :
# call the protected function which can be overridden by
# derived classes. then let the callbacks do what they want.
definition = self._popupMenuDefinition( **kw )
for cb in self.__popupMenuCallbacks :
cb( definition, self.parameter, self.node() )
return definition
def __showEditor( self, attributeName ) :
split = attributeName.split('.', 1 )
node = split[0]
melCmd = 'showEditor "' + node + '"'
IECoreMaya.mel( melCmd.encode('ascii') )
def __deleteNode( self, nodeName = None ) :
cmds.delete( nodeName )
def __expressionEditor( self, attributeName = None ) :
split = attributeName.split('.', 1 )
node = split[0]
attr = split[1]
melCmd = 'expressionEditor EE "' + node + '" "' + attr + '"'
IECoreMaya.mel( melCmd.encode('ascii') )
def __connectionEditor( self ) :
maya.mel.eval(
str("ConnectionEditor;"+
"nodeOutliner -e -replace %(right)s connectWindow|tl|cwForm|connectWindowPane|rightSideCW;"+
"connectWindowSetRightLabel %(right)s;") % { 'right' : self.nodeName() } )
def __disconnect( self, source = None, destination = None, refreshAE = None ) :
cmds.disconnectAttr( source, destination )
if refreshAE :
maya.mel.eval( 'evalDeferred( "updateAE %s;")' % refreshAE )
def __setKey( self, **kw ):
cmds.setKeyframe(
kw['attributeName']
)
def __lock( self, **kw ):
cmds.setAttr(
kw['attributeName'],
lock = True
)
def __unlock( self, **kw ):
cmds.setAttr(
kw['attributeName'],
lock = False
)
def __selectValue( self, selection = None):
self.parameter.setValue( selection )
IECoreMaya.FnParameterisedHolder( self.node() ).setNodeValue( self.parameter )
@staticmethod
def registerUI( parameterTypeId, handlerType, uiTypeHint = None ):
key = (parameterTypeId, uiTypeHint)
if key in ParameterUI.handlers :
IECore.msg( IECore.Msg.Level.Warning, "ParameterUI.registerUI", "Handler for %s already registered." % str( key ) )
ParameterUI.handlers[key] = handlerType
## Returns a new ParameterUI instance suitable for representing
# the specified parameter on the specified parameterisedHolderNode.
# The node may either be specified as an OpenMaya.MObject or as
# a string or unicode object representing the node name.
@staticmethod
def create( parameterisedHolderNode, parameter, **kw ) :
if not isinstance( parameterisedHolderNode, maya.OpenMaya.MObject ) :
parameterisedHolderNode = IECoreMaya.StringUtil.dependencyNodeFromString( parameterisedHolderNode )
if not parameter.isInstanceOf( IECore.Parameter.staticTypeId() ) :
raise TypeError( "Parameter argument must derive from IECore.Parameter." )
if parameter.presetsOnly and len( parameter.presets() ) :
return IECoreMaya.PresetsOnlyParameterUI( parameterisedHolderNode, parameter, **kw )
uiTypeHint = None
try:
uiTypeHint = parameter.userData()['UI']['typeHint'].value
except:
pass
handlerType = None
typeId = parameter.typeId()
while typeId!=IECore.TypeId.Invalid :
handlerType = ParameterUI.handlers.get( ( typeId, uiTypeHint ), None )
if handlerType is not None :
break
handlerType = ParameterUI.handlers.get( ( typeId, None ), None )
if handlerType is not None :
break
typeId = IECore.RunTimeTyped.baseTypeId( typeId )
if handlerType is None :
IECore.msg( IECore.Msg.Level.Warning, "ParameterUI.create", "No UI registered for parameters of type \"%s\"" % parameter.typeName() )
return None
if 'longParameterName' in kw and len( kw['longParameterName'] ) :
kw['longParameterName'] += "." + parameter.name
else :
kw['longParameterName'] = parameter.name
parameterUI = handlerType( parameterisedHolderNode, parameter, **kw )
return parameterUI
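	# Illustrative usage sketch (the node name is hypothetical) : the holder
	# node may be given by name or as an MObject, as documented above.
	#
	#	fnPH = IECoreMaya.FnParameterisedHolder( "myHolderNode" )
	#	parameterised = fnPH.getParameterised()[0]
	#	ui = ParameterUI.create( "myHolderNode", parameterised.parameters() )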
__popupMenuCallbacks = []
## Registers a callback which is able to modify the popup menus associated
# with ParameterUIs. The callback should have the following signature :
#
# callback( menuDefinition, parameter, holderNode ).
@classmethod
def registerPopupMenuCallback( cls, callback ) :
cls.__popupMenuCallbacks.append( callback )
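	# Illustrative sketch (hypothetical callback, not part of this file) using
	# the documented signature callback( menuDefinition, parameter, holderNode ) :
	#
	#	def myMenuCallback( menuDefinition, parameter, holderNode ) :
	#
	#		menuDefinition.append( "/Reset To Default", { "command" : lambda : parameter.setValue( parameter.defaultValue ) } )
	#
	#	IECoreMaya.ParameterUI.registerPopupMenuCallback( myMenuCallback )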
| bsd-3-clause |
jaggu303619/asylum | openerp/addons/portal_crm/__openerp__.py | 55 | 1734 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Portal CRM',
'version': '0.1',
'category': 'Tools',
'complexity': 'easy',
'description': """
This module adds a contact page (with a contact form creating a lead when submitted) to your portal if crm and portal are installed.
====================================================================================================================================
""",
'author': 'OpenERP SA',
'depends': ['crm','portal'],
'data': [
'contact_view.xml',
],
'test': [
'test/contact_form.yml',
],
'installable': True,
'auto_install': True,
'category': 'Hidden',
'css': ['static/src/css/portal_crm.css'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
JuliBakagianni/META-SHARE | misc/tools/generateDS-2.7a/generate_coverage.py | 33 | 3800 | #!/usr/bin/env python
#
# Imports
import sys
from optparse import OptionParser
import os
import re
#
# Globals and constants
EXCLUDE_BASE = [
'MixedContainer',
'_MemberSpec',
]
PATTERN = "^class\s*(\w*)"
RE_PATTERN = re.compile(PATTERN)
#
# Functions for external use
def generate_coverage(outfile, infilename, options):
exclude_classes = EXCLUDE_BASE
exclude_classes.extend(options.exclude_additional.split())
wrt = outfile.write
mod_name = get_mod_name(infilename)
wrt(HEADER % (mod_name, ))
generate_coverage_1(wrt, infilename, mod_name, exclude_classes)
wrt(FOOTER)
def generate_coverage_1(wrt, infilename, mod_name, exclude_classes):
infile = open(infilename, 'r')
for line in infile:
mo = RE_PATTERN.search(line)
if mo:
name = mo.group(1)
if (name not in exclude_classes and
not name.startswith('Sax')):
wrt(" '%s': %s.%s,\n" % (name, mod_name, name, ))
infile.close()
def get_mod_name(infilename):
s1 = os.path.split(infilename)[1]
s2 = os.path.splitext(s1)[0]
return s2
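# Illustrative example (added; the file name is hypothetical):
#
#   >>> get_mod_name('some/dir/supermod.py')
#   'supermod'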
#
# Classes
#
# Functions for internal use and testing
#
# Templates
HEADER = '''\
#!/usr/bin/env python
#
# Imports
import sys
from optparse import OptionParser
import %s
#
# Globals and constants
CLASSES = {
'''
FOOTER = '''\
}
#
# Functions for external use
#
# Classes
#
# Functions for internal use and testing
def test(verbose):
instances = []
for name, class_ in CLASSES.iteritems():
instance = class_()
instances.append(instance)
return instances
USAGE_TEXT = """
python %prog [options] <somefile.xxx>
example:
python %prog somefile.xxx"""
def usage(parser):
parser.print_help()
sys.exit(1)
def main():
parser = OptionParser(USAGE_TEXT)
parser.add_option("-v", "--verbose", action="store_true",
dest="verbose", default=False,
help="produce additional (verbose) output")
(options, args) = parser.parse_args()
if len(args) != 0:
usage(parser)
test(options.verbose)
if __name__ == "__main__":
#import pdb; pdb.set_trace()
main()
'''
# End Templates
#
USAGE_TEXT = """
Generate a dictionary of class names and classes from a (superclass)
module generated by generateDS.py.
python %prog [options] <somefile.py>
Example:
python %prog somefile.py"""
def usage(parser):
parser.print_help()
sys.exit(1)
def main():
parser = OptionParser(USAGE_TEXT)
parser.add_option("-x", "--exclude-additional", type="string",
dest="exclude_additional", default='',
help="additional class names to be excluded (blank separated).")
parser.add_option("-f", "--force", action="store_true",
dest="force", default=False,
help="force over-write of outfile without asking.")
(options, args) = parser.parse_args()
outfilename = None
if len(args) == 2:
infilename = args[0]
outfilename = args[1]
elif len(args) == 1:
infilename = args[0]
outfilename = None
else:
usage(parser)
if outfilename is None:
outfile = sys.stdout
else:
if os.path.exists(outfilename):
if options.force:
outfile = open(outfilename, 'w')
else:
sys.stderr.write('Outfile (%s) exists. '
'Use -f (or --force) to override.\n' %
(outfilename, ))
sys.exit(1)
else:
outfile = open(outfilename, 'w')
generate_coverage(outfile, infilename, options)
if outfilename is not None:
outfile.close()
if __name__ == "__main__":
#import pdb; pdb.set_trace()
main()
| bsd-3-clause |
cernops/nova | nova/tests/functional/api_sample_tests/test_networks_associate.py | 9 | 3149 | # Copyright 2012 Nebula, Inc.
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.api_sample_tests import api_sample_base
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class NetworksAssociateJsonTests(api_sample_base.ApiSampleTestBaseV21):
ADMIN_API = True
extension_name = "os-networks-associate"
extra_extensions_to_load = ["os-networks"]
_sentinel = object()
def _get_flags(self):
f = super(NetworksAssociateJsonTests, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
        # Networks_associate requires Networks to be updated
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.os_networks.Os_networks')
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.networks_associate.'
'Networks_associate')
return f
def setUp(self):
super(NetworksAssociateJsonTests, self).setUp()
def fake_associate(self, context, network_id,
host=NetworksAssociateJsonTests._sentinel,
project=NetworksAssociateJsonTests._sentinel):
return True
self.stub_out("nova.network.api.API.associate", fake_associate)
def test_disassociate(self):
response = self._do_post('os-networks/1/action',
'network-disassociate-req',
{})
self.assertEqual(202, response.status_code)
self.assertEqual("", response.content)
def test_disassociate_host(self):
response = self._do_post('os-networks/1/action',
'network-disassociate-host-req',
{})
self.assertEqual(202, response.status_code)
self.assertEqual("", response.content)
def test_disassociate_project(self):
response = self._do_post('os-networks/1/action',
'network-disassociate-project-req',
{})
self.assertEqual(202, response.status_code)
self.assertEqual("", response.content)
def test_associate_host(self):
response = self._do_post('os-networks/1/action',
'network-associate-host-req',
{"host": "testHost"})
self.assertEqual(202, response.status_code)
self.assertEqual("", response.content)
| apache-2.0 |
linvictor88/vse-lbaas-driver | quantum/plugins/bigswitch/tests/test_server.py | 3 | 6568 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Big Switch Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mandeep Dhami, Big Switch Networks, Inc.
"""Test server mocking a REST based network ctrl.
Used for QuantumRestProxy tests
"""
import json
import re
from wsgiref.simple_server import make_server
class TestNetworkCtrl(object):
def __init__(self, host='', port=8000,
default_status='404 Not Found',
default_response='404 Not Found',
debug=False):
self.host = host
self.port = port
self.default_status = default_status
self.default_response = default_response
self.debug = debug
self.debug_env = False
self.debug_resp = False
self.matches = []
def match(self, prior, method_regexp, uri_regexp, handler, data=None,
multi=True):
"""Add to the list of exptected inputs.
The incoming request is matched in the order of priority. For same
priority, match the oldest match request first.
        :param prior: integer priority of this match (e.g. 100)
:param method_regexp: regexp to match method (e.g. 'PUT|POST')
:param uri_regexp: regexp to match uri (e.g. '/quantum/v?.?/')
:param handler: function with signature:
lambda(method, uri, body, **kwargs) : status, body
where
- method: HTTP method for this request
- uri: URI for this HTTP request
- body: body of this HTTP request
- kwargs are:
- data: data object that was in the match call
- node: TestNetworkCtrl object itself
- id: offset of the matching tuple
            and the return value is:
(status, body) where:
- status: HTTP resp status (e.g. '200 OK').
If None, use default_status
- body: HTTP resp body. If None, use ''
"""
        assert int(prior) == prior, 'Priority should be an integer >= 0'
        assert prior >= 0, 'Priority should be an integer >= 0'
lo, hi = 0, len(self.matches)
while lo < hi:
mid = (lo + hi) // 2
            # compare against the stored priority (first element of each match tuple)
            if prior < self.matches[mid][0]:
hi = mid
else:
lo = mid + 1
self.matches.insert(lo, (prior, method_regexp, uri_regexp, handler,
data, multi))
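    # Illustrative sketch (hypothetical handler, not part of the original
    # file) following the handler signature documented above:
    #
    #   def echo_handler(method, uri, body, **kwargs):
    #       return '200 OK', json.dumps({'uri': uri})
    #
    #   ctrl = TestNetworkCtrl(port=8899)
    #   ctrl.match(100, 'PUT|POST', '^/quantum/', echo_handler)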
def remove_id(self, id_):
assert id_ >= 0, 'remove_id: id < 0'
assert id_ <= len(self.matches), 'remove_id: id > len()'
self.matches.pop(id_)
def request_handler(self, method, uri, body):
retstatus = self.default_status
retbody = self.default_response
for i in xrange(len(self.matches)):
(prior, method_regexp, uri_regexp, handler, data, multi) = \
self.matches[i]
if re.match(method_regexp, method) and re.match(uri_regexp, uri):
kwargs = {
'data': data,
'node': self,
'id': i,
}
retstatus, retbody = handler(method, uri, body, **kwargs)
if multi is False:
self.remove_id(i)
break
if retbody is None:
retbody = ''
return (retstatus, retbody)
def server(self):
def app(environ, start_response):
uri = environ['PATH_INFO']
method = environ['REQUEST_METHOD']
headers = [('Content-type', 'text/json')]
content_len_str = environ['CONTENT_LENGTH']
content_len = 0
request_data = None
if content_len_str:
content_len = int(content_len_str)
request_data = environ.get('wsgi.input').read(content_len)
if request_data:
try:
request_data = json.loads(request_data)
except Exception:
# OK for it not to be json! Ignore it
pass
if self.debug:
print '\n'
if self.debug_env:
                    print '%s:' % 'environ'
for (key, value) in sorted(environ.iteritems()):
print ' %16s : %s' % (key, value)
print '%s %s' % (method, uri)
if request_data:
print '%s' % (
json.dumps(request_data, sort_keys=True, indent=4))
status, body = self.request_handler(method, uri, None)
body_data = None
if body:
try:
body_data = json.loads(body)
except Exception:
# OK for it not to be json! Ignore it
pass
start_response(status, headers)
if self.debug:
if self.debug_env:
print '%s: %s' % ('Response',
json.dumps(body_data, sort_keys=True, indent=4))
return body
return make_server(self.host, self.port, app)
def run(self):
print "Serving on port %d ..." % self.port
try:
self.server().serve_forever()
except KeyboardInterrupt:
pass
if __name__ == "__main__":
import sys
port = 8899
if len(sys.argv) > 1:
port = int(sys.argv[1])
debug = False
if len(sys.argv) > 2:
if sys.argv[2].lower() in ['debug', 'true']:
debug = True
ctrl = TestNetworkCtrl(port=port,
default_status='200 OK',
default_response='{"status":"200 OK"}',
debug=debug)
ctrl.match(100, 'GET', '/test',
lambda m, u, b, **k: ('200 OK', '["200 OK"]'))
ctrl.run()
| apache-2.0 |