repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
jpurma/Kataja | kataja/saved/DerivationTree.py | 1 | 9184 | # coding=utf-8
# ############################################################################
#
# *** Kataja - Biolinguistic Visualization tool ***
#
# Copyright 2013 Jukka Purma
#
# This file is part of Kataja.
#
# Kataja is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Kataja is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Kataja. If not, see <http://www.gnu.org/licenses/>.
#
# ############################################################################
from kataja.SavedField import SavedField
from kataja.SavedObject import SavedObject
from kataja.saved.DerivationStep import DerivationStep
from kataja.singletons import log, ctrl
from kataja.syntactic_state_to_nodes import syntactic_state_to_nodes
from kataja.syntax.SyntaxState import SyntaxState
from collections import defaultdict
DONE_SUCCESS = 7
class DerivationTree(SavedObject):
""" Stores derivation steps for one forest and takes care of related
logic """
def __init__(self, forest=None):
super().__init__()
self.forest = forest
self.d = {}
self.branches = [] # state_id:s, last step of the branch
self.branch = [] # state_id:s
self.current_step_index = 0
self.current_step_id = 0
self.current_branch_id = 0 # state_id
self.current_branch_index = 0
self.child_map = defaultdict(list)
def add_step(self, d_step: SyntaxState or DerivationStep):
""" Store given syntactic state as a derivation step. Forest can switch which derivation
state it is currently displaying.
:param d_step: SyntaxState or DerivationStep object
:return:
"""
if isinstance(d_step, SyntaxState):
d_step = DerivationStep(d_step)
self.save_derivation_step(d_step)
def save_derivation_step(self, derivation_step: DerivationStep):
if not derivation_step.frozen:
derivation_step.freeze()
self.d[derivation_step.state_id] = derivation_step.frozen
def build_active_branch(self):
self.branch = self.build_branch(self.current_branch_id)
def build_branch(self, branch_id):
b = []
step = self.d.get(branch_id, None)
done = set()
while step:
uid, data, msg, state_id, parent_id, state_type, sort_order = step
b.append(state_id)
if parent_id in done:
print('looping branch, at parent ', parent_id)
break
step = self.d.get(parent_id, None)
done.add(parent_id)
return list(reversed(b))
def collect_states(self):
states = {}
for key, val in self.d.items():
state_key = key.rsplit('_', 1)[-1]
if int(state_key) not in states:
uid, data, msg, state_id, parent_id, state_type, sort_order = val
states[int(state_key)] = msg, state_type
return states
def build_branches(self):
parents = {parent_id for uid, data, msg, state_id, parent_id, state_type, sort_order in self.d.values()}
sortable_branches = [(sort_order, state_id) for uid, data, msg, state_id, parent_id, state_type, sort_order in self.d.values() if state_id not in parents]
sortable_branches.sort()
self.branches = [state_id for sort_order, state_id in sortable_branches]
def build_child_map(self):
self.child_map = defaultdict(list)
sortable_values = [(sort_order, state_id, parent_id) for uid, data, msg, state_id, parent_id, state_type, sort_order in self.d.values() if parent_id]
sortable_values.sort()
for sort_order, state_id, parent_id in sortable_values:
self.child_map[parent_id].append(state_id)
def iterate_branch(self, branch_id):
step = self.d.get(branch_id, None)
while step:
uid, data, msg, state_id, parent_id, state_type, sort_order = step
yield state_id
step = self.d.get(parent_id, None)
def get_roots(self):
return [state_id for uid, data, msg, state_id, parent_id, state_type, sort_order in self.d.values() if not parent_id]
def update_dimensions(self):
self.build_branches()
self.build_child_map()
# @time_me
def restore_derivation_step(self):
d_step = self.get_derivation_step_by_id(self.current_step_id)
if d_step:
syntactic_state_to_nodes(self.forest, d_step.to_syn_state())
if d_step.msg:
log.info(f'<b>msg: {d_step.msg}</b>')
for log_msg in d_step.log:
if log_msg.strip():
log_msg = log_msg.replace("\t", " ")
log.info(f'<font color="#859900">{log_msg}</font>')
ctrl.main.parse_changed.emit()
def get_derivation_step_by_index(self, index):
state_id = self.branch[index]
return self.get_derivation_step_by_id(state_id)
def get_derivation_step_by_id(self, state_id):
uid, frozen_data, msg, state_id, parent_id, state_type, sort_order = self.d[state_id]
d_step = DerivationStep(None, uid=uid)
d_step.load_objects(frozen_data)
return d_step
def _find_branch_for(self, state_id):
for i, branch in enumerate(self.branches):
for step_id in self.iterate_branch(branch):
if step_id == state_id:
return branch
return 0
def jump_to_derivation_step_by_id(self, state_id):
self.current_step_id = state_id
if state_id in self.branch:
self.current_step_index = self.branch.index(state_id)
else:
self.current_branch_id = self._find_branch_for(state_id)
self.current_branch_index = self.branches.index(self.current_branch_id)
self.build_active_branch()
self.current_step_index = self.branch.index(state_id)
self.restore_derivation_step()
def next_derivation_step(self):
"""
:return:
"""
if self.current_step_index + 1 < len(self.branch):
self.current_step_index += 1
else:
self.current_step_index = 0
self.current_step_id = self.branch[self.current_step_index]
self.restore_derivation_step()
def previous_derivation_step(self):
"""
:return:
"""
if self.current_step_index > 0:
self.current_step_index -= 1
else:
self.current_step_index = len(self.branch) - 1
self.current_step_id = self.branch[self.current_step_index]
self.restore_derivation_step()
def jump_to_derivation_step(self, i):
"""
:return:
"""
self.current_step_index = i
self.current_step_id = self.branch[self.current_step_index]
self.restore_derivation_step()
def jump_to_first_step(self):
self.jump_to_derivation_step(0)
def jump_to_last_step(self):
self.jump_to_derivation_step(len(self.branch) - 1)
def next_parse(self):
if self.current_branch_index + 1 < len(self.branches):
self.current_branch_index += 1
else:
self.current_branch_index = 0
self.show_parse(self.current_branch_index)
def previous_parse(self):
if self.current_branch_index > 0:
self.current_branch_index -= 1
else:
self.current_branch_index = len(self.branches) - 1
self.show_parse(self.current_branch_index)
def show_parse(self, parse_index):
if self.branches:
self.current_branch_id = self.branches[parse_index]
self.build_active_branch()
self.jump_to_last_step()
ctrl.main.parse_changed.emit()
def show_first_passing_parse(self):
passing = []
for i, branch in enumerate(self.branches):
step = self.d.get(branch, None)
if step:
uid, data, msg, state_id, parent_id, state_type, sort_order = step
if state_type == DONE_SUCCESS:
passing.append((sort_order, i))
i = 0
if passing:
passing.sort()
sort_order, i = passing[0]
self.current_branch_index = i
self.show_parse(i)
# ############## #
# #
# Save support #
# #
# ############## #
forest = SavedField("forest")
d = SavedField("d")
branches = SavedField("branches")
branch = SavedField("branch")
current_step_index = SavedField("current_step_index")
current_step_id = SavedField("current_step_id")
current_branch_index = SavedField("current_branch_index")
current_branch_id = SavedField("current_branch_id")
| gpl-3.0 |
bxshi/gem5 | src/arch/x86/isa/insts/x87/arithmetic/change_sign.py | 70 | 2266 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop FABS {
absfp st(0), st(0), SetStatus=True
};
def macroop FCHS {
chsfp st(0), st(0), SetStatus=True
};
'''
| bsd-3-clause |
tedi3231/openerp | build/lib/openerp/addons/account/project/project.py | 38 | 2477 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_analytic_journal(osv.osv):
_name = 'account.analytic.journal'
_description = 'Analytic Journal'
_columns = {
'name': fields.char('Journal Name', size=64, required=True),
'code': fields.char('Journal Code', size=8),
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the analytic journal without removing it."),
'type': fields.selection([('sale','Sale'), ('purchase','Purchase'), ('cash','Cash'), ('general','General'), ('situation','Situation')], 'Type', size=32, required=True, help="Gives the type of the analytic journal. When it needs for a document (eg: an invoice) to create analytic entries, OpenERP will look for a matching journal of the same type."),
'line_ids': fields.one2many('account.analytic.line', 'journal_id', 'Lines'),
'company_id': fields.many2one('res.company', 'Company', required=True),
}
_defaults = {
'active': True,
'type': 'general',
'company_id': lambda self,cr,uid,c: self.pool.get('res.users').browse(cr, uid, uid, c).company_id.id,
}
account_analytic_journal()
class account_journal(osv.osv):
_inherit="account.journal"
_columns = {
'analytic_journal_id':fields.many2one('account.analytic.journal','Analytic Journal', help="Journal for analytic entries"),
}
account_journal()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
zhanghenry/stocks | django/conf/locale/ml/formats.py | 394 | 1815 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'N j, Y'
TIME_FORMAT = 'P'
DATETIME_FORMAT = 'N j, Y, P'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'F j'
SHORT_DATE_FORMAT = 'm/d/Y'
SHORT_DATETIME_FORMAT = 'm/d/Y P'
FIRST_DAY_OF_WEEK = 0 # Sunday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = (
'%Y-%m-%d', '%m/%d/%Y', '%m/%d/%y', # '2006-10-25', '10/25/2006', '10/25/06'
# '%b %d %Y', '%b %d, %Y', # 'Oct 25 2006', 'Oct 25, 2006'
# '%d %b %Y', '%d %b, %Y', # '25 Oct 2006', '25 Oct, 2006'
# '%B %d %Y', '%B %d, %Y', # 'October 25 2006', 'October 25, 2006'
# '%d %B %Y', '%d %B, %Y', # '25 October 2006', '25 October, 2006'
)
DATETIME_INPUT_FORMATS = (
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%Y %H:%M:%S', # '10/25/2006 14:30:59'
'%m/%d/%Y %H:%M:%S.%f', # '10/25/2006 14:30:59.000200'
'%m/%d/%Y %H:%M', # '10/25/2006 14:30'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%y %H:%M:%S', # '10/25/06 14:30:59'
'%m/%d/%y %H:%M:%S.%f', # '10/25/06 14:30:59.000200'
'%m/%d/%y %H:%M', # '10/25/06 14:30'
'%m/%d/%y', # '10/25/06'
)
DECIMAL_SEPARATOR = '.'
THOUSAND_SEPARATOR = ','
NUMBER_GROUPING = 3
| bsd-3-clause |
speef/linux | scripts/rt-tester/rt-tester.py | 11005 | 5307 | #!/usr/bin/python
#
# rt-mutex tester
#
# (C) 2006 Thomas Gleixner <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
import os
import sys
import getopt
import shutil
import string
# Globals
quiet = 0
test = 0
comments = 0
sysfsprefix = "/sys/devices/system/rttest/rttest"
statusfile = "/status"
commandfile = "/command"
# Command opcodes
cmd_opcodes = {
"schedother" : "1",
"schedfifo" : "2",
"lock" : "3",
"locknowait" : "4",
"lockint" : "5",
"lockintnowait" : "6",
"lockcont" : "7",
"unlock" : "8",
"signal" : "11",
"resetevent" : "98",
"reset" : "99",
}
test_opcodes = {
"prioeq" : ["P" , "eq" , None],
"priolt" : ["P" , "lt" , None],
"priogt" : ["P" , "gt" , None],
"nprioeq" : ["N" , "eq" , None],
"npriolt" : ["N" , "lt" , None],
"npriogt" : ["N" , "gt" , None],
"unlocked" : ["M" , "eq" , 0],
"trylock" : ["M" , "eq" , 1],
"blocked" : ["M" , "eq" , 2],
"blockedwake" : ["M" , "eq" , 3],
"locked" : ["M" , "eq" , 4],
"opcodeeq" : ["O" , "eq" , None],
"opcodelt" : ["O" , "lt" , None],
"opcodegt" : ["O" , "gt" , None],
"eventeq" : ["E" , "eq" , None],
"eventlt" : ["E" , "lt" , None],
"eventgt" : ["E" , "gt" , None],
}
# Print usage information
def usage():
print "rt-tester.py <-c -h -q -t> <testfile>"
print " -c display comments after first command"
print " -h help"
print " -q quiet mode"
print " -t test mode (syntax check)"
print " testfile: read test specification from testfile"
print " otherwise from stdin"
return
# Print progress when not in quiet mode
def progress(str):
if not quiet:
print str
# Analyse a status value
def analyse(val, top, arg):
intval = int(val)
if top[0] == "M":
intval = intval / (10 ** int(arg))
intval = intval % 10
argval = top[2]
elif top[0] == "O":
argval = int(cmd_opcodes.get(arg, arg))
else:
argval = int(arg)
# progress("%d %s %d" %(intval, top[1], argval))
if top[1] == "eq" and intval == argval:
return 1
if top[1] == "lt" and intval < argval:
return 1
if top[1] == "gt" and intval > argval:
return 1
return 0
# Parse the commandline
try:
(options, arguments) = getopt.getopt(sys.argv[1:],'chqt')
except getopt.GetoptError, ex:
usage()
sys.exit(1)
# Parse commandline options
for option, value in options:
if option == "-c":
comments = 1
elif option == "-q":
quiet = 1
elif option == "-t":
test = 1
elif option == '-h':
usage()
sys.exit(0)
# Select the input source
if arguments:
try:
fd = open(arguments[0])
except Exception,ex:
sys.stderr.write("File not found %s\n" %(arguments[0]))
sys.exit(1)
else:
fd = sys.stdin
linenr = 0
# Read the test patterns
while 1:
linenr = linenr + 1
line = fd.readline()
if not len(line):
break
line = line.strip()
parts = line.split(":")
if not parts or len(parts) < 1:
continue
if len(parts[0]) == 0:
continue
if parts[0].startswith("#"):
if comments > 1:
progress(line)
continue
if comments == 1:
comments = 2
progress(line)
cmd = parts[0].strip().lower()
opc = parts[1].strip().lower()
tid = parts[2].strip()
dat = parts[3].strip()
try:
# Test or wait for a status value
if cmd == "t" or cmd == "w":
testop = test_opcodes[opc]
fname = "%s%s%s" %(sysfsprefix, tid, statusfile)
if test:
print fname
continue
while 1:
query = 1
fsta = open(fname, 'r')
status = fsta.readline().strip()
fsta.close()
stat = status.split(",")
for s in stat:
s = s.strip()
if s.startswith(testop[0]):
# Separate status value
val = s[2:].strip()
query = analyse(val, testop, dat)
break
if query or cmd == "t":
break
progress(" " + status)
if not query:
sys.stderr.write("Test failed in line %d\n" %(linenr))
sys.exit(1)
# Issue a command to the tester
elif cmd == "c":
cmdnr = cmd_opcodes[opc]
# Build command string and sys filename
cmdstr = "%s:%s" %(cmdnr, dat)
fname = "%s%s%s" %(sysfsprefix, tid, commandfile)
if test:
print fname
continue
fcmd = open(fname, 'w')
fcmd.write(cmdstr)
fcmd.close()
except Exception,ex:
sys.stderr.write(str(ex))
sys.stderr.write("\nSyntax error in line %d\n" %(linenr))
if not test:
fd.close()
sys.exit(1)
# Normal exit pass
print "Pass"
sys.exit(0)
| gpl-2.0 |
Luckyseal/wechatpy | wechatpy/client/api/device.py | 8 | 8280 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import base64
import urllib
from wechatpy.utils import to_text, to_binary
from wechatpy.client.api.base import BaseWeChatAPI
class WeChatDevice(BaseWeChatAPI):
API_BASE_URL = 'https://api.weixin.qq.com/device/'
def send_message(self, device_type, device_id, user_id, content):
"""
Proactively send a message to a device.
For details, see
http://iot.weixin.qq.com/document-2_3.html
:param device_type: device type; currently the "original ID" of the official account
:param device_id: device ID
:param user_id: the openid of the WeChat user account
:param content: message content, BASE64-encoded
:return: the returned JSON data packet
"""
content = to_text(base64.b64encode(to_binary(content)))
return self._post(
'transmsg',
data={
'device_type': device_type,
'device_id': device_id,
'openid': user_id,
'content': content
}
)
def create_qrcode(self, device_ids):
"""
Get device QR codes.
For details, see
http://iot.weixin.qq.com/document-2_5.html
:param device_ids: list of device ids
:return: the returned JSON data packet
"""
return self._post(
'create_qrcode',
data={
'device_num': len(device_ids),
'device_id_list': device_ids
}
)
def get_qrcode_url(self, ticket, data=None):
"""
Exchange a ticket for the QR code URL.
For details, see
http://iot.weixin.qq.com/document-2_5.html
:param ticket: QR code ticket
:param data: extra data
:return: the QR code URL
"""
url = 'http://we.qq.com/d/{ticket}'.format(ticket=ticket)
if data:
if isinstance(data, (dict, tuple, list)):
data = urllib.urlencode(data)
data = to_text(base64.b64encode(to_binary(data)))
url = '{base}#{data}'.format(base=url, data=data)
return url
def bind(self, ticket, device_id, user_id):
"""
Bind a device.
For details, see
http://iot.weixin.qq.com/document-2_12.html
:param ticket: credential proving the legitimacy of the bind operation (generated by the WeChat backend; obtained by the third-party H5 page via the client jsapi)
:param device_id: device id
:param user_id: the user's openid
:return: the returned JSON data packet
"""
return self._post(
'bind',
data={
'ticket': ticket,
'device_id': device_id,
'openid': user_id
}
)
def unbind(self, ticket, device_id, user_id):
"""
Unbind a device.
For details, see
http://iot.weixin.qq.com/document-2_12.html
:param ticket: credential proving the legitimacy of the bind operation (generated by the WeChat backend; obtained by the third-party H5 page via the client jsapi)
:param device_id: device id
:param user_id: the user's openid
:return: the returned JSON data packet
"""
return self._post(
'unbind',
data={
'ticket': ticket,
'device_id': device_id,
'openid': user_id
}
)
def compel_bind(self, device_id, user_id):
"""
Force-bind a user and a device.
For details, see
http://iot.weixin.qq.com/document-2_12.html
:param device_id: device id
:param user_id: the user's openid
:return: the returned JSON data packet
"""
return self._post(
'compel_bind',
data={
'device_id': device_id,
'openid': user_id
}
)
force_bind = compel_bind
def compel_unbind(self, device_id, user_id):
"""
Force-unbind a user and a device.
For details, see
http://iot.weixin.qq.com/document-2_12.html
:param device_id: device id
:param user_id: the user's openid
:return: the returned JSON data packet
"""
return self._post(
'compel_unbind',
data={
'device_id': device_id,
'openid': user_id
}
)
force_unbind = compel_unbind
def get_stat(self, device_id):
"""
Query device status.
For details, see
http://iot.weixin.qq.com/document-2_7.html
:param device_id: device id
:return: the returned JSON data packet
"""
return self._post(
'get_stat',
data={'device_id': device_id}
)
def verify_qrcode(self, ticket):
"""
Verify a QR code.
For details, see
http://iot.weixin.qq.com/document-2_9.html
:param ticket: the ticket of the device QR code
:return: the returned JSON data packet
"""
return self._post(
'verify_qrcode',
data={'ticket': ticket}
)
def get_user_id(self, device_type, device_id):
"""
Get the openid bound to a device.
For details, see
http://iot.weixin.qq.com/document-2_4.html
:param device_type: device type; currently the "original ID" of the official account
:param device_id: device id
:return: the returned JSON data packet
"""
return self._post(
'get_openid',
data={
'device_type': device_type,
'device_id': device_id
}
)
get_open_id = get_user_id
def get_binded_devices(self, user_id):
"""
Get the list of deviceids the user has bound under the current devicetype, looked up by openid.
For details, see
http://iot.weixin.qq.com/document-2_13.html
:param user_id: openid of the user to query
:return: the returned JSON data packet
"""
return self._post(
'get_bind_device',
data={'openid': user_id}
)
get_bind_device = get_binded_devices
def send_status_message(self, device_type, device_id, user_id, status):
"""
Proactively send a device status message to the WeChat client.
For details, see
http://iot.weixin.qq.com/document-2_10.html
:param device_type: device type; currently the "original ID" of the official account
:param device_id: device ID
:param user_id: the openid of the WeChat user account
:param status: device status: 0 -- not connected, 1 -- connected
:return: the returned JSON data packet
"""
return self._post(
'transmsg',
data={
'device_type': device_type,
'device_id': device_id,
'open_id': user_id,
'device_status': status
}
)
def authorize(self, devices, op_type=0):
"""
Authorize devices.
For details, see
http://iot.weixin.qq.com/document-2_6.html
:param devices: list of device info
:param op_type: type of the requested operation, restricted to: 0: device authorization, 1: device update
:return: the returned JSON data packet
"""
return self._post(
'authorize',
data={
'device_num': len(devices),
'device_list': devices,
'op_type': op_type
}
)
def get_qrcode(self):
"""
Get deviceids and QR codes.
For details, see
http://iot.weixin.qq.com/document-2_11.html
:return: the returned JSON data packet
"""
return self._get('getqrcode')
def authorize_device(self, devices, op_type=1):
"""
Authorize devices.
For details, see
http://iot.weixin.qq.com/document-2_6.html
:param devices: list of device info
:param op_type: type of the requested operation, restricted to: 0: device authorization, 1: device update
:return: the returned JSON data packet
"""
return self._post(
'authorize_device',
data={
'device_num': len(devices),
'device_list': devices,
'op_type': op_type
}
)
| mit |
fuhongliang/odoo | addons/document/__init__.py | 434 | 1128 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import content_index
import std_index
import document
import report
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
paulzhousz/shadowsocks | shadowsocks/crypto/rc4_md5.py | 1042 | 1339 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import hashlib
from shadowsocks.crypto import openssl
__all__ = ['ciphers']
def create_cipher(alg, key, iv, op, key_as_bytes=0, d=None, salt=None,
i=1, padding=1):
md5 = hashlib.md5()
md5.update(key)
md5.update(iv)
rc4_key = md5.digest()
return openssl.OpenSSLCrypto(b'rc4', rc4_key, b'', op)
ciphers = {
'rc4-md5': (16, 16, create_cipher),
}
def test():
from shadowsocks.crypto import util
cipher = create_cipher('rc4-md5', b'k' * 32, b'i' * 16, 1)
decipher = create_cipher('rc4-md5', b'k' * 32, b'i' * 16, 0)
util.run_cipher(cipher, decipher)
if __name__ == '__main__':
test()
| apache-2.0 |
BhallaLab/moose | moose-examples/paper-2015/Fig2_elecModels/Fig2D.py | 2 | 3872 | #########################################################################
## This program is part of 'MOOSE', the
## Messaging Object Oriented Simulation Environment.
## Copyright (C) 2015 Upinder S. Bhalla. and NCBS
## It is made available under the terms of the
## GNU Lesser General Public License version 2.1
## See the file COPYING.LIB for the full notice.
#########################################################################
# This example illustrates loading a model from an SWC file, inserting
# spines, and viewing it.
try:
import moogli
except Exception as e:
print( "[INFO ] Could not import moogli. Quitting..." )
quit()
import moose
from PyQt4 import Qt, QtCore, QtGui
import sys
import os
import rdesigneur as rd
PI = 3.14159265358979
frameRunTime = 0.0001
runtime = 0.1
inject = 15e-10
simdt = 5e-5
RM = 1.0
RA = 1.0
CM = 0.01
# This is the expression used to set spine spacing:
spineSpacing = "dia * 2"
minSpacing = 0.1e-6
spineSize = 1.0
spineSizeDistrib = 0.5
spineAngle = 0
spineAngleDistrib = 2*PI
def create_vm_viewer(rdes):
network = moogli.extensions.moose.read(rdes.elecid.path,
vertices=10)
normalizer = moogli.utilities.normalizer(-0.08,
0.02,
clipleft=True,
clipright=True)
colormap = moogli.colors.UniformColorMap([moogli.colors.Color(0.0,
0.5,
1.0,
1.0),
moogli.colors.Color(1.0,
0.0,
0.0,
0.9)])
mapper = moogli.utilities.mapper(colormap, normalizer)
vms = [moose.element(x).Vm for x in list(network.shapes.keys())]
network.set("color", vms, mapper)
def interlude(view):
moose.start(frameRunTime)
#vms = [moose.element(x).Vm for x in network.shapes.keys()]
#network.set("color", vms, mapper)
view.pitch(0.01)
currTime = moose.element('/clock').currentTime
if currTime >= runtime:
view.stop()
viewer = moogli.Viewer("vm-viewer")
viewer.attach_shapes(list(network.shapes.values()))
view = moogli.View("vm-view",
interlude=interlude)
viewer.attach_view(view)
return viewer
def main():
######## Put your favourite cell model here ######
##This one is from PMID 19146814: Peng et al Neuron 2009
filename = 'cells/K-18.CNG.swc'
moose.Neutral( '/library' )
rdes = rd.rdesigneur( \
cellProto = [[ filename, 'elec' ] ],\
spineProto = [['makeSpineProto()', 'spine' ]] ,\
spineDistrib = [ \
['spine', '#', \
'spacing', spineSpacing, \
'spacingDistrib', str( minSpacing ), \
'angle', str( spineAngle ), \
'angleDistrib', str( spineAngleDistrib ), \
'size', str( spineSize ), \
'sizeDistrib', str( spineSizeDistrib ) ] \
] \
)
rdes.buildModel('/model')
moose.reinit()
compts = moose.wildcardFind( "/model/elec/#[ISA=CompartmentBase]" )
compts[0].inject = inject
################## Now we set up the display ########################
print("Setting Up 3D Display")
app = QtGui.QApplication(sys.argv)
vm_viewer = create_vm_viewer(rdes)
vm_viewer.showMaximized()
vm_viewer.start()
return app.exec_()
if __name__ == '__main__':
main()
| gpl-3.0 |
AlphaCluster/NewsBlur | apps/rss_feeds/migrations/0039_feedicon.py | 18 | 6742 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'FeedIcon'
db.create_table('rss_feeds_feedicon', (
('feed', self.gf('utils.fields.AutoOneToOneField')(related_name='icon', unique=True, primary_key=True, to=orm['rss_feeds.Feed'])),
('color', self.gf('django.db.models.fields.CharField')(max_length=6, null=True, blank=True)),
('data', self.gf('django.db.models.fields.TextField')()),
('icon_url', self.gf('django.db.models.fields.CharField')(max_length=2000, null=True, blank=True)),
('not_found', self.gf('django.db.models.fields.BooleanField')(default=False)),
))
db.send_create_signal('rss_feeds', ['FeedIcon'])
def backwards(self, orm):
# Deleting model 'FeedIcon'
db.delete_table('rss_feeds_feedicon')
models = {
'rss_feeds.duplicatefeed': {
'Meta': {'object_name': 'DuplicateFeed'},
'duplicate_address': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'duplicate_feed_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'duplicate_addresses'", 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rss_feeds.feed': {
'Meta': {'ordering': "['feed_title']", 'object_name': 'Feed', 'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'active_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'db_index': 'True'}),
'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'exception_code': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fetched_once': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_feed_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'has_page_exception': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'premium_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'queued_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'rss_feeds.feeddata': {
'Meta': {'object_name': 'FeedData'},
'feed': ('utils.fields.AutoOneToOneField', [], {'related_name': "'data'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'feed_tagline': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'story_count_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedicon': {
'Meta': {'object_name': 'FeedIcon'},
'color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {}),
'feed': ('utils.fields.AutoOneToOneField', [], {'related_name': "'icon'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['rss_feeds.Feed']"}),
'icon_url': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'not_found': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'rss_feeds.feedloadtime': {
'Meta': {'object_name': 'FeedLoadtime'},
'date_accessed': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'loadtime': ('django.db.models.fields.FloatField', [], {})
},
'rss_feeds.feedupdatehistory': {
'Meta': {'object_name': 'FeedUpdateHistory'},
'average_per_feed': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1'}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number_of_feeds': ('django.db.models.fields.IntegerField', [], {}),
'seconds_taken': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['rss_feeds']
| mit |
ebaskoro/node-gyp | gyp/test/actions/gyptest-default.py | 243 | 2407 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies simple actions when using the default build target.
"""
import TestGyp
test = TestGyp.TestGyp(workdir='workarea_default')
test.run_gyp('actions.gyp', chdir='src')
test.relocate('src', 'relocate/src')
# Some gyp files use an action that mentions an output but never
# writes it as a means to making the action run on every build. That
# doesn't mesh well with ninja's semantics. TODO(evan): figure out
# how to work always-run actions in to ninja.
# Android also can't do this as it doesn't have order-only dependencies.
if test.format in ['ninja', 'android']:
test.build('actions.gyp', test.ALL, chdir='relocate/src')
else:
# Test that an "always run" action increases a counter on multiple
# invocations, and that a dependent action updates in step.
test.build('actions.gyp', chdir='relocate/src')
test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '1')
test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '1')
test.build('actions.gyp', chdir='relocate/src')
test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')
# The "always run" action only counts to 2, but the dependent target
# will count forever if it's allowed to run. This verifies that the
# dependent target only runs when the "always run" action generates
# new output, not just because the "always run" ran.
test.build('actions.gyp', test.ALL, chdir='relocate/src')
test.must_match('relocate/src/subdir1/actions-out/action-counter.txt', '2')
test.must_match('relocate/src/subdir1/actions-out/action-counter_2.txt', '2')
expect = """\
Hello from program.c
Hello from make-prog1.py
Hello from make-prog2.py
"""
if test.format == 'xcode':
chdir = 'relocate/src/subdir1'
else:
chdir = 'relocate/src'
test.run_built_executable('program', chdir=chdir, stdout=expect)
test.must_match('relocate/src/subdir2/file.out', "Hello from make-file.py\n")
expect = "Hello from generate_main.py\n"
if test.format == 'xcode':
chdir = 'relocate/src/subdir3'
else:
chdir = 'relocate/src'
test.run_built_executable('null_input', chdir=chdir, stdout=expect)
test.pass_test()
| mit |
pudo/aleph | aleph/migrate/versions/666668eae682_refactor_alerts.py | 5 | 1856 | """refactor alerts
Revision ID: 666668eae682
Revises: 8526f853643a
Create Date: 2016-05-05 16:46:05.656646
"""
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '666668eae682'
down_revision = '8526f853643a'
def upgrade():
op.add_column('alert', sa.Column('entity_id', sa.String(length=32),
nullable=True))
op.add_column('alert', sa.Column('query_text', sa.Unicode(), nullable=True))
op.create_foreign_key(None, 'alert', 'entity', ['entity_id'], ['id'])
bind = op.get_bind()
meta = sa.MetaData()
meta.bind = bind
meta.reflect()
alert_table = meta.tables['alert']
rp = bind.execute(sa.select([alert_table]))
for alert in rp.fetchall():
deleted_at = alert.deleted_at
query_text = alert.query.get('q', [None])[0]
entity_id = alert.query.get('entity', [None])[0]
if entity_id is not None and len(entity_id) < 30:
entity_id = None
if entity_id is None and query_text is None:
deleted_at = datetime.utcnow()
q = sa.update(alert_table).where(alert_table.c.id == alert.id)
q = q.values(query_text=query_text,
entity_id=entity_id,
deleted_at=deleted_at)
bind.execute(q)
op.drop_column('alert', 'query')
op.drop_column('alert', 'signature')
def downgrade():
op.add_column('alert', sa.Column('signature', sa.VARCHAR(),
autoincrement=False, nullable=True))
op.add_column('alert', sa.Column('query', postgresql.JSONB(),
autoincrement=False, nullable=True))
op.drop_constraint(None, 'alert', type_='foreignkey')
op.drop_column('alert', 'query_text')
op.drop_column('alert', 'entity_id')
| mit |
Tillsten/PyTournament | src/TourneyModes_v2.py | 1 | 3600 | '''
Created on 26.01.2010
@author: Tillsten
'''
from TeamsAndPlayers import *
import random
class Round(object):
def __init__(self, left_teams, freedraw):
self.games_open = []
self.games_finnished = []
self.games_in_progress = []
self.games = []
while left_teams != []:
a = left_teams.pop(0)
b = left_teams.pop()
g = Game(a, b)
self.games.append(g)
if b != freedraw:
self.games_open.append(g)
else:
g.insert_result([(7, 3)])
self.games_finnished.append(g)
#print self.games
class Single_ko(object):
def __init__(self, players_per_team = 1):
self.participants = []
self.players_per_team = players_per_team
self.rounds = []
self.games_open = []
self.games_finnished = []
self.games_in_progress = []
self.finnished = False
def add_team(self, player):
if self.players_per_team == 1:
self.participants.append(Single_team(player))
if self.players_per_team == 2:
self.participants.append(Double_team(player[0], player[1]))
def start(self):
self.freedraw = Single_team(Player("Freilos", "", 0))
n = 1
random.shuffle(self.participants)
while len(self.participants) > 2 ** n:
n += 1
for i in range(len(self.participants), 2 ** n):
self.participants.append(self.freedraw)
self.build_gametree(n)
def build_gametree(self, n):
self.games = []
left_teams = self.participants
first_round = []
while left_teams != []:
a = left_teams.pop(0)
b = left_teams.pop()
g = Game(a, b)
self.games.append(g)
first_round.append(g)
if b != self.freedraw:
self.games_open.append(g)
else:
g.insert_result([(7, 3)])
self.games_finnished.append(g)
while len(last_round) < 4:
left_teams = [g.winner for g in last_round]
while left_teams != []:
a = left_teams.pop(0)
b = left_teams.pop()
g = Game(a, b)
self.games.append(g)
for i in self.games: print i
if __name__ == '__main__':
import psyco
psyco.full()
anton = Single_team(Player("Anton", "A.", 1))
bart = Single_team(Player("Bart", "B.", 2))
caro = Single_team(Player("Caro", "C.", 3))
dieter = Single_team(Player("Dieter", "D.", 4))
edwin = Single_team(Player("Edwin", "E.", 5))
fi = Single_team(Player("Fieter", "F.", 6))
sko = Single_ko()
sko.add_team(anton)
sko.add_team(bart)
sko.add_team(caro)
sko.add_team(dieter)
sko.add_team(edwin)
sko.add_team(fi)
sko.start()
# sko.start_open_game()
# sko.insert_result([(3, 8)], 0)
# sko.start_open_game()
# sko.insert_result([(8, 3)], 0)
# sko.start_open_game()
# sko.start_open_game()
# sko.insert_result([(3, 4), (5, 9)])
# sko.insert_result([(3, 4), (5, 9)])
# sko.start_open_game()
# sko.start_open_game()
# sko.insert_result([(3, 4), (5, 9)])
# sko.insert_result([(3, 4), (5, 9)])
# print "rounds"
# for i in sko.rounds:
# for j in (i.games):
# print j
# sko.rankings()
| gpl-3.0 |
orchidinfosys/odoo | addons/website/tests/test_crawl.py | 54 | 3414 | # -*- coding: utf-8 -*-
import logging
import urlparse
import time
import lxml.html
import openerp
import re
_logger = logging.getLogger(__name__)
class Crawler(openerp.tests.HttpCase):
""" Test suite crawling an openerp CMS instance and checking that all
internal links lead to a 200 response.
If a username and a password are provided, authenticates the user before
starting the crawl
"""
at_install = False
post_install = True
def crawl(self, url, seen=None, msg=''):
if seen == None:
seen = set()
url_slug = re.sub(r"[/](([^/=?&]+-)?[0-9]+)([/]|$)", '/<slug>/', url)
url_slug = re.sub(r"([^/=?&]+)=[^/=?&]+", '\g<1>=param', url_slug)
if url_slug in seen:
return seen
else:
seen.add(url_slug)
_logger.info("%s %s", msg, url)
r = self.url_open(url)
code = r.getcode()
self.assertIn( code, xrange(200, 300), "%s Fetching %s returned error response (%d)" % (msg, url, code))
if r.info().gettype() == 'text/html':
doc = lxml.html.fromstring(r.read())
for link in doc.xpath('//a[@href]'):
href = link.get('href')
parts = urlparse.urlsplit(href)
# href with any fragment removed
href = urlparse.urlunsplit((
parts.scheme,
parts.netloc,
parts.path,
parts.query,
''
))
# FIXME: handle relative link (not parts.path.startswith /)
if parts.netloc or \
not parts.path.startswith('/') or \
parts.path == '/web' or\
parts.path.startswith('/web/') or \
parts.path.startswith('/en_US/') or \
(parts.scheme and parts.scheme not in ('http', 'https')):
continue
self.crawl(href, seen, msg)
return seen
def test_10_crawl_public(self):
t0 = time.time()
t0_sql = self.registry.test_cr.sql_log_count
seen = self.crawl('/', msg='Anonymous Coward')
count = len(seen)
duration = time.time() - t0
sql = self.registry.test_cr.sql_log_count - t0_sql
_logger.log(25, "public crawled %s urls in %.2fs %s queries, %.3fs %.2fq per request, ", count, duration, sql, duration/count, float(sql)/count)
def test_20_crawl_demo(self):
t0 = time.time()
t0_sql = self.registry.test_cr.sql_log_count
self.authenticate('demo', 'demo')
seen = self.crawl('/', msg='demo')
count = len(seen)
duration = time.time() - t0
sql = self.registry.test_cr.sql_log_count - t0_sql
_logger.log(25, "demo crawled %s urls in %.2fs %s queries, %.3fs %.2fq per request", count, duration, sql, duration/count, float(sql)/count)
def test_30_crawl_admin(self):
t0 = time.time()
t0_sql = self.registry.test_cr.sql_log_count
self.authenticate('admin', 'admin')
seen = self.crawl('/', msg='admin')
count = len(seen)
duration = time.time() - t0
sql = self.registry.test_cr.sql_log_count - t0_sql
_logger.log(25, "admin crawled %s urls in %.2fs %s queries, %.3fs %.2fq per request", count, duration, sql, duration/count, float(sql)/count)
| gpl-3.0 |
flyfei/python-for-android | python-build/python-libs/gdata/build/lib/gdata/tlslite/utils/keyfactory.py | 361 | 8791 | """Factory functions for asymmetric cryptography.
@sort: generateRSAKey, parseXMLKey, parsePEMKey, parseAsPublicKey,
parseAsPrivateKey
"""
from compat import *
from RSAKey import RSAKey
from Python_RSAKey import Python_RSAKey
import cryptomath
if cryptomath.m2cryptoLoaded:
from OpenSSL_RSAKey import OpenSSL_RSAKey
if cryptomath.pycryptoLoaded:
from PyCrypto_RSAKey import PyCrypto_RSAKey
# **************************************************************************
# Factory Functions for RSA Keys
# **************************************************************************
def generateRSAKey(bits, implementations=["openssl", "python"]):
"""Generate an RSA key with the specified bit length.
@type bits: int
@param bits: Desired bit length of the new key's modulus.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: A new RSA private key.
"""
for implementation in implementations:
if implementation == "openssl" and cryptomath.m2cryptoLoaded:
return OpenSSL_RSAKey.generate(bits)
elif implementation == "python":
return Python_RSAKey.generate(bits)
raise ValueError("No acceptable implementations")
def parseXMLKey(s, private=False, public=False, implementations=["python"]):
"""Parse an XML-format key.
The XML format used here is specific to tlslite and cryptoIDlib. The
format can store the public component of a key, or the public and
private components. For example::
<publicKey xmlns="http://trevp.net/rsa">
<n>4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou...
<e>Aw==</e>
</publicKey>
<privateKey xmlns="http://trevp.net/rsa">
<n>4a5yzB8oGNlHo866CAspAC47M4Fvx58zwK8pou...
<e>Aw==</e>
<d>JZ0TIgUxWXmL8KJ0VqyG1V0J3ern9pqIoB0xmy...
<p>5PreIj6z6ldIGL1V4+1C36dQFHNCQHJvW52GXc...
<q>/E/wDit8YXPCxx126zTq2ilQ3IcW54NJYyNjiZ...
<dP>mKc+wX8inDowEH45Qp4slRo1YveBgExKPROu6...
<dQ>qDVKtBz9lk0shL5PR3ickXDgkwS576zbl2ztB...
<qInv>j6E8EA7dNsTImaXexAmLA1DoeArsYeFAInr...
</privateKey>
@type s: str
@param s: A string containing an XML public or private key.
@type private: bool
@param private: If True, a L{SyntaxError} will be raised if the private
key component is not present.
@type public: bool
@param public: If True, the private key component (if present) will be
discarded, so this function will always return a public key.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA key.
@raise SyntaxError: If the key is not properly formatted.
"""
for implementation in implementations:
if implementation == "python":
key = Python_RSAKey.parseXML(s)
break
else:
raise ValueError("No acceptable implementations")
return _parseKeyHelper(key, private, public)
#Parse as an OpenSSL or Python key
def parsePEMKey(s, private=False, public=False, passwordCallback=None,
implementations=["openssl", "python"]):
"""Parse a PEM-format key.
The PEM format is used by OpenSSL and other tools. The
format is typically used to store both the public and private
components of a key. For example::
-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDYscuoMzsGmW0pAYsmyHltxB2TdwHS0dImfjCMfaSDkfLdZY5+
dOWORVns9etWnr194mSGA1F0Pls/VJW8+cX9+3vtJV8zSdANPYUoQf0TP7VlJxkH
dSRkUbEoz5bAAs/+970uos7n7iXQIni+3erUTdYEk2iWnMBjTljfgbK/dQIDAQAB
AoGAJHoJZk75aKr7DSQNYIHuruOMdv5ZeDuJvKERWxTrVJqE32/xBKh42/IgqRrc
esBN9ZregRCd7YtxoL+EVUNWaJNVx2mNmezEznrc9zhcYUrgeaVdFO2yBF1889zO
gCOVwrO8uDgeyj6IKa25H6c1N13ih/o7ZzEgWbGG+ylU1yECQQDv4ZSJ4EjSh/Fl
aHdz3wbBa/HKGTjC8iRy476Cyg2Fm8MZUe9Yy3udOrb5ZnS2MTpIXt5AF3h2TfYV
VoFXIorjAkEA50FcJmzT8sNMrPaV8vn+9W2Lu4U7C+K/O2g1iXMaZms5PC5zV5aV
CKXZWUX1fq2RaOzlbQrpgiolhXpeh8FjxwJBAOFHzSQfSsTNfttp3KUpU0LbiVvv
i+spVSnA0O4rq79KpVNmK44Mq67hsW1P11QzrzTAQ6GVaUBRv0YS061td1kCQHnP
wtN2tboFR6lABkJDjxoGRvlSt4SOPr7zKGgrWjeiuTZLHXSAnCY+/hr5L9Q3ZwXG
6x6iBdgLjVIe4BZQNtcCQQDXGv/gWinCNTN3MPWfTW/RGzuMYVmyBFais0/VrgdH
h1dLpztmpQqfyH/zrBXQ9qL/zR4ojS6XYneO/U18WpEe
-----END RSA PRIVATE KEY-----
To generate a key like this with OpenSSL, run::
openssl genrsa 2048 > key.pem
This format also supports password-encrypted private keys. TLS
Lite can only handle password-encrypted private keys when OpenSSL
and M2Crypto are installed. In this case, passwordCallback will be
invoked to query the user for the password.
@type s: str
@param s: A string containing a PEM-encoded public or private key.
@type private: bool
@param private: If True, a L{SyntaxError} will be raised if the
private key component is not present.
@type public: bool
@param public: If True, the private key component (if present) will
be discarded, so this function will always return a public key.
@type passwordCallback: callable
@param passwordCallback: This function will be called, with no
arguments, if the PEM-encoded private key is password-encrypted.
The callback should return the password string. If the password is
incorrect, SyntaxError will be raised. If no callback is passed
and the key is password-encrypted, a prompt will be displayed at
the console.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA key.
@raise SyntaxError: If the key is not properly formatted.
"""
for implementation in implementations:
if implementation == "openssl" and cryptomath.m2cryptoLoaded:
key = OpenSSL_RSAKey.parse(s, passwordCallback)
break
elif implementation == "python":
key = Python_RSAKey.parsePEM(s)
break
else:
raise ValueError("No acceptable implementations")
return _parseKeyHelper(key, private, public)
def _parseKeyHelper(key, private, public):
if private:
if not key.hasPrivateKey():
raise SyntaxError("Not a private key!")
if public:
return _createPublicKey(key)
if private:
if hasattr(key, "d"):
return _createPrivateKey(key)
else:
return key
return key
def parseAsPublicKey(s):
"""Parse an XML or PEM-formatted public key.
@type s: str
@param s: A string containing an XML or PEM-encoded public or private key.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA public key.
@raise SyntaxError: If the key is not properly formatted.
"""
try:
return parsePEMKey(s, public=True)
except:
return parseXMLKey(s, public=True)
def parsePrivateKey(s):
"""Parse an XML or PEM-formatted private key.
@type s: str
@param s: A string containing an XML or PEM-encoded private key.
@rtype: L{tlslite.utils.RSAKey.RSAKey}
@return: An RSA private key.
@raise SyntaxError: If the key is not properly formatted.
"""
try:
return parsePEMKey(s, private=True)
except:
return parseXMLKey(s, private=True)
def _createPublicKey(key):
"""
Create a new public key. Discard any private component,
and return the most efficient key possible.
"""
if not isinstance(key, RSAKey):
raise AssertionError()
return _createPublicRSAKey(key.n, key.e)
def _createPrivateKey(key):
"""
Create a new private key. Return the most efficient key possible.
"""
if not isinstance(key, RSAKey):
raise AssertionError()
if not key.hasPrivateKey():
raise AssertionError()
return _createPrivateRSAKey(key.n, key.e, key.d, key.p, key.q, key.dP,
key.dQ, key.qInv)
def _createPublicRSAKey(n, e, implementations = ["openssl", "pycrypto",
"python"]):
for implementation in implementations:
if implementation == "openssl" and cryptomath.m2cryptoLoaded:
return OpenSSL_RSAKey(n, e)
elif implementation == "pycrypto" and cryptomath.pycryptoLoaded:
return PyCrypto_RSAKey(n, e)
elif implementation == "python":
return Python_RSAKey(n, e)
raise ValueError("No acceptable implementations")
def _createPrivateRSAKey(n, e, d, p, q, dP, dQ, qInv,
implementations = ["pycrypto", "python"]):
for implementation in implementations:
if implementation == "pycrypto" and cryptomath.pycryptoLoaded:
return PyCrypto_RSAKey(n, e, d, p, q, dP, dQ, qInv)
elif implementation == "python":
return Python_RSAKey(n, e, d, p, q, dP, dQ, qInv)
raise ValueError("No acceptable implementations")
| apache-2.0 |
thonkify/thonkify | src/lib/unidecode/x06e.py | 252 | 4640 | data = (
'Ben ', # 0x00
'Yuan ', # 0x01
'Wen ', # 0x02
'Re ', # 0x03
'Fei ', # 0x04
'Qing ', # 0x05
'Yuan ', # 0x06
'Ke ', # 0x07
'Ji ', # 0x08
'She ', # 0x09
'Yuan ', # 0x0a
'Shibui ', # 0x0b
'Lu ', # 0x0c
'Zi ', # 0x0d
'Du ', # 0x0e
'[?] ', # 0x0f
'Jian ', # 0x10
'Min ', # 0x11
'Pi ', # 0x12
'Tani ', # 0x13
'Yu ', # 0x14
'Yuan ', # 0x15
'Shen ', # 0x16
'Shen ', # 0x17
'Rou ', # 0x18
'Huan ', # 0x19
'Zhu ', # 0x1a
'Jian ', # 0x1b
'Nuan ', # 0x1c
'Yu ', # 0x1d
'Qiu ', # 0x1e
'Ting ', # 0x1f
'Qu ', # 0x20
'Du ', # 0x21
'Feng ', # 0x22
'Zha ', # 0x23
'Bo ', # 0x24
'Wo ', # 0x25
'Wo ', # 0x26
'Di ', # 0x27
'Wei ', # 0x28
'Wen ', # 0x29
'Ru ', # 0x2a
'Xie ', # 0x2b
'Ce ', # 0x2c
'Wei ', # 0x2d
'Ge ', # 0x2e
'Gang ', # 0x2f
'Yan ', # 0x30
'Hong ', # 0x31
'Xuan ', # 0x32
'Mi ', # 0x33
'Ke ', # 0x34
'Mao ', # 0x35
'Ying ', # 0x36
'Yan ', # 0x37
'You ', # 0x38
'Hong ', # 0x39
'Miao ', # 0x3a
'Xing ', # 0x3b
'Mei ', # 0x3c
'Zai ', # 0x3d
'Hun ', # 0x3e
'Nai ', # 0x3f
'Kui ', # 0x40
'Shi ', # 0x41
'E ', # 0x42
'Pai ', # 0x43
'Mei ', # 0x44
'Lian ', # 0x45
'Qi ', # 0x46
'Qi ', # 0x47
'Mei ', # 0x48
'Tian ', # 0x49
'Cou ', # 0x4a
'Wei ', # 0x4b
'Can ', # 0x4c
'Tuan ', # 0x4d
'Mian ', # 0x4e
'Hui ', # 0x4f
'Mo ', # 0x50
'Xu ', # 0x51
'Ji ', # 0x52
'Pen ', # 0x53
'Jian ', # 0x54
'Jian ', # 0x55
'Hu ', # 0x56
'Feng ', # 0x57
'Xiang ', # 0x58
'Yi ', # 0x59
'Yin ', # 0x5a
'Zhan ', # 0x5b
'Shi ', # 0x5c
'Jie ', # 0x5d
'Cheng ', # 0x5e
'Huang ', # 0x5f
'Tan ', # 0x60
'Yu ', # 0x61
'Bi ', # 0x62
'Min ', # 0x63
'Shi ', # 0x64
'Tu ', # 0x65
'Sheng ', # 0x66
'Yong ', # 0x67
'Qu ', # 0x68
'Zhong ', # 0x69
'Suei ', # 0x6a
'Jiu ', # 0x6b
'Jiao ', # 0x6c
'Qiou ', # 0x6d
'Yin ', # 0x6e
'Tang ', # 0x6f
'Long ', # 0x70
'Huo ', # 0x71
'Yuan ', # 0x72
'Nan ', # 0x73
'Ban ', # 0x74
'You ', # 0x75
'Quan ', # 0x76
'Chui ', # 0x77
'Liang ', # 0x78
'Chan ', # 0x79
'Yan ', # 0x7a
'Chun ', # 0x7b
'Nie ', # 0x7c
'Zi ', # 0x7d
'Wan ', # 0x7e
'Shi ', # 0x7f
'Man ', # 0x80
'Ying ', # 0x81
'Ratsu ', # 0x82
'Kui ', # 0x83
'[?] ', # 0x84
'Jian ', # 0x85
'Xu ', # 0x86
'Lu ', # 0x87
'Gui ', # 0x88
'Gai ', # 0x89
'[?] ', # 0x8a
'[?] ', # 0x8b
'Po ', # 0x8c
'Jin ', # 0x8d
'Gui ', # 0x8e
'Tang ', # 0x8f
'Yuan ', # 0x90
'Suo ', # 0x91
'Yuan ', # 0x92
'Lian ', # 0x93
'Yao ', # 0x94
'Meng ', # 0x95
'Zhun ', # 0x96
'Sheng ', # 0x97
'Ke ', # 0x98
'Tai ', # 0x99
'Da ', # 0x9a
'Wa ', # 0x9b
'Liu ', # 0x9c
'Gou ', # 0x9d
'Sao ', # 0x9e
'Ming ', # 0x9f
'Zha ', # 0xa0
'Shi ', # 0xa1
'Yi ', # 0xa2
'Lun ', # 0xa3
'Ma ', # 0xa4
'Pu ', # 0xa5
'Wei ', # 0xa6
'Li ', # 0xa7
'Cai ', # 0xa8
'Wu ', # 0xa9
'Xi ', # 0xaa
'Wen ', # 0xab
'Qiang ', # 0xac
'Ze ', # 0xad
'Shi ', # 0xae
'Su ', # 0xaf
'Yi ', # 0xb0
'Zhen ', # 0xb1
'Sou ', # 0xb2
'Yun ', # 0xb3
'Xiu ', # 0xb4
'Yin ', # 0xb5
'Rong ', # 0xb6
'Hun ', # 0xb7
'Su ', # 0xb8
'Su ', # 0xb9
'Ni ', # 0xba
'Ta ', # 0xbb
'Shi ', # 0xbc
'Ru ', # 0xbd
'Wei ', # 0xbe
'Pan ', # 0xbf
'Chu ', # 0xc0
'Chu ', # 0xc1
'Pang ', # 0xc2
'Weng ', # 0xc3
'Cang ', # 0xc4
'Mie ', # 0xc5
'He ', # 0xc6
'Dian ', # 0xc7
'Hao ', # 0xc8
'Huang ', # 0xc9
'Xi ', # 0xca
'Zi ', # 0xcb
'Di ', # 0xcc
'Zhi ', # 0xcd
'Ying ', # 0xce
'Fu ', # 0xcf
'Jie ', # 0xd0
'Hua ', # 0xd1
'Ge ', # 0xd2
'Zi ', # 0xd3
'Tao ', # 0xd4
'Teng ', # 0xd5
'Sui ', # 0xd6
'Bi ', # 0xd7
'Jiao ', # 0xd8
'Hui ', # 0xd9
'Gun ', # 0xda
'Yin ', # 0xdb
'Gao ', # 0xdc
'Long ', # 0xdd
'Zhi ', # 0xde
'Yan ', # 0xdf
'She ', # 0xe0
'Man ', # 0xe1
'Ying ', # 0xe2
'Chun ', # 0xe3
'Lu ', # 0xe4
'Lan ', # 0xe5
'Luan ', # 0xe6
'[?] ', # 0xe7
'Bin ', # 0xe8
'Tan ', # 0xe9
'Yu ', # 0xea
'Sou ', # 0xeb
'Hu ', # 0xec
'Bi ', # 0xed
'Biao ', # 0xee
'Zhi ', # 0xef
'Jiang ', # 0xf0
'Kou ', # 0xf1
'Shen ', # 0xf2
'Shang ', # 0xf3
'Di ', # 0xf4
'Mi ', # 0xf5
'Ao ', # 0xf6
'Lu ', # 0xf7
'Hu ', # 0xf8
'Hu ', # 0xf9
'You ', # 0xfa
'Chan ', # 0xfb
'Fan ', # 0xfc
'Yong ', # 0xfd
'Gun ', # 0xfe
'Man ', # 0xff
)
| mit |
jymannob/CouchPotatoServer | libs/requests/packages/urllib3/util/request.py | 304 | 1924 | from base64 import b64encode
from ..packages import six
ACCEPT_ENCODING = 'gzip,deflate'
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
Example: ::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(six.b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(six.b(proxy_basic_auth)).decode('utf-8')
return headers
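# Editor's sketch (not part of the original urllib3 module): a quick, guarded
# self-check of make_headers that mirrors the doctest examples in the docstring
# above. It only runs when this file is executed directly.
if __name__ == '__main__':
    print(make_headers(keep_alive=True, user_agent="Batman/1.0"))
    print(make_headers(accept_encoding=['gzip', 'deflate'], basic_auth='user:pass'))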
| gpl-3.0 |
midonet/kuryr | doc/source/conf.py | 2 | 2475 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
#'sphinx.ext.intersphinx',
'oslosphinx',
'reno.sphinxext'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'kuryr'
copyright = u'2013, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'%s.tex' % project,
u'%s Documentation' % project,
u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
| apache-2.0 |
atty303/pyfilesystem | fs/httpfs.py | 1 | 1659 | """
fs.httpfs
=========
"""
from fs.base import FS
from fs.path import normpath
from fs.errors import ResourceNotFoundError, UnsupportedError
from urlparse import urlparse
from urllib2 import urlopen, URLError
class HTTPFS(FS):
"""Can barely be called a filesystem, but this enables the opener system
to open http files"""
def __init__(self, url):
self.root_url = url
def _make_url(self, path):
path = normpath(path)
url = '%s/%s' % (self.root_url.rstrip('/'), path.lstrip('/'))
return url
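    # Editor's note (illustrative, not part of the original module): with a
    # hypothetical root URL, the join above behaves like
    #   HTTPFS('http://example.org/static')._make_url('docs/readme.txt')
    #   -> 'http://example.org/static/docs/readme.txt'
    # 'example.org' and the paths are made-up values used only for illustration.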
def open(self, path, mode="r"):
if '+' in mode or 'w' in mode or 'a' in mode:
raise UnsupportedError('write')
url = self._make_url(path)
try:
f = urlopen(url)
except URLError, e:
raise ResourceNotFoundError(path)
except OSError, e:
raise ResourceNotFoundError(path)
return f
def exists(self, path):
return self.isfile(path)
def isdir(self, path):
return False
def isfile(self, path):
url = self._make_url(path)
f = None
try:
try:
f = urlopen(url)
except (URLError, OSError):
return False
finally:
if f is not None:
f.close()
return True
def listdir(self, path="./",
wildcard=None,
full=False,
absolute=False,
dirs_only=False,
files_only=False):
return []
| bsd-3-clause |
maleficarium/youtube-dl | youtube_dl/extractor/rtvnh.py | 17 | 2265 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import ExtractorError
class RTVNHIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?rtvnh\.nl/video/(?P<id>[0-9]+)'
_TEST = {
'url': 'http://www.rtvnh.nl/video/131946',
'md5': 'cdbec9f44550763c8afc96050fa747dc',
'info_dict': {
'id': '131946',
'ext': 'mp4',
'title': 'Grote zoektocht in zee bij Zandvoort naar vermiste vrouw',
'thumbnail': 're:^https?:.*\.jpg$'
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
meta = self._parse_json(self._download_webpage(
'http://www.rtvnh.nl/video/json?m=' + video_id, video_id), video_id)
status = meta.get('status')
if status != 200:
raise ExtractorError(
'%s returned error code %d' % (self.IE_NAME, status), expected=True)
formats = []
rtmp_formats = self._extract_smil_formats(
'http://www.rtvnh.nl/video/smil?m=' + video_id, video_id)
formats.extend(rtmp_formats)
for rtmp_format in rtmp_formats:
rtmp_url = '%s/%s' % (rtmp_format['url'], rtmp_format['play_path'])
rtsp_format = rtmp_format.copy()
del rtsp_format['play_path']
del rtsp_format['ext']
rtsp_format.update({
'format_id': rtmp_format['format_id'].replace('rtmp', 'rtsp'),
'url': rtmp_url.replace('rtmp://', 'rtsp://'),
'protocol': 'rtsp',
})
formats.append(rtsp_format)
http_base_url = rtmp_url.replace('rtmp://', 'http://')
formats.extend(self._extract_m3u8_formats(
http_base_url + '/playlist.m3u8', video_id, 'mp4',
'm3u8_native', m3u8_id='hls', fatal=False))
formats.extend(self._extract_f4m_formats(
http_base_url + '/manifest.f4m',
video_id, f4m_id='hds', fatal=False))
self._sort_formats(formats)
return {
'id': video_id,
'title': meta['title'].strip(),
'thumbnail': meta.get('image'),
'formats': formats
}
| unlicense |
south-coast-science/scs_mfr | src/scs_mfr/cmd/cmd_csv_logger_conf.py | 1 | 3307 | """
Created on 18 Apr 2018
@author: Bruno Beloff ([email protected])
"""
import optparse
# --------------------------------------------------------------------------------------------------------------------
class CmdCSVLoggerConf(object):
"""unix command line handler"""
def __init__(self):
"""
Constructor
"""
self.__parser = optparse.OptionParser(usage="%prog { [-r ROOT_PATH] [-o DELETE_OLDEST] [-i WRITE_INTERVAL] | "
"-d } [-v]", version="%prog 1.0")
# optional...
self.__parser.add_option("--root", "-r", type="string", nargs=1, action="store", dest="root_path",
help="set filesystem logging directory")
self.__parser.add_option("--del-oldest", "-o", type="int", nargs=1, action="store", dest="delete_oldest",
help="delete oldest logs to recover space (1) or stop when full (0)")
self.__parser.add_option("--write-int", "-i", type="int", nargs=1, action="store", dest="write_interval",
help="write interval in seconds (0 for immediate writes)")
self.__parser.add_option("--delete", "-d", action="store_true", dest="delete", default=False,
help="delete the logger configuration")
self.__parser.add_option("--verbose", "-v", action="store_true", dest="verbose", default=False,
help="report narrative to stderr")
self.__opts, self.__args = self.__parser.parse_args()
# ----------------------------------------------------------------------------------------------------------------
def is_valid(self):
if self.set() and self.delete:
return False
if self.write_interval is not None and self.write_interval < 0:
return False
return True
def is_complete(self):
if self.root_path is None or self.delete_oldest is None or self.write_interval is None:
return False
return True
def set(self):
if self.root_path is not None or self.delete_oldest is not None or self.write_interval is not None:
return True
return False
# ----------------------------------------------------------------------------------------------------------------
@property
def root_path(self):
return self.__opts.root_path
@property
def delete_oldest(self):
return None if self.__opts.delete_oldest is None else bool(self.__opts.delete_oldest)
@property
def write_interval(self):
return self.__opts.write_interval
@property
def delete(self):
return self.__opts.delete
@property
def verbose(self):
return self.__opts.verbose
# ----------------------------------------------------------------------------------------------------------------
def print_help(self, file):
self.__parser.print_help(file)
def __str__(self, *args, **kwargs):
return "CmdCSVLoggerConf:{root_path:%s, delete_oldest:%s, write_interval:%s, delete:%s, verbose:%s}" % \
(self.root_path, self.delete_oldest, self.write_interval, self.delete, self.verbose)
| mit |
proversity-org/edx-platform | cms/djangoapps/contentstore/views/tests/test_tabs.py | 24 | 8492 | """ Tests for tab functions (just primitive). """
import json
from contentstore.tests.utils import CourseTestCase
from contentstore.utils import reverse_course_url
from contentstore.views import tabs
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.tabs import CourseTabList
from xmodule.x_module import STUDENT_VIEW
class TabsPageTests(CourseTestCase):
"""Test cases for Tabs (a.k.a Pages) page"""
def setUp(self):
"""Common setup for tests"""
# call super class to setup course, etc.
super(TabsPageTests, self).setUp()
# Set the URL for tests
self.url = reverse_course_url('tabs_handler', self.course.id)
# add a static tab to the course, for code coverage
self.test_tab = ItemFactory.create(
parent_location=self.course.location,
category="static_tab",
display_name="Static_1"
)
self.reload_course()
def check_invalid_tab_id_response(self, resp):
"""Verify response is an error listing the invalid_tab_id"""
self.assertEqual(resp.status_code, 400)
resp_content = json.loads(resp.content)
self.assertIn("error", resp_content)
self.assertIn("invalid_tab_id", resp_content['error'])
def test_not_implemented(self):
"""Verify not implemented errors"""
# JSON GET request not supported
with self.assertRaises(NotImplementedError):
self.client.get(self.url)
# JSON POST request not supported
with self.assertRaises(NotImplementedError):
self.client.ajax_post(
self.url,
data=json.dumps({
'tab_id_locator': {'tab_id': 'courseware'},
'unsupported_request': None,
}),
)
# invalid JSON POST request
with self.assertRaises(NotImplementedError):
self.client.ajax_post(
self.url,
data={'invalid_request': None},
)
def test_view_index(self):
"""Basic check that the Pages page responds correctly"""
resp = self.client.get_html(self.url)
self.assertEqual(resp.status_code, 200)
self.assertIn('course-nav-list', resp.content)
def test_reorder_tabs(self):
"""Test re-ordering of tabs"""
# get the original tab ids
orig_tab_ids = [tab.tab_id for tab in self.course.tabs]
tab_ids = list(orig_tab_ids)
num_orig_tabs = len(orig_tab_ids)
# make sure we have enough tabs to play around with
self.assertGreaterEqual(num_orig_tabs, 5)
# reorder the last two tabs
tab_ids[num_orig_tabs - 1], tab_ids[num_orig_tabs - 2] = tab_ids[num_orig_tabs - 2], tab_ids[num_orig_tabs - 1]
# remove the middle tab
        # (the code needs to handle the case where the tabs requested for re-ordering are a subset of the tabs in the course)
removed_tab = tab_ids.pop(num_orig_tabs / 2)
self.assertEqual(len(tab_ids), num_orig_tabs - 1)
# post the request
resp = self.client.ajax_post(
self.url,
data={'tabs': [{'tab_id': tab_id} for tab_id in tab_ids]},
)
self.assertEqual(resp.status_code, 204)
# reload the course and verify the new tab order
self.reload_course()
new_tab_ids = [tab.tab_id for tab in self.course.tabs]
self.assertEqual(new_tab_ids, tab_ids + [removed_tab])
self.assertNotEqual(new_tab_ids, orig_tab_ids)
def test_reorder_tabs_invalid_list(self):
"""Test re-ordering of tabs with invalid tab list"""
orig_tab_ids = [tab.tab_id for tab in self.course.tabs]
tab_ids = list(orig_tab_ids)
# reorder the first two tabs
tab_ids[0], tab_ids[1] = tab_ids[1], tab_ids[0]
# post the request
resp = self.client.ajax_post(
self.url,
data={'tabs': [{'tab_id': tab_id} for tab_id in tab_ids]},
)
self.assertEqual(resp.status_code, 400)
resp_content = json.loads(resp.content)
self.assertIn("error", resp_content)
def test_reorder_tabs_invalid_tab(self):
"""Test re-ordering of tabs with invalid tab"""
invalid_tab_ids = ['courseware', 'info', 'invalid_tab_id']
# post the request
resp = self.client.ajax_post(
self.url,
data={'tabs': [{'tab_id': tab_id} for tab_id in invalid_tab_ids]},
)
self.check_invalid_tab_id_response(resp)
def check_toggle_tab_visiblity(self, tab_type, new_is_hidden_setting):
"""Helper method to check changes in tab visibility"""
# find the tab
old_tab = CourseTabList.get_tab_by_type(self.course.tabs, tab_type)
# visibility should be different from new setting
self.assertNotEqual(old_tab.is_hidden, new_is_hidden_setting)
# post the request
resp = self.client.ajax_post(
self.url,
data=json.dumps({
'tab_id_locator': {'tab_id': old_tab.tab_id},
'is_hidden': new_is_hidden_setting,
}),
)
self.assertEqual(resp.status_code, 204)
# reload the course and verify the new visibility setting
self.reload_course()
new_tab = CourseTabList.get_tab_by_type(self.course.tabs, tab_type)
self.assertEqual(new_tab.is_hidden, new_is_hidden_setting)
def test_toggle_tab_visibility(self):
"""Test toggling of tab visibility"""
self.check_toggle_tab_visiblity('wiki', True)
self.check_toggle_tab_visiblity('wiki', False)
def test_toggle_invalid_tab_visibility(self):
"""Test toggling visibility of an invalid tab"""
# post the request
resp = self.client.ajax_post(
self.url,
data=json.dumps({
'tab_id_locator': {'tab_id': 'invalid_tab_id'}
}),
)
self.check_invalid_tab_id_response(resp)
def test_tab_preview_html(self):
"""
Verify that the static tab renders itself with the correct HTML
"""
preview_url = '/xblock/{}/{}'.format(self.test_tab.location, STUDENT_VIEW)
resp = self.client.get(preview_url, HTTP_ACCEPT='application/json')
self.assertEqual(resp.status_code, 200)
resp_content = json.loads(resp.content)
html = resp_content['html']
# Verify that the HTML contains the expected elements
self.assertIn('<span class="action-button-text">Edit</span>', html)
self.assertIn('<span class="sr">Duplicate this component</span>', html)
self.assertIn('<span class="sr">Delete this component</span>', html)
self.assertIn('<span data-tooltip="Drag to reorder" class="drag-handle action"></span>', html)
class PrimitiveTabEdit(ModuleStoreTestCase):
"""Tests for the primitive tab edit data manipulations"""
def test_delete(self):
"""Test primitive tab deletion."""
course = CourseFactory.create()
with self.assertRaises(ValueError):
tabs.primitive_delete(course, 0)
with self.assertRaises(ValueError):
tabs.primitive_delete(course, 1)
with self.assertRaises(IndexError):
tabs.primitive_delete(course, 6)
tabs.primitive_delete(course, 2)
self.assertNotIn({u'type': u'textbooks'}, course.tabs)
# Check that discussion has shifted up
self.assertEquals(course.tabs[2], {'type': 'discussion', 'name': 'Discussion'})
def test_insert(self):
"""Test primitive tab insertion."""
course = CourseFactory.create()
tabs.primitive_insert(course, 2, 'notes', 'aname')
self.assertEquals(course.tabs[2], {'type': 'notes', 'name': 'aname'})
with self.assertRaises(ValueError):
tabs.primitive_insert(course, 0, 'notes', 'aname')
with self.assertRaises(ValueError):
tabs.primitive_insert(course, 3, 'static_tab', 'aname')
def test_save(self):
"""Test course saving."""
course = CourseFactory.create()
tabs.primitive_insert(course, 3, 'notes', 'aname')
course2 = modulestore().get_course(course.id)
self.assertEquals(course2.tabs[3], {'type': 'notes', 'name': 'aname'})
| agpl-3.0 |
IndonesiaX/edx-platform | lms/djangoapps/lms_xblock/mixin.py | 21 | 7673 | """
Namespace that defines fields common to all blocks used in the LMS
"""
#from django.utils.translation import ugettext_noop as _
from lazy import lazy
from xblock.fields import Boolean, Scope, String, XBlockMixin, Dict
from xblock.validation import ValidationMessage
from xmodule.modulestore.inheritance import UserPartitionList
from xmodule.partitions.partitions import NoSuchUserPartitionError, NoSuchUserPartitionGroupError
# Please do not remove, this is a workaround for Django 1.8.
# more information can be found here: https://openedx.atlassian.net/browse/PLAT-902
_ = lambda text: text
class GroupAccessDict(Dict):
"""Special Dict class for serializing the group_access field"""
def from_json(self, access_dict):
if access_dict is not None:
return {int(k): access_dict[k] for k in access_dict}
def to_json(self, access_dict):
if access_dict is not None:
return {unicode(k): access_dict[k] for k in access_dict}
class LmsBlockMixin(XBlockMixin):
"""
Mixin that defines fields common to all blocks used in the LMS
"""
hide_from_toc = Boolean(
help=_("Whether to display this module in the table of contents"),
default=False,
scope=Scope.settings
)
format = String(
# Translators: "TOC" stands for "Table of Contents"
help=_("What format this module is in (used for deciding which "
"grader to apply, and what to show in the TOC)"),
scope=Scope.settings,
)
chrome = String(
display_name=_("Courseware Chrome"),
# Translators: DO NOT translate the words in quotes here, they are
# specific words for the acceptable values.
help=_("Enter the chrome, or navigation tools, to use for the XBlock in the LMS. Valid values are: \n"
"\"chromeless\" -- to not use tabs or the accordion; \n"
"\"tabs\" -- to use tabs only; \n"
"\"accordion\" -- to use the accordion only; or \n"
"\"tabs,accordion\" -- to use tabs and the accordion."),
scope=Scope.settings,
default=None,
)
default_tab = String(
display_name=_("Default Tab"),
help=_("Enter the tab that is selected in the XBlock. If not set, the Courseware tab is selected."),
scope=Scope.settings,
default=None,
)
source_file = String(
display_name=_("LaTeX Source File Name"),
help=_("Enter the source file name for LaTeX."),
scope=Scope.settings,
deprecated=True
)
visible_to_staff_only = Boolean(
help=_("If true, can be seen only by course staff, regardless of start date."),
default=False,
scope=Scope.settings,
)
group_access = GroupAccessDict(
help=_(
"A dictionary that maps which groups can be shown this block. The keys "
"are group configuration ids and the values are a list of group IDs. "
"If there is no key for a group configuration or if the set of group IDs "
"is empty then the block is considered visible to all. Note that this "
"field is ignored if the block is visible_to_staff_only."
),
default={},
scope=Scope.settings,
)
@lazy
def merged_group_access(self):
"""
This computes access to a block's group_access rules in the context of its position
within the courseware structure, in the form of a lazily-computed attribute.
Each block's group_access rule is merged recursively with its parent's, guaranteeing
that any rule in a parent block will be enforced on descendants, even if a descendant
also defined its own access rules. The return value is always a dict, with the same
structure as that of the group_access field.
When merging access rules results in a case where all groups are denied access in a
user partition (which effectively denies access to that block for all students),
the special value False will be returned for that user partition key.
"""
parent = self.get_parent()
if not parent:
return self.group_access or {}
merged_access = parent.merged_group_access.copy()
if self.group_access is not None:
for partition_id, group_ids in self.group_access.items():
if group_ids: # skip if the "local" group_access for this partition is None or empty.
if partition_id in merged_access:
if merged_access[partition_id] is False:
# special case - means somewhere up the hierarchy, merged access rules have eliminated
# all group_ids from this partition, so there's no possible intersection.
continue
# otherwise, if the parent defines group access rules for this partition,
# intersect with the local ones.
merged_access[partition_id] = list(
set(merged_access[partition_id]).intersection(group_ids)
) or False
else:
# add the group access rules for this partition to the merged set of rules.
merged_access[partition_id] = group_ids
return merged_access
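    # Editor's note (illustrative, not part of the upstream mixin): the merge above
    # intersects per-partition group lists down the tree. For a hypothetical
    # partition id 10, a parent rule {10: [1, 2]} combined with a child rule
    # {10: [2, 3]} yields {10: [2]}, while a child rule {10: [3]} (no overlap)
    # collapses to {10: False}, i.e. no group in that partition can see the block.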
# Specified here so we can see what the value set at the course-level is.
user_partitions = UserPartitionList(
help=_("The list of group configurations for partitioning students in content experiments."),
default=[],
scope=Scope.settings
)
def _get_user_partition(self, user_partition_id):
"""
Returns the user partition with the specified id. Raises
`NoSuchUserPartitionError` if the lookup fails.
"""
for user_partition in self.user_partitions:
if user_partition.id == user_partition_id:
return user_partition
raise NoSuchUserPartitionError("could not find a UserPartition with ID [{}]".format(user_partition_id))
def validate(self):
"""
Validates the state of this xblock instance.
"""
_ = self.runtime.service(self, "i18n").ugettext
validation = super(LmsBlockMixin, self).validate()
has_invalid_user_partitions = False
has_invalid_groups = False
for user_partition_id, group_ids in self.group_access.iteritems():
try:
user_partition = self._get_user_partition(user_partition_id)
except NoSuchUserPartitionError:
has_invalid_user_partitions = True
else:
# Skip the validation check if the partition has been disabled
if user_partition.active:
for group_id in group_ids:
try:
user_partition.get_group(group_id)
except NoSuchUserPartitionGroupError:
has_invalid_groups = True
if has_invalid_user_partitions:
validation.add(
ValidationMessage(
ValidationMessage.ERROR,
_(u"This component refers to deleted or invalid content group configurations.")
)
)
if has_invalid_groups:
validation.add(
ValidationMessage(
ValidationMessage.ERROR,
_(u"This component refers to deleted or invalid content groups.")
)
)
return validation
| agpl-3.0 |
10clouds/edx-platform | lms/djangoapps/survey/tests/test_views.py | 29 | 5654 | """
Python tests for the Survey views
"""
import json
from collections import OrderedDict
from django.test.client import Client
from django.core.urlresolvers import reverse
from survey.models import SurveyForm, SurveyAnswer
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
class SurveyViewsTests(ModuleStoreTestCase):
"""
All tests for the views.py file
"""
def setUp(self):
"""
Set up the test data used in the specific tests
"""
super(SurveyViewsTests, self).setUp()
self.client = Client()
# Create two accounts
self.password = 'abc'
self.student = UserFactory.create(username='student', email='[email protected]', password=self.password)
self.test_survey_name = 'TestSurvey'
self.test_form = '''
<input name="field1" /><input name="field2" /><select name="ddl"><option>1</option></select>
<textarea name="textarea" />
'''
self.student_answers = OrderedDict({
u'field1': u'value1',
u'field2': u'value2',
u'ddl': u'1',
u'textarea': u'textarea'
})
self.course = CourseFactory.create(
display_name='Test Course',
course_survey_required=True,
course_survey_name=self.test_survey_name
)
self.survey = SurveyForm.create(self.test_survey_name, self.test_form)
self.view_url = reverse('view_survey', args=[self.test_survey_name])
self.postback_url = reverse('submit_answers', args=[self.test_survey_name])
self.client.login(username=self.student.username, password=self.password)
def test_unauthenticated_survey_view(self):
"""
Asserts that an unauthenticated user cannot access a survey
"""
anon_user = Client()
resp = anon_user.get(self.view_url)
self.assertEquals(resp.status_code, 302)
def test_survey_not_found(self):
"""
Asserts that if we ask for a Survey that does not exist, then we get a 302 redirect
"""
resp = self.client.get(reverse('view_survey', args=['NonExisting']))
self.assertEquals(resp.status_code, 302)
def test_authenticated_survey_view(self):
"""
Asserts that an authenticated user can see the survey
"""
resp = self.client.get(self.view_url)
self.assertEquals(resp.status_code, 200)
# is the SurveyForm html present in the HTML response?
self.assertIn(self.test_form, resp.content)
    def test_unauthenticated_survey_postback(self):
"""
Asserts that an anonymous user cannot answer a survey
"""
anon_user = Client()
resp = anon_user.post(
self.postback_url,
self.student_answers
)
self.assertEquals(resp.status_code, 302)
def test_survey_postback_to_nonexisting_survey(self):
"""
Asserts that any attempts to post back to a non existing survey returns a 404
"""
resp = self.client.post(
reverse('submit_answers', args=['NonExisting']),
self.student_answers
)
self.assertEquals(resp.status_code, 404)
def test_survey_postback(self):
"""
Asserts that a well formed postback of survey answers is properly stored in the
database
"""
resp = self.client.post(
self.postback_url,
self.student_answers
)
self.assertEquals(resp.status_code, 200)
data = json.loads(resp.content)
self.assertIn('redirect_url', data)
answers = self.survey.get_answers(self.student)
self.assertEquals(answers[self.student.id], self.student_answers)
def test_strip_extra_fields(self):
"""
        Verify that unexpected field names in the post-back are not stored
        in the database
"""
data = dict.copy(self.student_answers)
data['csrfmiddlewaretoken'] = 'foo'
data['_redirect_url'] = 'bar'
data['course_id'] = unicode(self.course.id)
resp = self.client.post(
self.postback_url,
data
)
self.assertEquals(resp.status_code, 200)
answers = self.survey.get_answers(self.student)
self.assertNotIn('csrfmiddlewaretoken', answers[self.student.id])
self.assertNotIn('_redirect_url', answers[self.student.id])
self.assertNotIn('course_id', answers[self.student.id])
# however we want to make sure we persist the course_id
answer_objs = SurveyAnswer.objects.filter(
user=self.student,
form=self.survey
)
for answer_obj in answer_objs:
self.assertEquals(unicode(answer_obj.course_key), data['course_id'])
def test_encoding_answers(self):
"""
Verify that if some potentially harmful input data is sent, that is is properly HTML encoded
"""
data = dict.copy(self.student_answers)
data['field1'] = '<script type="javascript">alert("Deleting filesystem...")</script>'
resp = self.client.post(
self.postback_url,
data
)
self.assertEquals(resp.status_code, 200)
answers = self.survey.get_answers(self.student)
self.assertEqual(
'<script type="javascript">alert("Deleting filesystem...")</script>',
answers[self.student.id]['field1']
)
| agpl-3.0 |
OpenPymeMx/OCB | addons/account/report/account_invoice_report.py | 14 | 12081 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
import openerp.addons.decimal_precision as dp
from openerp.osv import fields,osv
class account_invoice_report(osv.osv):
_name = "account.invoice.report"
_description = "Invoices Statistics"
_auto = False
_rec_name = 'date'
def _compute_amounts_in_user_currency(self, cr, uid, ids, field_names, args, context=None):
"""Compute the amounts in the currency of the user
"""
if context is None:
context={}
currency_obj = self.pool.get('res.currency')
currency_rate_obj = self.pool.get('res.currency.rate')
user_currency_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id
currency_rate_id = currency_rate_obj.search(cr, uid, [('rate', '=', 1)], limit=1, context=context)[0]
base_currency_id = currency_rate_obj.browse(cr, uid, currency_rate_id, context=context).currency_id.id
res = {}
ctx = context.copy()
for item in self.browse(cr, uid, ids, context=context):
ctx['date'] = item.date
price_total = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.price_total, context=ctx)
price_average = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.price_average, context=ctx)
residual = currency_obj.compute(cr, uid, base_currency_id, user_currency_id, item.residual, context=ctx)
res[item.id] = {
'user_currency_price_total': price_total,
'user_currency_price_average': price_average,
'user_currency_residual': residual,
}
return res
_columns = {
'date': fields.date('Date', readonly=True),
'year': fields.char('Year', size=4, readonly=True),
'day': fields.char('Day', size=128, readonly=True),
'month': fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'),
('05','May'), ('06','June'), ('07','July'), ('08','August'), ('09','September'),
('10','October'), ('11','November'), ('12','December')], 'Month', readonly=True),
'product_id': fields.many2one('product.product', 'Product', readonly=True),
'product_qty':fields.float('Qty', readonly=True),
'uom_name': fields.char('Reference Unit of Measure', size=128, readonly=True),
'payment_term': fields.many2one('account.payment.term', 'Payment Term', readonly=True),
'period_id': fields.many2one('account.period', 'Force Period', domain=[('state','<>','done')], readonly=True),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position', readonly=True),
'currency_id': fields.many2one('res.currency', 'Currency', readonly=True),
'categ_id': fields.many2one('product.category','Category of Product', readonly=True),
'journal_id': fields.many2one('account.journal', 'Journal', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
'price_total': fields.float('Total Without Tax', readonly=True),
'user_currency_price_total': fields.function(_compute_amounts_in_user_currency, string="Total Without Tax", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"),
'price_average': fields.float('Average Price', readonly=True, group_operator="avg"),
'user_currency_price_average': fields.function(_compute_amounts_in_user_currency, string="Average Price", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"),
'currency_rate': fields.float('Currency Rate', readonly=True),
'nbr':fields.integer('# of Lines', readonly=True),
'type': fields.selection([
('out_invoice','Customer Invoice'),
('in_invoice','Supplier Invoice'),
('out_refund','Customer Refund'),
('in_refund','Supplier Refund'),
],'Type', readonly=True),
'state': fields.selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('paid','Done'),
('cancel','Cancelled')
], 'Invoice Status', readonly=True),
'date_due': fields.date('Due Date', readonly=True),
'account_id': fields.many2one('account.account', 'Account',readonly=True),
'account_line_id': fields.many2one('account.account', 'Account Line',readonly=True),
'partner_bank_id': fields.many2one('res.partner.bank', 'Bank Account',readonly=True),
'residual': fields.float('Total Residual', readonly=True),
'user_currency_residual': fields.function(_compute_amounts_in_user_currency, string="Total Residual", type='float', digits_compute=dp.get_precision('Account'), multi="_compute_amounts"),
}
_order = 'date desc'
def _select(self):
select_str = """
SELECT sub.id, sub.date, sub.year, sub.month, sub.day, sub.product_id, sub.partner_id,
sub.payment_term, sub.period_id, sub.uom_name, sub.currency_id, sub.journal_id,
sub.fiscal_position, sub.user_id, sub.company_id, sub.nbr, sub.type, sub.state,
sub.categ_id, sub.date_due, sub.account_id, sub.account_line_id, sub.partner_bank_id,
sub.product_qty, sub.price_total / cr.rate as price_total, sub.price_average /cr.rate as price_average,
cr.rate as currency_rate, sub.residual / cr.rate as residual
"""
return select_str
def _sub_select(self):
select_str = """
SELECT min(ail.id) AS id,
ai.date_invoice AS date,
to_char(ai.date_invoice::timestamp with time zone, 'YYYY'::text) AS year,
to_char(ai.date_invoice::timestamp with time zone, 'MM'::text) AS month,
to_char(ai.date_invoice::timestamp with time zone, 'YYYY-MM-DD'::text) AS day,
ail.product_id, ai.partner_id, ai.payment_term, ai.period_id,
CASE
WHEN u.uom_type::text <> 'reference'::text
THEN ( SELECT product_uom.name
FROM product_uom
WHERE product_uom.uom_type::text = 'reference'::text
AND product_uom.active
AND product_uom.category_id = u.category_id LIMIT 1)
ELSE u.name
END AS uom_name,
ai.currency_id, ai.journal_id, ai.fiscal_position, ai.user_id, ai.company_id,
count(ail.*) AS nbr,
ai.type, ai.state, pt.categ_id, ai.date_due, ai.account_id, ail.account_id AS account_line_id,
ai.partner_bank_id,
SUM(CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN (- ail.quantity) / u.factor
ELSE ail.quantity / u.factor
END) AS product_qty,
SUM(CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN - ail.price_subtotal
ELSE ail.price_subtotal
END) AS price_total,
CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN SUM(- ail.price_subtotal)
ELSE SUM(ail.price_subtotal)
END / CASE
WHEN SUM(ail.quantity / u.factor) <> 0::numeric
THEN CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN SUM((- ail.quantity) / u.factor)
ELSE SUM(ail.quantity / u.factor)
END
ELSE 1::numeric
END AS price_average,
CASE
WHEN ai.type::text = ANY (ARRAY['out_refund'::character varying::text, 'in_invoice'::character varying::text])
THEN - ai.residual
ELSE ai.residual
END / (SELECT count(*) FROM account_invoice_line l where invoice_id = ai.id) *
count(*) AS residual
"""
return select_str
def _from(self):
from_str = """
FROM account_invoice_line ail
JOIN account_invoice ai ON ai.id = ail.invoice_id
LEFT JOIN product_product pr ON pr.id = ail.product_id
left JOIN product_template pt ON pt.id = pr.product_tmpl_id
LEFT JOIN product_uom u ON u.id = ail.uos_id
"""
return from_str
def _group_by(self):
group_by_str = """
GROUP BY ail.product_id, ai.date_invoice, ai.id,
to_char(ai.date_invoice::timestamp with time zone, 'YYYY'::text),
to_char(ai.date_invoice::timestamp with time zone, 'MM'::text),
to_char(ai.date_invoice::timestamp with time zone, 'YYYY-MM-DD'::text),
ai.partner_id, ai.payment_term, ai.period_id, u.name, ai.currency_id, ai.journal_id,
ai.fiscal_position, ai.user_id, ai.company_id, ai.type, ai.state, pt.categ_id,
ai.date_due, ai.account_id, ail.account_id, ai.partner_bank_id, ai.residual,
ai.amount_total, u.uom_type, u.category_id
"""
return group_by_str
def init(self, cr):
# self._table = account_invoice_report
tools.drop_view_if_exists(cr, self._table)
cr.execute("""CREATE or REPLACE VIEW %s as (
%s
FROM (
%s %s %s
) AS sub
JOIN res_currency_rate cr ON (cr.currency_id = sub.currency_id)
WHERE
cr.id IN (SELECT id
FROM res_currency_rate cr2
WHERE (cr2.currency_id = sub.currency_id)
AND ((sub.date IS NOT NULL AND cr2.name <= sub.date)
OR (sub.date IS NULL AND cr2.name <= NOW()))
ORDER BY name DESC LIMIT 1)
)""" % (
self._table,
self._select(), self._sub_select(), self._from(), self._group_by()))
account_invoice_report()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
apache/airflow | airflow/contrib/sensors/qubole_sensor.py | 2 | 1176 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`airflow.providers.qubole.sensors.qubole`."""
import warnings
from airflow.providers.qubole.sensors.qubole import ( # noqa
QuboleFileSensor,
QubolePartitionSensor,
QuboleSensor,
)
warnings.warn(
"This module is deprecated. Please use `airflow.providers.qubole.sensors.qubole`.",
DeprecationWarning,
stacklevel=2,
)
| apache-2.0 |
piran9/Project | bindings/python/ns3modulegen_core_customizations.py | 15 | 19476 | import re
from pybindgen.typehandlers import base as typehandlers
from pybindgen import ReturnValue, Parameter
from pybindgen.cppmethod import CustomCppMethodWrapper, CustomCppConstructorWrapper
from pybindgen.typehandlers.codesink import MemoryCodeSink
from pybindgen.typehandlers import ctypeparser
from pybindgen import cppclass
import warnings
from pybindgen.typehandlers.base import CodeGenerationError
import sys
class SmartPointerTransformation(typehandlers.TypeTransformation):
"""
This class provides a "type transformation" that tends to support
NS-3 smart pointers. Parameters such as "Ptr<Foo> foo" are
transformed into something like Parameter.new("Foo*", "foo",
transfer_ownership=False). Return values such as Ptr<Foo> are
transformed into ReturnValue.new("Foo*",
caller_owns_return=False). Since the underlying objects have
reference counting, PyBindGen does the right thing.
"""
def __init__(self):
super(SmartPointerTransformation, self).__init__()
self.rx = re.compile(r'(ns3::|::ns3::|)Ptr<([^>]+)>\s*$')
def _get_untransformed_type_traits(self, name):
m = self.rx.match(name)
is_const = False
if m is None:
return None, False
else:
name1 = m.group(2).strip()
if name1.startswith('const '):
name1 = name1[len('const '):]
is_const = True
if name1.endswith(' const'):
name1 = name1[:-len(' const')]
is_const = True
new_name = name1+' *'
if new_name.startswith('::'):
new_name = new_name[2:]
return new_name, is_const
def get_untransformed_name(self, name):
new_name, dummy_is_const = self._get_untransformed_type_traits(name)
return new_name
def create_type_handler(self, type_handler, *args, **kwargs):
if issubclass(type_handler, Parameter):
kwargs['transfer_ownership'] = False
elif issubclass(type_handler, ReturnValue):
kwargs['caller_owns_return'] = False
else:
raise AssertionError
## fix the ctype, add ns3:: namespace
orig_ctype, is_const = self._get_untransformed_type_traits(args[0])
if is_const:
correct_ctype = 'ns3::Ptr< %s const >' % orig_ctype[:-2]
else:
correct_ctype = 'ns3::Ptr< %s >' % orig_ctype[:-2]
args = tuple([correct_ctype] + list(args[1:]))
handler = type_handler(*args, **kwargs)
handler.set_tranformation(self, orig_ctype)
return handler
def untransform(self, type_handler, declarations, code_block, expression):
return 'const_cast<%s> (ns3::PeekPointer (%s))' % (type_handler.untransformed_ctype, expression)
def transform(self, type_handler, declarations, code_block, expression):
assert type_handler.untransformed_ctype[-1] == '*'
return 'ns3::Ptr< %s > (%s)' % (type_handler.untransformed_ctype[:-1], expression)
## register the type transformation
transf = SmartPointerTransformation()
typehandlers.return_type_matcher.register_transformation(transf)
typehandlers.param_type_matcher.register_transformation(transf)
del transf
class ArgvParam(Parameter):
"""
Converts a python list-of-strings argument to a pair of 'int argc,
char *argv[]' arguments to pass into C.
One Python argument becomes two C function arguments -> it's a miracle!
Note: this parameter type handler is not registered by any name;
must be used explicitly.
"""
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = []
def convert_c_to_python(self, wrapper):
raise NotImplementedError
def convert_python_to_c(self, wrapper):
py_name = wrapper.declarations.declare_variable('PyObject*', 'py_' + self.name)
argc_var = wrapper.declarations.declare_variable('int', 'argc')
name = wrapper.declarations.declare_variable('char**', self.name)
idx = wrapper.declarations.declare_variable('Py_ssize_t', 'idx')
wrapper.parse_params.add_parameter('O!', ['&PyList_Type', '&'+py_name], self.name)
#wrapper.before_call.write_error_check('!PyList_Check(%s)' % py_name) # XXX
wrapper.before_call.write_code("%s = (char **) malloc(sizeof(char*)*PyList_Size(%s));"
% (name, py_name))
wrapper.before_call.add_cleanup_code('free(%s);' % name)
wrapper.before_call.write_code('''
for (%(idx)s = 0; %(idx)s < PyList_Size(%(py_name)s); %(idx)s++)
{
''' % vars())
wrapper.before_call.sink.indent()
wrapper.before_call.write_code('''
PyObject *item = PyList_GET_ITEM(%(py_name)s, %(idx)s);
''' % vars())
#wrapper.before_call.write_error_check('item == NULL')
wrapper.before_call.write_error_check(
'!PyString_Check(item)',
failure_cleanup=('PyErr_SetString(PyExc_TypeError, '
'"argument %s must be a list of strings");') % self.name)
wrapper.before_call.write_code(
'%s[%s] = PyString_AsString(item);' % (name, idx))
wrapper.before_call.sink.unindent()
wrapper.before_call.write_code('}')
wrapper.before_call.write_code('%s = PyList_Size(%s);' % (argc_var, py_name))
wrapper.call_params.append(argc_var)
wrapper.call_params.append(name)
class CallbackImplProxyMethod(typehandlers.ReverseWrapperBase):
"""
Class that generates a proxy virtual method that calls a similarly named python method.
"""
def __init__(self, return_value, parameters):
super(CallbackImplProxyMethod, self).__init__(return_value, parameters)
def generate_python_call(self):
"""code to call the python method"""
build_params = self.build_params.get_parameters(force_tuple_creation=True)
if build_params[0][0] == '"':
build_params[0] = '(char *) ' + build_params[0]
args = self.before_call.declare_variable('PyObject*', 'args')
self.before_call.write_code('%s = Py_BuildValue(%s);'
% (args, ', '.join(build_params)))
self.before_call.add_cleanup_code('Py_DECREF(%s);' % args)
self.before_call.write_code('py_retval = PyObject_CallObject(m_callback, %s);' % args)
self.before_call.write_error_check('py_retval == NULL')
self.before_call.add_cleanup_code('Py_DECREF(py_retval);')
def generate_callback_classes(out, callbacks):
for callback_impl_num, template_parameters in enumerate(callbacks):
sink = MemoryCodeSink()
cls_name = "ns3::Callback< %s >" % ', '.join(template_parameters)
#print >> sys.stderr, "***** trying to register callback: %r" % cls_name
class_name = "PythonCallbackImpl%i" % callback_impl_num
sink.writeln('''
class %s : public ns3::CallbackImpl<%s>
{
public:
PyObject *m_callback;
%s(PyObject *callback)
{
Py_INCREF(callback);
m_callback = callback;
}
virtual ~%s()
{
PyGILState_STATE __py_gil_state;
__py_gil_state = (PyEval_ThreadsInitialized() ? PyGILState_Ensure() : (PyGILState_STATE) 0);
Py_DECREF(m_callback);
m_callback = NULL;
PyGILState_Release(__py_gil_state);
}
virtual bool IsEqual(ns3::Ptr<const ns3::CallbackImplBase> other_base) const
{
const %s *other = dynamic_cast<const %s*> (ns3::PeekPointer (other_base));
if (other != NULL)
return (other->m_callback == m_callback);
else
return false;
}
''' % (class_name, ', '.join(template_parameters), class_name, class_name, class_name, class_name))
sink.indent()
callback_return = template_parameters[0]
return_ctype = ctypeparser.parse_type(callback_return)
if ('const' in return_ctype.remove_modifiers()):
kwargs = {'is_const': True}
else:
kwargs = {}
try:
return_type = ReturnValue.new(str(return_ctype), **kwargs)
except (typehandlers.TypeLookupError, typehandlers.TypeConfigurationError), ex:
warnings.warn("***** Unable to register callback; Return value '%s' error (used in %s): %r"
% (callback_return, cls_name, ex),
Warning)
continue
arguments = []
ok = True
callback_parameters = [arg for arg in template_parameters[1:] if arg != 'ns3::empty']
for arg_num, arg_type in enumerate(callback_parameters):
arg_name = 'arg%i' % (arg_num+1)
param_ctype = ctypeparser.parse_type(arg_type)
if ('const' in param_ctype.remove_modifiers()):
kwargs = {'is_const': True}
else:
kwargs = {}
try:
arguments.append(Parameter.new(str(param_ctype), arg_name, **kwargs))
except (typehandlers.TypeLookupError, typehandlers.TypeConfigurationError), ex:
warnings.warn("***** Unable to register callback; parameter '%s %s' error (used in %s): %r"
% (arg_type, arg_name, cls_name, ex),
Warning)
ok = False
if not ok:
continue
wrapper = CallbackImplProxyMethod(return_type, arguments)
wrapper.generate(sink, 'operator()', decl_modifiers=[])
sink.unindent()
sink.writeln('};\n')
sink.flush_to(out)
class PythonCallbackParameter(Parameter):
"Class handlers"
CTYPES = [cls_name]
print >> sys.stderr, "***** registering callback handler: %r" % ctypeparser.normalize_type_string(cls_name)
DIRECTIONS = [Parameter.DIRECTION_IN]
PYTHON_CALLBACK_IMPL_NAME = class_name
TEMPLATE_ARGS = template_parameters
def convert_python_to_c(self, wrapper):
"parses python args to get C++ value"
assert isinstance(wrapper, typehandlers.ForwardWrapperBase)
if self.default_value is None:
py_callback = wrapper.declarations.declare_variable('PyObject*', self.name)
wrapper.parse_params.add_parameter('O', ['&'+py_callback], self.name)
wrapper.before_call.write_error_check(
'!PyCallable_Check(%s)' % py_callback,
                        'PyErr_SetString(PyExc_TypeError, "parameter \'%s\' must be callable");' % self.name)
callback_impl = wrapper.declarations.declare_variable(
'ns3::Ptr<%s>' % self.PYTHON_CALLBACK_IMPL_NAME,
'%s_cb_impl' % self.name)
wrapper.before_call.write_code("%s = ns3::Create<%s> (%s);"
% (callback_impl, self.PYTHON_CALLBACK_IMPL_NAME, py_callback))
wrapper.call_params.append(
'ns3::Callback<%s> (%s)' % (', '.join(self.TEMPLATE_ARGS), callback_impl))
else:
py_callback = wrapper.declarations.declare_variable('PyObject*', self.name, 'NULL')
wrapper.parse_params.add_parameter('O', ['&'+py_callback], self.name, optional=True)
value = wrapper.declarations.declare_variable(
'ns3::Callback<%s>' % ', '.join(self.TEMPLATE_ARGS),
self.name+'_value',
self.default_value)
wrapper.before_call.write_code("if (%s) {" % (py_callback,))
wrapper.before_call.indent()
wrapper.before_call.write_error_check(
'!PyCallable_Check(%s)' % py_callback,
                        'PyErr_SetString(PyExc_TypeError, "parameter \'%s\' must be callable");' % self.name)
wrapper.before_call.write_code("%s = ns3::Callback<%s> (ns3::Create<%s> (%s));"
% (value, ', '.join(self.TEMPLATE_ARGS),
self.PYTHON_CALLBACK_IMPL_NAME, py_callback))
wrapper.before_call.unindent()
wrapper.before_call.write_code("}") # closes: if (py_callback) {
wrapper.call_params.append(value)
def convert_c_to_python(self, wrapper):
raise typehandlers.NotSupportedError("Reverse wrappers for ns3::Callback<...> types "
"(python using callbacks defined in C++) not implemented.")
# def write_preamble(out):
# pybindgen.write_preamble(out)
# out.writeln("#include \"ns3/everything.h\"")
def Simulator_customizations(module):
Simulator = module['ns3::Simulator']
## Simulator::Schedule(delay, callback, ...user..args...)
Simulator.add_custom_method_wrapper("Schedule", "_wrap_Simulator_Schedule",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
## Simulator::ScheduleNow(callback, ...user..args...)
Simulator.add_custom_method_wrapper("ScheduleNow", "_wrap_Simulator_ScheduleNow",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
## Simulator::ScheduleDestroy(callback, ...user..args...)
Simulator.add_custom_method_wrapper("ScheduleDestroy", "_wrap_Simulator_ScheduleDestroy",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
Simulator.add_custom_method_wrapper("Run", "_wrap_Simulator_Run",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
def CommandLine_customizations(module):
CommandLine = module['ns3::CommandLine']
CommandLine.add_method('Parse', None, [ArgvParam(None, 'argv')],
is_static=False)
CommandLine.add_custom_method_wrapper("AddValue", "_wrap_CommandLine_AddValue",
flags=["METH_VARARGS", "METH_KEYWORDS"])
def Object_customizations(module):
## ---------------------------------------------------------------------
## Here we generate custom constructor code for all classes that
## derive from ns3::Object. The custom constructors are needed in
## order to support kwargs only and to translate kwargs into ns3
## attributes, etc.
## ---------------------------------------------------------------------
try:
Object = module['ns3::Object']
except KeyError:
return
    ## add a GetTypeId method to all generated helper classes
def helper_class_hook(helper_class):
decl = """
static ns3::TypeId GetTypeId (void)
{
static ns3::TypeId tid = ns3::TypeId ("%s")
.SetParent< %s > ()
;
return tid;
}""" % (helper_class.name, helper_class.class_.full_name)
helper_class.add_custom_method(decl)
helper_class.add_post_generation_code(
"NS_OBJECT_ENSURE_REGISTERED (%s);" % helper_class.name)
Object.add_helper_class_hook(helper_class_hook)
def ns3_object_instance_creation_function(cpp_class, code_block, lvalue,
parameters, construct_type_name):
assert lvalue
assert not lvalue.startswith('None')
if cpp_class.cannot_be_constructed:
raise CodeGenerationError("%s cannot be constructed (%s)"
% cpp_class.full_name)
if cpp_class.incomplete_type:
raise CodeGenerationError("%s cannot be constructed (incomplete type)"
% cpp_class.full_name)
code_block.write_code("%s = new %s(%s);" % (lvalue, construct_type_name, parameters))
code_block.write_code("%s->Ref ();" % (lvalue))
def ns3_object_post_instance_creation_function(cpp_class, code_block, lvalue,
parameters, construct_type_name):
code_block.write_code("ns3::CompleteConstruct(%s);" % (lvalue, ))
Object.set_instance_creation_function(ns3_object_instance_creation_function)
Object.set_post_instance_creation_function(ns3_object_post_instance_creation_function)
def Attribute_customizations(module):
# Fix up for the "const AttributeValue &v = EmptyAttribute()"
# case, as used extensively by helper classes.
# Here's why we need to do this: pybindgen.gccxmlscanner, when
# scanning parameter default values, is only provided with the
# value as a simple C expression string. (py)gccxml does not
# report the type of the default value.
# As a workaround, here we iterate over all parameters of all
# methods of all classes and tell pybindgen what is the type of
# the default value for attributes.
for cls in module.classes:
for meth in cls.get_all_methods():
for param in meth.parameters:
if isinstance(param, cppclass.CppClassRefParameter):
if param.cpp_class.name == 'AttributeValue' \
and param.default_value is not None \
and param.default_value_type is None:
param.default_value_type = 'ns3::EmptyAttributeValue'
def TypeId_customizations(module):
TypeId = module['ns3::TypeId']
TypeId.add_custom_method_wrapper("LookupByNameFailSafe", "_wrap_TypeId_LookupByNameFailSafe",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
def add_std_ofstream(module):
module.add_include('<fstream>')
ostream = module.add_class('ostream', foreign_cpp_namespace='::std')
ostream.set_cannot_be_constructed("abstract base class")
ofstream = module.add_class('ofstream', foreign_cpp_namespace='::std', parent=ostream)
ofstream.add_enum('openmode', [
('app', 'std::ios_base::app'),
('ate', 'std::ios_base::ate'),
('binary', 'std::ios_base::binary'),
('in', 'std::ios_base::in'),
('out', 'std::ios_base::out'),
('trunc', 'std::ios_base::trunc'),
])
ofstream.add_constructor([Parameter.new("const char *", 'filename'),
Parameter.new("::std::ofstream::openmode", 'mode', default_value="std::ios_base::out")])
ofstream.add_method('close', None, [])
add_std_ios_openmode(module)
def add_std_ios_openmode(module):
import pybindgen.typehandlers.base
for alias in "std::_Ios_Openmode", "std::ios::openmode":
pybindgen.typehandlers.base.param_type_matcher.add_type_alias(alias, "int")
for flag in 'in', 'out', 'ate', 'app', 'trunc', 'binary':
module.after_init.write_code('PyModule_AddIntConstant(m, (char *) "STD_IOS_%s", std::ios::%s);'
% (flag.upper(), flag))
def add_ipv4_address_tp_hash(module):
module.body.writeln('''
long
_ns3_Ipv4Address_tp_hash (PyObject *obj)
{
PyNs3Ipv4Address *addr = reinterpret_cast<PyNs3Ipv4Address *> (obj);
return static_cast<long> (ns3::Ipv4AddressHash () (*addr->obj));
}
''')
module.header.writeln('long _ns3_Ipv4Address_tp_hash (PyObject *obj);')
module['Ipv4Address'].pytype.slots['tp_hash'] = "_ns3_Ipv4Address_tp_hash"
| gpl-2.0 |
brunogamacatao/portalsaladeaula | django/contrib/localflavor/pl/forms.py | 46 | 5225 | """
Polish-specific form helpers
"""
import re
from django.forms import ValidationError
from django.forms.fields import Select, RegexField
from django.utils.translation import ugettext_lazy as _
class PLProvinceSelect(Select):
"""
A select widget with list of Polish administrative provinces as choices.
"""
def __init__(self, attrs=None):
from pl_voivodeships import VOIVODESHIP_CHOICES
super(PLProvinceSelect, self).__init__(attrs, choices=VOIVODESHIP_CHOICES)
class PLCountySelect(Select):
"""
A select widget with list of Polish administrative units as choices.
"""
def __init__(self, attrs=None):
from pl_administrativeunits import ADMINISTRATIVE_UNIT_CHOICES
super(PLCountySelect, self).__init__(attrs, choices=ADMINISTRATIVE_UNIT_CHOICES)
class PLPESELField(RegexField):
"""
A form field that validates as Polish Identification Number (PESEL).
Checks the following rules:
        * the length consists of 11 digits
* has a valid checksum
The algorithm is documented at http://en.wikipedia.org/wiki/PESEL.
"""
default_error_messages = {
'invalid': _(u'National Identification Number consists of 11 digits.'),
'checksum': _(u'Wrong checksum for the National Identification Number.'),
}
def __init__(self, *args, **kwargs):
super(PLPESELField, self).__init__(r'^\d{11}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self,value):
super(PLPESELField, self).clean(value)
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['checksum'])
return u'%s' % value
def has_valid_checksum(self, number):
"""
Calculates a checksum with the provided algorithm.
"""
multiple_table = (1, 3, 7, 9, 1, 3, 7, 9, 1, 3, 1)
result = 0
for i in range(len(number)):
result += int(number[i]) * multiple_table[i]
return result % 10 == 0
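    # Editor's worked example (not part of the original module): for the sample
    # PESEL 44051401359 (the example number used in the Wikipedia article
    # referenced above), the weighted sum is
    # 4*1 + 4*3 + 0*7 + 5*9 + 1*1 + 4*3 + 0*7 + 1*9 + 3*1 + 5*3 + 9*1 = 110,
    # and 110 % 10 == 0, so has_valid_checksum() returns True.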
class PLNIPField(RegexField):
"""
A form field that validates as Polish Tax Number (NIP).
Valid forms are: XXX-XXX-YY-YY or XX-XX-YYY-YYY.
Checksum algorithm based on documentation at
http://wipos.p.lodz.pl/zylla/ut/nip-rego.html
"""
default_error_messages = {
'invalid': _(u'Enter a tax number field (NIP) in the format XXX-XXX-XX-XX or XX-XX-XXX-XXX.'),
'checksum': _(u'Wrong checksum for the Tax Number (NIP).'),
}
def __init__(self, *args, **kwargs):
super(PLNIPField, self).__init__(r'^\d{3}-\d{3}-\d{2}-\d{2}$|^\d{2}-\d{2}-\d{3}-\d{3}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self,value):
super(PLNIPField, self).clean(value)
value = re.sub("[-]", "", value)
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['checksum'])
return u'%s' % value
def has_valid_checksum(self, number):
"""
Calculates a checksum with the provided algorithm.
"""
multiple_table = (6, 5, 7, 2, 3, 4, 5, 6, 7)
result = 0
for i in range(len(number)-1):
result += int(number[i]) * multiple_table[i]
result %= 11
if result == int(number[-1]):
return True
else:
return False
class PLREGONField(RegexField):
"""
A form field that validates its input is a REGON number.
Valid regon number consists of 9 or 14 digits.
See http://www.stat.gov.pl/bip/regon_ENG_HTML.htm for more information.
"""
default_error_messages = {
'invalid': _(u'National Business Register Number (REGON) consists of 9 or 14 digits.'),
'checksum': _(u'Wrong checksum for the National Business Register Number (REGON).'),
}
def __init__(self, *args, **kwargs):
super(PLREGONField, self).__init__(r'^\d{9,14}$',
max_length=None, min_length=None, *args, **kwargs)
def clean(self,value):
super(PLREGONField, self).clean(value)
if not self.has_valid_checksum(value):
raise ValidationError(self.error_messages['checksum'])
return u'%s' % value
def has_valid_checksum(self, number):
"""
Calculates a checksum with the provided algorithm.
"""
weights = (
(8, 9, 2, 3, 4, 5, 6, 7, -1),
(2, 4, 8, 5, 0, 9, 7, 3, 6, 1, 2, 4, 8, -1),
(8, 9, 2, 3, 4, 5, 6, 7, -1, 0, 0, 0, 0, 0),
)
weights = [table for table in weights if len(table) == len(number)]
for table in weights:
checksum = sum([int(n) * w for n, w in zip(number, table)])
if checksum % 11 % 10:
return False
return bool(weights)
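# The weight tables above cover, in order: 9-digit numbers, 14-digit numbers,
# and the 9-digit prefix embedded in a 14-digit number (its last five digits
# get weight 0). Only tables whose length matches the input are applied.
# Illustrative 9-digit value "123456785":
# 1*8 + 2*9 + 3*2 + 4*3 + 5*4 + 6*5 + 7*6 + 8*7 = 192 and 192 % 11 == 5,
# which matches the check digit, so the full sum including -1*5 is 187,
# divisible by 11, and the number is accepted.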
class PLPostalCodeField(RegexField):
"""
A form field that validates as Polish postal code.
Valid code is XX-XXX where X is digit.
"""
default_error_messages = {
'invalid': _(u'Enter a postal code in the format XX-XXX.'),
}
def __init__(self, *args, **kwargs):
super(PLPostalCodeField, self).__init__(r'^\d{2}-\d{3}$',
max_length=None, min_length=None, *args, **kwargs)
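# Minimal usage sketch (illustrative; the form and field names below are
# hypothetical):
#
# from django import forms
# from django.contrib.localflavor.pl.forms import (
#     PLPESELField, PLNIPField, PLPostalCodeField, PLProvinceSelect)
#
# class PLContactForm(forms.Form):
#     pesel = PLPESELField()
#     nip = PLNIPField(required=False)
#     postal_code = PLPostalCodeField()
#     province = forms.CharField(widget=PLProvinceSelect())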
| bsd-3-clause |
cytec/SickRage | lib/feedparser/datetimes/__init__.py | 43 | 1368 | from __future__ import absolute_import
from .asctime import _parse_date_asctime
from .greek import _parse_date_greek
from .hungarian import _parse_date_hungarian
from .iso8601 import _parse_date_iso8601
from .korean import _parse_date_onblog, _parse_date_nate
from .perforce import _parse_date_perforce
from .rfc822 import _parse_date_rfc822
from .w3dtf import _parse_date_w3dtf
_date_handlers = []
def registerDateHandler(func):
'''Register a date handler function (takes string, returns 9-tuple date in GMT)'''
_date_handlers.insert(0, func)
def _parse_date(dateString):
'''Parses a variety of date formats into a 9-tuple in GMT'''
if not dateString:
return None
for handler in _date_handlers:
try:
date9tuple = handler(dateString)
except (KeyError, OverflowError, ValueError):
continue
if not date9tuple:
continue
if len(date9tuple) != 9:
continue
return date9tuple
return None
registerDateHandler(_parse_date_onblog)
registerDateHandler(_parse_date_nate)
registerDateHandler(_parse_date_greek)
registerDateHandler(_parse_date_hungarian)
registerDateHandler(_parse_date_perforce)
registerDateHandler(_parse_date_asctime)
registerDateHandler(_parse_date_iso8601)
registerDateHandler(_parse_date_rfc822)
registerDateHandler(_parse_date_w3dtf)
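# Usage sketch (illustrative): _parse_date() tries the handlers registered
# above, most recently registered first, and returns the first 9-tuple in GMT,
# e.g. _parse_date('Thu, 01 Jan 2004 19:48:21 GMT') via _parse_date_rfc822.
# Extra formats can be supported by registering another handler, for example
# (hypothetical):
#
# def _parse_date_epoch(value):
#     import time
#     return time.gmtime(int(value)) if value.isdigit() else None
# registerDateHandler(_parse_date_epoch)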
| gpl-3.0 |
2014cdbg3/2015cdbg9 | static/Brython3.1.1-20150328-091302/Lib/site-packages/spur.py | 291 | 5461 | #coding: utf-8
import math
# After importing the math module, the constant pi is available
# deg is the conversion factor from degrees to radians
deg = math.pi/180.
class Spur(object):
def __init__(self, ctx):
self.ctx = ctx
def create_line(self, x1, y1, x2, y2, width=3, fill="red"):
self.ctx.beginPath()
self.ctx.lineWidth = width
self.ctx.moveTo(x1, y1)
self.ctx.lineTo(x2, y2)
self.ctx.strokeStyle = fill
self.ctx.stroke()
#
# The following covers the spur gear drawing and the main tkinter-style canvas drawing
#
# Define a function that draws a spur gear
# midx is the x coordinate of the gear centre
# midy is the y coordinate of the gear centre
# rp is the pitch circle radius, n is the number of teeth
# pa is the pressure angle (deg)
# rot is the rotation angle (deg)
# Note: drawing fails when n is 52 because the relative sizes of the base circle and the dedendum (root) circle are not checked; this needs to be fixed
def Gear(self, midx, midy, rp, n=20, pa=20, color="black"):
# The involute profile of each tooth is drawn with 15 line segments
imax = 15
# Draw a straight line on the supplied canvas from the gear centre to the top of the pitch circle on the y axis
self.create_line(midx, midy, midx, midy-rp)
# Draw the rp circle (the circle-drawing helper is not defined yet)
#create_oval(midx-rp, midy-rp, midx+rp, midy+rp, width=2)
# a is the module (the metric measure of tooth size): the pitch circle diameter divided by the number of teeth
# the module also equals the addendum
a=2*rp/n
# d is the dedendum, 1.157 or 1.25 times the module; 1.25 is used here
d=2.5*rp/n
# ra is the outer (addendum circle) radius of the gear
ra=rp+a
# Draw the ra circle (helper not defined yet)
#create_oval(midx-ra, midy-ra, midx+ra, midy+ra, width=1)
# rb is the base circle radius of the gear
# the base circle is the reference circle from which the involute tooth profile is generated
rb=rp*math.cos(pa*deg)
# Draw the rb circle (base circle) (helper not defined yet)
#create_oval(midx-rb, midy-rb, midx+rb, midy+rb, width=1)
# rd is the dedendum (root) circle radius
rd=rp-d
# when rd is greater than rb, the involute is drawn down to rd rather than rb
# Draw the rd circle (dedendum circle) (helper not defined yet)
#create_oval(midx-rd, midy-rd, midx+rd, midy+rd, width=1)
# dr is the radial increment per segment when the span from the base circle to the addendum circle is divided into imax segments
# the involute is drawn by splitting that span into imax segments
# when rd is greater than rb, the involute is drawn down to rd rather than rb
if rd>rb:
dr = (ra-rd)/imax
else:
dr=(ra-rb)/imax
# tan(pa*deg) - pa*deg is the involute function
sigma=math.pi/(2*n)+math.tan(pa*deg)-pa*deg
for j in range(n):
ang=-2.*j*math.pi/n+sigma
ang2=2.*j*math.pi/n+sigma
lxd=midx+rd*math.sin(ang2-2.*math.pi/n)
lyd=midy-rd*math.cos(ang2-2.*math.pi/n)
for i in range(imax+1):
# when rd is greater than rb, the involute is drawn down to rd rather than rb
if rd>rb:
r=rd+i*dr
else:
r=rb+i*dr
theta=math.sqrt((r*r)/(rb*rb)-1.)
alpha=theta-math.atan(theta)
xpt=r*math.sin(alpha-ang)
ypt=r*math.cos(alpha-ang)
xd=rd*math.sin(-ang)
yd=rd*math.cos(-ang)
# when i == 0, the drawing starts from the point on the dedendum circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
# starting from the dedendum circle on the left side; apart from the first point (xd,yd) on the dedendum circle, the remaining points (xpt,ypt) lie on the involute
self.create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=color)
# the last point lies on the addendum circle
if(i==imax):
lfx=midx+xpt
lfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# the line from last end of dedendum point to the recent
# end of dedendum point
# lxd is the x coordinate of the left-side point on the dedendum circle, lyd is its y coordinate
# the following straight line approximates the arc along the dedendum circle
self.create_line((lxd),(lyd),(midx+xd),(midy-yd),fill=color)
for i in range(imax+1):
# when rd is greater than rb, the involute is drawn down to rd rather than rb
if rd>rb:
r=rd+i*dr
else:
r=rb+i*dr
theta=math.sqrt((r*r)/(rb*rb)-1.)
alpha=theta-math.atan(theta)
xpt=r*math.sin(ang2-alpha)
ypt=r*math.cos(ang2-alpha)
xd=rd*math.sin(ang2)
yd=rd*math.cos(ang2)
# when i == 0, the drawing starts from the point on the dedendum circle
if(i==0):
last_x = midx+xd
last_y = midy-yd
# starting from the dedendum circle on the right side; apart from the first point (xd,yd) on the dedendum circle, the remaining points (xpt,ypt) lie on the involute
self.create_line((midx+xpt),(midy-ypt),(last_x),(last_y),fill=color)
# the last point lies on the addendum circle
if(i==imax):
rfx=midx+xpt
rfy=midy-ypt
last_x = midx+xpt
last_y = midy-ypt
# lfx is the x coordinate of the left-side point on the addendum circle, lfy is its y coordinate
# the following straight line approximates the arc along the addendum circle
self.create_line(lfx,lfy,rfx,rfy,fill=color)
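# Usage sketch (illustrative; assumes a Brython page with a canvas element
# whose id is "gear_canvas" - the names are hypothetical):
#
# from browser import document
# ctx = document['gear_canvas'].getContext('2d')
# spur = Spur(ctx)
# spur.Gear(200, 200, 100, n=20, pa=20, color='blue')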
| gpl-3.0 |
makermade/arm_android-19_arm-linux-androideabi-4.8 | lib/python2.7/distutils/tests/test_install_scripts.py | 95 | 2652 | """Tests for distutils.command.install_scripts."""
import os
import unittest
from distutils.command.install_scripts import install_scripts
from distutils.core import Distribution
from distutils.tests import support
from test.test_support import run_unittest
class InstallScriptsTestCase(support.TempdirManager,
support.LoggingSilencer,
unittest.TestCase):
def test_default_settings(self):
dist = Distribution()
dist.command_obj["build"] = support.DummyCommand(
build_scripts="/foo/bar")
dist.command_obj["install"] = support.DummyCommand(
install_scripts="/splat/funk",
force=1,
skip_build=1,
)
cmd = install_scripts(dist)
self.assertTrue(not cmd.force)
self.assertTrue(not cmd.skip_build)
self.assertTrue(cmd.build_dir is None)
self.assertTrue(cmd.install_dir is None)
cmd.finalize_options()
self.assertTrue(cmd.force)
self.assertTrue(cmd.skip_build)
self.assertEqual(cmd.build_dir, "/foo/bar")
self.assertEqual(cmd.install_dir, "/splat/funk")
def test_installation(self):
source = self.mkdtemp()
expected = []
def write_script(name, text):
expected.append(name)
f = open(os.path.join(source, name), "w")
try:
f.write(text)
finally:
f.close()
write_script("script1.py", ("#! /usr/bin/env python2.3\n"
"# bogus script w/ Python sh-bang\n"
"pass\n"))
write_script("script2.py", ("#!/usr/bin/python\n"
"# bogus script w/ Python sh-bang\n"
"pass\n"))
write_script("shell.sh", ("#!/bin/sh\n"
"# bogus shell script w/ sh-bang\n"
"exit 0\n"))
target = self.mkdtemp()
dist = Distribution()
dist.command_obj["build"] = support.DummyCommand(build_scripts=source)
dist.command_obj["install"] = support.DummyCommand(
install_scripts=target,
force=1,
skip_build=1,
)
cmd = install_scripts(dist)
cmd.finalize_options()
cmd.run()
installed = os.listdir(target)
for name in expected:
self.assertTrue(name in installed)
def test_suite():
return unittest.makeSuite(InstallScriptsTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| gpl-2.0 |
hanxi/HXGame | libs/external/emscripten/third_party/ply/example/ansic/cparse.py | 164 | 20153 | # -----------------------------------------------------------------------------
# cparse.py
#
# Simple parser for ANSI C. Based on the grammar in K&R, 2nd Ed.
# -----------------------------------------------------------------------------
import sys
import clex
import ply.yacc as yacc
# Get the token map
tokens = clex.tokens
# translation-unit:
def p_translation_unit_1(t):
'translation_unit : external_declaration'
pass
def p_translation_unit_2(t):
'translation_unit : translation_unit external_declaration'
pass
# external-declaration:
def p_external_declaration_1(t):
'external_declaration : function_definition'
pass
def p_external_declaration_2(t):
'external_declaration : declaration'
pass
# function-definition:
def p_function_definition_1(t):
'function_definition : declaration_specifiers declarator declaration_list compound_statement'
pass
def p_function_definition_2(t):
'function_definition : declarator declaration_list compound_statement'
pass
def p_function_definition_3(t):
'function_definition : declarator compound_statement'
pass
def p_function_definition_4(t):
'function_definition : declaration_specifiers declarator compound_statement'
pass
# declaration:
def p_declaration_1(t):
'declaration : declaration_specifiers init_declarator_list SEMI'
pass
def p_declaration_2(t):
'declaration : declaration_specifiers SEMI'
pass
# declaration-list:
def p_declaration_list_1(t):
'declaration_list : declaration'
pass
def p_declaration_list_2(t):
'declaration_list : declaration_list declaration '
pass
# declaration-specifiers
def p_declaration_specifiers_1(t):
'declaration_specifiers : storage_class_specifier declaration_specifiers'
pass
def p_declaration_specifiers_2(t):
'declaration_specifiers : type_specifier declaration_specifiers'
pass
def p_declaration_specifiers_3(t):
'declaration_specifiers : type_qualifier declaration_specifiers'
pass
def p_declaration_specifiers_4(t):
'declaration_specifiers : storage_class_specifier'
pass
def p_declaration_specifiers_5(t):
'declaration_specifiers : type_specifier'
pass
def p_declaration_specifiers_6(t):
'declaration_specifiers : type_qualifier'
pass
# storage-class-specifier
def p_storage_class_specifier(t):
'''storage_class_specifier : AUTO
| REGISTER
| STATIC
| EXTERN
| TYPEDEF
'''
pass
# type-specifier:
def p_type_specifier(t):
'''type_specifier : VOID
| CHAR
| SHORT
| INT
| LONG
| FLOAT
| DOUBLE
| SIGNED
| UNSIGNED
| struct_or_union_specifier
| enum_specifier
| TYPEID
'''
pass
# type-qualifier:
def p_type_qualifier(t):
'''type_qualifier : CONST
| VOLATILE'''
pass
# struct-or-union-specifier
def p_struct_or_union_specifier_1(t):
'struct_or_union_specifier : struct_or_union ID LBRACE struct_declaration_list RBRACE'
pass
def p_struct_or_union_specifier_2(t):
'struct_or_union_specifier : struct_or_union LBRACE struct_declaration_list RBRACE'
pass
def p_struct_or_union_specifier_3(t):
'struct_or_union_specifier : struct_or_union ID'
pass
# struct-or-union:
def p_struct_or_union(t):
'''struct_or_union : STRUCT
| UNION
'''
pass
# struct-declaration-list:
def p_struct_declaration_list_1(t):
'struct_declaration_list : struct_declaration'
pass
def p_struct_declaration_list_2(t):
'struct_declaration_list : struct_declaration_list struct_declaration'
pass
# init-declarator-list:
def p_init_declarator_list_1(t):
'init_declarator_list : init_declarator'
pass
def p_init_declarator_list_2(t):
'init_declarator_list : init_declarator_list COMMA init_declarator'
pass
# init-declarator
def p_init_declarator_1(t):
'init_declarator : declarator'
pass
def p_init_declarator_2(t):
'init_declarator : declarator EQUALS initializer'
pass
# struct-declaration:
def p_struct_declaration(t):
'struct_declaration : specifier_qualifier_list struct_declarator_list SEMI'
pass
# specifier-qualifier-list:
def p_specifier_qualifier_list_1(t):
'specifier_qualifier_list : type_specifier specifier_qualifier_list'
pass
def p_specifier_qualifier_list_2(t):
'specifier_qualifier_list : type_specifier'
pass
def p_specifier_qualifier_list_3(t):
'specifier_qualifier_list : type_qualifier specifier_qualifier_list'
pass
def p_specifier_qualifier_list_4(t):
'specifier_qualifier_list : type_qualifier'
pass
# struct-declarator-list:
def p_struct_declarator_list_1(t):
'struct_declarator_list : struct_declarator'
pass
def p_struct_declarator_list_2(t):
'struct_declarator_list : struct_declarator_list COMMA struct_declarator'
pass
# struct-declarator:
def p_struct_declarator_1(t):
'struct_declarator : declarator'
pass
def p_struct_declarator_2(t):
'struct_declarator : declarator COLON constant_expression'
pass
def p_struct_declarator_3(t):
'struct_declarator : COLON constant_expression'
pass
# enum-specifier:
def p_enum_specifier_1(t):
'enum_specifier : ENUM ID LBRACE enumerator_list RBRACE'
pass
def p_enum_specifier_2(t):
'enum_specifier : ENUM LBRACE enumerator_list RBRACE'
pass
def p_enum_specifier_3(t):
'enum_specifier : ENUM ID'
pass
# enumerator_list:
def p_enumerator_list_1(t):
'enumerator_list : enumerator'
pass
def p_enumerator_list_2(t):
'enumerator_list : enumerator_list COMMA enumerator'
pass
# enumerator:
def p_enumerator_1(t):
'enumerator : ID'
pass
def p_enumerator_2(t):
'enumerator : ID EQUALS constant_expression'
pass
# declarator:
def p_declarator_1(t):
'declarator : pointer direct_declarator'
pass
def p_declarator_2(t):
'declarator : direct_declarator'
pass
# direct-declarator:
def p_direct_declarator_1(t):
'direct_declarator : ID'
pass
def p_direct_declarator_2(t):
'direct_declarator : LPAREN declarator RPAREN'
pass
def p_direct_declarator_3(t):
'direct_declarator : direct_declarator LBRACKET constant_expression_opt RBRACKET'
pass
def p_direct_declarator_4(t):
'direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN '
pass
def p_direct_declarator_5(t):
'direct_declarator : direct_declarator LPAREN identifier_list RPAREN '
pass
def p_direct_declarator_6(t):
'direct_declarator : direct_declarator LPAREN RPAREN '
pass
# pointer:
def p_pointer_1(t):
'pointer : TIMES type_qualifier_list'
pass
def p_pointer_2(t):
'pointer : TIMES'
pass
def p_pointer_3(t):
'pointer : TIMES type_qualifier_list pointer'
pass
def p_pointer_4(t):
'pointer : TIMES pointer'
pass
# type-qualifier-list:
def p_type_qualifier_list_1(t):
'type_qualifier_list : type_qualifier'
pass
def p_type_qualifier_list_2(t):
'type_qualifier_list : type_qualifier_list type_qualifier'
pass
# parameter-type-list:
def p_parameter_type_list_1(t):
'parameter_type_list : parameter_list'
pass
def p_parameter_type_list_2(t):
'parameter_type_list : parameter_list COMMA ELLIPSIS'
pass
# parameter-list:
def p_parameter_list_1(t):
'parameter_list : parameter_declaration'
pass
def p_parameter_list_2(t):
'parameter_list : parameter_list COMMA parameter_declaration'
pass
# parameter-declaration:
def p_parameter_declaration_1(t):
'parameter_declaration : declaration_specifiers declarator'
pass
def p_parameter_declaration_2(t):
'parameter_declaration : declaration_specifiers abstract_declarator_opt'
pass
# identifier-list:
def p_identifier_list_1(t):
'identifier_list : ID'
pass
def p_identifier_list_2(t):
'identifier_list : identifier_list COMMA ID'
pass
# initializer:
def p_initializer_1(t):
'initializer : assignment_expression'
pass
def p_initializer_2(t):
'''initializer : LBRACE initializer_list RBRACE
| LBRACE initializer_list COMMA RBRACE'''
pass
# initializer-list:
def p_initializer_list_1(t):
'initializer_list : initializer'
pass
def p_initializer_list_2(t):
'initializer_list : initializer_list COMMA initializer'
pass
# type-name:
def p_type_name(t):
'type_name : specifier_qualifier_list abstract_declarator_opt'
pass
def p_abstract_declarator_opt_1(t):
'abstract_declarator_opt : empty'
pass
def p_abstract_declarator_opt_2(t):
'abstract_declarator_opt : abstract_declarator'
pass
# abstract-declarator:
def p_abstract_declarator_1(t):
'abstract_declarator : pointer '
pass
def p_abstract_declarator_2(t):
'abstract_declarator : pointer direct_abstract_declarator'
pass
def p_abstract_declarator_3(t):
'abstract_declarator : direct_abstract_declarator'
pass
# direct-abstract-declarator:
def p_direct_abstract_declarator_1(t):
'direct_abstract_declarator : LPAREN abstract_declarator RPAREN'
pass
def p_direct_abstract_declarator_2(t):
'direct_abstract_declarator : direct_abstract_declarator LBRACKET constant_expression_opt RBRACKET'
pass
def p_direct_abstract_declarator_3(t):
'direct_abstract_declarator : LBRACKET constant_expression_opt RBRACKET'
pass
def p_direct_abstract_declarator_4(t):
'direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN'
pass
def p_direct_abstract_declarator_5(t):
'direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN'
pass
# Optional fields in abstract declarators
def p_constant_expression_opt_1(t):
'constant_expression_opt : empty'
pass
def p_constant_expression_opt_2(t):
'constant_expression_opt : constant_expression'
pass
def p_parameter_type_list_opt_1(t):
'parameter_type_list_opt : empty'
pass
def p_parameter_type_list_opt_2(t):
'parameter_type_list_opt : parameter_type_list'
pass
# statement:
def p_statement(t):
'''
statement : labeled_statement
| expression_statement
| compound_statement
| selection_statement
| iteration_statement
| jump_statement
'''
pass
# labeled-statement:
def p_labeled_statement_1(t):
'labeled_statement : ID COLON statement'
pass
def p_labeled_statement_2(t):
'labeled_statement : CASE constant_expression COLON statement'
pass
def p_labeled_statement_3(t):
'labeled_statement : DEFAULT COLON statement'
pass
# expression-statement:
def p_expression_statement(t):
'expression_statement : expression_opt SEMI'
pass
# compound-statement:
def p_compound_statement_1(t):
'compound_statement : LBRACE declaration_list statement_list RBRACE'
pass
def p_compound_statement_2(t):
'compound_statement : LBRACE statement_list RBRACE'
pass
def p_compound_statement_3(t):
'compound_statement : LBRACE declaration_list RBRACE'
pass
def p_compound_statement_4(t):
'compound_statement : LBRACE RBRACE'
pass
# statement-list:
def p_statement_list_1(t):
'statement_list : statement'
pass
def p_statement_list_2(t):
'statement_list : statement_list statement'
pass
# selection-statement
def p_selection_statement_1(t):
'selection_statement : IF LPAREN expression RPAREN statement'
pass
def p_selection_statement_2(t):
'selection_statement : IF LPAREN expression RPAREN statement ELSE statement '
pass
def p_selection_statement_3(t):
'selection_statement : SWITCH LPAREN expression RPAREN statement '
pass
# iteration_statement:
def p_iteration_statement_1(t):
'iteration_statement : WHILE LPAREN expression RPAREN statement'
pass
def p_iteration_statement_2(t):
'iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN statement '
pass
def p_iteration_statement_3(t):
'iteration_statement : DO statement WHILE LPAREN expression RPAREN SEMI'
pass
# jump_statement:
def p_jump_statement_1(t):
'jump_statement : GOTO ID SEMI'
pass
def p_jump_statement_2(t):
'jump_statement : CONTINUE SEMI'
pass
def p_jump_statement_3(t):
'jump_statement : BREAK SEMI'
pass
def p_jump_statement_4(t):
'jump_statement : RETURN expression_opt SEMI'
pass
def p_expression_opt_1(t):
'expression_opt : empty'
pass
def p_expression_opt_2(t):
'expression_opt : expression'
pass
# expression:
def p_expression_1(t):
'expression : assignment_expression'
pass
def p_expression_2(t):
'expression : expression COMMA assignment_expression'
pass
# assignment_expression:
def p_assignment_expression_1(t):
'assignment_expression : conditional_expression'
pass
def p_assignment_expression_2(t):
'assignment_expression : unary_expression assignment_operator assignment_expression'
pass
# assignment_operator:
def p_assignment_operator(t):
'''
assignment_operator : EQUALS
| TIMESEQUAL
| DIVEQUAL
| MODEQUAL
| PLUSEQUAL
| MINUSEQUAL
| LSHIFTEQUAL
| RSHIFTEQUAL
| ANDEQUAL
| OREQUAL
| XOREQUAL
'''
pass
# conditional-expression
def p_conditional_expression_1(t):
'conditional_expression : logical_or_expression'
pass
def p_conditional_expression_2(t):
'conditional_expression : logical_or_expression CONDOP expression COLON conditional_expression '
pass
# constant-expression
def p_constant_expression(t):
'constant_expression : conditional_expression'
pass
# logical-or-expression
def p_logical_or_expression_1(t):
'logical_or_expression : logical_and_expression'
pass
def p_logical_or_expression_2(t):
'logical_or_expression : logical_or_expression LOR logical_and_expression'
pass
# logical-and-expression
def p_logical_and_expression_1(t):
'logical_and_expression : inclusive_or_expression'
pass
def p_logical_and_expression_2(t):
'logical_and_expression : logical_and_expression LAND inclusive_or_expression'
pass
# inclusive-or-expression:
def p_inclusive_or_expression_1(t):
'inclusive_or_expression : exclusive_or_expression'
pass
def p_inclusive_or_expression_2(t):
'inclusive_or_expression : inclusive_or_expression OR exclusive_or_expression'
pass
# exclusive-or-expression:
def p_exclusive_or_expression_1(t):
'exclusive_or_expression : and_expression'
pass
def p_exclusive_or_expression_2(t):
'exclusive_or_expression : exclusive_or_expression XOR and_expression'
pass
# AND-expression
def p_and_expression_1(t):
'and_expression : equality_expression'
pass
def p_and_expression_2(t):
'and_expression : and_expression AND equality_expression'
pass
# equality-expression:
def p_equality_expression_1(t):
'equality_expression : relational_expression'
pass
def p_equality_expression_2(t):
'equality_expression : equality_expression EQ relational_expression'
pass
def p_equality_expression_3(t):
'equality_expression : equality_expression NE relational_expression'
pass
# relational-expression:
def p_relational_expression_1(t):
'relational_expression : shift_expression'
pass
def p_relational_expression_2(t):
'relational_expression : relational_expression LT shift_expression'
pass
def p_relational_expression_3(t):
'relational_expression : relational_expression GT shift_expression'
pass
def p_relational_expression_4(t):
'relational_expression : relational_expression LE shift_expression'
pass
def p_relational_expression_5(t):
'relational_expression : relational_expression GE shift_expression'
pass
# shift-expression
def p_shift_expression_1(t):
'shift_expression : additive_expression'
pass
def p_shift_expression_2(t):
'shift_expression : shift_expression LSHIFT additive_expression'
pass
def p_shift_expression_3(t):
'shift_expression : shift_expression RSHIFT additive_expression'
pass
# additive-expression
def p_additive_expression_1(t):
'additive_expression : multiplicative_expression'
pass
def p_additive_expression_2(t):
'additive_expression : additive_expression PLUS multiplicative_expression'
pass
def p_additive_expression_3(t):
'additive_expression : additive_expression MINUS multiplicative_expression'
pass
# multiplicative-expression
def p_multiplicative_expression_1(t):
'multiplicative_expression : cast_expression'
pass
def p_multiplicative_expression_2(t):
'multiplicative_expression : multiplicative_expression TIMES cast_expression'
pass
def p_multiplicative_expression_3(t):
'multiplicative_expression : multiplicative_expression DIVIDE cast_expression'
pass
def p_multiplicative_expression_4(t):
'multiplicative_expression : multiplicative_expression MOD cast_expression'
pass
# cast-expression:
def p_cast_expression_1(t):
'cast_expression : unary_expression'
pass
def p_cast_expression_2(t):
'cast_expression : LPAREN type_name RPAREN cast_expression'
pass
# unary-expression:
def p_unary_expression_1(t):
'unary_expression : postfix_expression'
pass
def p_unary_expression_2(t):
'unary_expression : PLUSPLUS unary_expression'
pass
def p_unary_expression_3(t):
'unary_expression : MINUSMINUS unary_expression'
pass
def p_unary_expression_4(t):
'unary_expression : unary_operator cast_expression'
pass
def p_unary_expression_5(t):
'unary_expression : SIZEOF unary_expression'
pass
def p_unary_expression_6(t):
'unary_expression : SIZEOF LPAREN type_name RPAREN'
pass
# unary-operator
def p_unary_operator(t):
'''unary_operator : AND
| TIMES
| PLUS
| MINUS
| NOT
| LNOT '''
pass
# postfix-expression:
def p_postfix_expression_1(t):
'postfix_expression : primary_expression'
pass
def p_postfix_expression_2(t):
'postfix_expression : postfix_expression LBRACKET expression RBRACKET'
pass
def p_postfix_expression_3(t):
'postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN'
pass
def p_postfix_expression_4(t):
'postfix_expression : postfix_expression LPAREN RPAREN'
pass
def p_postfix_expression_5(t):
'postfix_expression : postfix_expression PERIOD ID'
pass
def p_postfix_expression_6(t):
'postfix_expression : postfix_expression ARROW ID'
pass
def p_postfix_expression_7(t):
'postfix_expression : postfix_expression PLUSPLUS'
pass
def p_postfix_expression_8(t):
'postfix_expression : postfix_expression MINUSMINUS'
pass
# primary-expression:
def p_primary_expression(t):
'''primary_expression : ID
| constant
| SCONST
| LPAREN expression RPAREN'''
pass
# argument-expression-list:
def p_argument_expression_list(t):
'''argument_expression_list : assignment_expression
| argument_expression_list COMMA assignment_expression'''
pass
# constant:
def p_constant(t):
'''constant : ICONST
| FCONST
| CCONST'''
pass
def p_empty(t):
'empty : '
pass
def p_error(t):
print("Whoa. We're hosed")
import profile
# Build the grammar
yacc.yacc(method='LALR')
#profile.run("yacc.yacc(method='LALR')")
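# Usage sketch (illustrative; assumes clex builds its PLY lexer at import
# time, as the accompanying examples typically do):
#
# parser = yacc.yacc(method='LALR')
# with open(sys.argv[1]) as f:
#     parser.parse(f.read())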
| mit |
rosmo/aurora | src/main/python/apache/aurora/client/api/sla.py | 5 | 12572 | #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import time
from collections import defaultdict, namedtuple
from twitter.common import log
from apache.aurora.client.base import DEFAULT_GROUPING, format_response, group_hosts
from apache.aurora.common.aurora_job_key import AuroraJobKey
from gen.apache.aurora.api.constants import LIVE_STATES
from gen.apache.aurora.api.ttypes import ResponseCode, ScheduleStatus, TaskQuery
def job_key_from_scheduled(task, cluster):
"""Creates AuroraJobKey from the ScheduledTask.
Arguments:
task -- ScheduledTask to get job key from.
cluster -- Cluster the task belongs to.
"""
config = task.assignedTask.task
return AuroraJobKey(
cluster=cluster.name,
role=config.job.role if config.job else config.owner.role,
env=config.job.environment if config.job else config.environment,
name=config.job.name if config.job else config.jobName
)
def task_query(hosts=None, job_keys=None):
"""Creates TaskQuery optionally scoped by a job(s) or hosts.
Arguments:
hosts -- list of hostnames to scope the query by.
job_keys -- list of AuroraJobKeys to scope the query by.
"""
return TaskQuery(
slaveHosts=set(hosts) if hosts else None,
jobKeys=[k.to_thrift() for k in job_keys] if job_keys else None,
statuses=LIVE_STATES)
class JobUpTimeSlaVector(object):
"""A grouping of job active tasks by:
- instance: Map of instance ID -> instance uptime in seconds.
Exposes an API for converting raw instance uptime data into job SLA metrics.
"""
def __init__(self, tasks, now=None):
self._tasks = tasks
self._now = now or time.time()
self._uptime_map = self._instance_uptime()
def total_tasks(self):
"""Returns the total count of active tasks."""
return len(self._uptime_map)
def get_wait_time_to_sla(self, percentile, duration, total_tasks=None):
"""Returns an approximate wait time until the job reaches the specified SLA
defined by percentile and duration.
Arguments:
percentile -- up count percentile to calculate wait time against.
duration -- uptime duration to calculate wait time against.
total_tasks -- optional total task count to calculate against.
"""
upcount = self.get_task_up_count(duration, total_tasks)
if upcount >= percentile:
return 0
# To get wait time to SLA:
# - Calculate the desired number of up instances in order to satisfy the percentile.
# - Find the desired index (x) in the instance list sorted in non-decreasing order of uptimes.
# If desired index outside of current element count -> return None for "infeasible".
# - Calculate wait time as: duration - duration(x)
elements = len(self._uptime_map)
total = total_tasks or elements
target_count = math.ceil(total * percentile / 100.0)
index = elements - int(target_count)
if index < 0 or index >= elements:
return None
else:
return duration - sorted(self._uptime_map.values())[index]
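# Worked example (illustrative numbers): with total = 10 tasks, percentile = 80
# and duration = 7200, target_count is ceil(10 * 0.8) = 8 and index = 10 - 8 = 2.
# The job reaches its SLA once the task with the 3rd-shortest uptime (sorted
# index 2) has been up for `duration`, so the wait is 7200 minus that uptime.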
def get_task_up_count(self, duration, total_tasks=None):
"""Returns the percentage of job tasks that stayed up longer than duration.
Arguments:
duration -- uptime duration in seconds.
total_tasks -- optional total task count to calculate against.
"""
total = total_tasks or len(self._uptime_map)
above = len([uptime for uptime in self._uptime_map.values() if uptime >= duration])
return 100.0 * above / total if total else 0
def get_job_uptime(self, percentile):
"""Returns the uptime (in seconds) of the job at the specified percentile.
Arguments:
percentile -- percentile to report uptime for.
"""
if percentile <= 0 or percentile >= 100:
raise ValueError('Percentile must be within (0, 100), got %r instead.' % percentile)
total = len(self._uptime_map)
value = math.floor(percentile / 100.0 * total)
index = total - int(value) - 1
return sorted(self._uptime_map.values())[index] if 0 <= index < total else 0
def _instance_uptime(self):
instance_map = {}
for task in self._tasks:
for event in task.taskEvents:
if event.status == ScheduleStatus.RUNNING:
instance_map[task.assignedTask.instanceId] = math.floor(
self._now - event.timestamp / 1000)
break
return instance_map
JobUpTimeLimit = namedtuple('JobUpTimeLimit', ['job', 'percentage', 'duration_secs'])
JobUpTimeDetails = namedtuple('JobUpTimeDetails',
['job', 'predicted_percentage', 'safe', 'safe_in_secs'])
class DomainUpTimeSlaVector(object):
"""A grouping of all active tasks in the cluster by:
- job: Map of job_key -> task. Provides logical mapping between jobs and their active tasks.
- host: Map of hostname -> job_key. Provides logical mapping between hosts and their jobs.
Exposes an API for querying safe domain details.
"""
DEFAULT_MIN_INSTANCE_COUNT = 2
def __init__(self, cluster, tasks, min_instance_count=DEFAULT_MIN_INSTANCE_COUNT, hosts=None):
self._cluster = cluster
self._tasks = tasks
self._now = time.time()
self._tasks_by_job, self._jobs_by_host, self._hosts_by_job = self._init_mappings(
min_instance_count)
self._host_filter = hosts
def get_safe_hosts(self,
percentage,
duration,
job_limits=None,
grouping_function=DEFAULT_GROUPING):
"""Returns hosts safe to restart with respect to their job SLA.
Every host is analyzed separately without considering other job hosts.
Arguments:
percentage -- default task up count percentage. Used if job_limits mapping is not found.
duration -- default task uptime duration in seconds. Used if job_limits mapping is not found.
job_limits -- optional SLA override map. Key: job key. Value JobUpTimeLimit. If specified,
replaces default percentage/duration within the job context.
grouping_function -- grouping function to use to group hosts.
"""
safe_groups = []
for hosts, job_keys in self._iter_groups(
self._jobs_by_host.keys(), grouping_function, self._host_filter):
safe_hosts = defaultdict(list)
for job_key in job_keys:
job_hosts = hosts.intersection(self._hosts_by_job[job_key])
job_duration = duration
job_percentage = percentage
if job_limits and job_key in job_limits:
job_duration = job_limits[job_key].duration_secs
job_percentage = job_limits[job_key].percentage
filtered_percentage, _, _ = self._simulate_hosts_down(job_key, job_hosts, job_duration)
if filtered_percentage < job_percentage:
break
for host in job_hosts:
safe_hosts[host].append(JobUpTimeLimit(job_key, filtered_percentage, job_duration))
else:
safe_groups.append(safe_hosts)
return safe_groups
def probe_hosts(self, percentage, duration, grouping_function=DEFAULT_GROUPING):
"""Returns predicted job SLAs following the removal of provided hosts.
For every given host creates a list of JobUpTimeDetails with predicted job SLA details
in case the host is restarted, including: host, job_key, predicted up count, whether
the predicted job SLA >= percentage and the expected wait time in seconds for the job
to reach its SLA.
Arguments:
percentage -- task up count percentage.
duration -- task uptime duration in seconds.
grouping_function -- grouping function to use to group hosts.
"""
probed_groups = []
for hosts, job_keys in self._iter_groups(self._host_filter or [], grouping_function):
probed_hosts = defaultdict(list)
for job_key in job_keys:
job_hosts = hosts.intersection(self._hosts_by_job[job_key])
filtered_percentage, total_count, filtered_vector = self._simulate_hosts_down(
job_key, job_hosts, duration)
# Calculate wait time to SLA in case down host violates job's SLA.
if filtered_percentage < percentage:
safe = False
wait_to_sla = filtered_vector.get_wait_time_to_sla(percentage, duration, total_count)
else:
safe = True
wait_to_sla = 0
for host in job_hosts:
probed_hosts[host].append(
JobUpTimeDetails(job_key, filtered_percentage, safe, wait_to_sla))
if probed_hosts:
probed_groups.append(probed_hosts)
return probed_groups
def _iter_groups(self, hosts_to_group, grouping_function, host_filter=None):
groups = group_hosts(hosts_to_group, grouping_function)
for _, hosts in sorted(groups.items(), key=lambda v: v[0]):
job_keys = set()
for host in hosts:
if host_filter and host not in self._host_filter:
continue
job_keys = job_keys.union(self._jobs_by_host.get(host, set()))
yield hosts, job_keys
def _create_group_results(self, group, uptime_details):
result = defaultdict(list)
for host in group.keys():
result[host].append(uptime_details)
return result
def _simulate_hosts_down(self, job_key, hosts, duration):
unfiltered_tasks = self._tasks_by_job[job_key]
# Get total job task count to use in SLA calculation.
total_count = len(unfiltered_tasks)
# Get a list of job tasks that would remain after the affected hosts go down
# and create an SLA vector with these tasks.
filtered_tasks = [task for task in unfiltered_tasks
if task.assignedTask.slaveHost not in hosts]
filtered_vector = JobUpTimeSlaVector(filtered_tasks, self._now)
# Calculate the SLA that would be in effect should the host go down.
filtered_percentage = filtered_vector.get_task_up_count(duration, total_count)
return filtered_percentage, total_count, filtered_vector
def _init_mappings(self, count):
tasks_by_job = defaultdict(list)
for task in self._tasks:
if task.assignedTask.task.production:
tasks_by_job[job_key_from_scheduled(task, self._cluster)].append(task)
# Filter jobs by the min instance count.
tasks_by_job = defaultdict(list, ((job, tasks) for job, tasks
in tasks_by_job.items() if len(tasks) >= count))
jobs_by_host = defaultdict(set)
hosts_by_job = defaultdict(set)
for job_key, tasks in tasks_by_job.items():
for task in tasks:
host = task.assignedTask.slaveHost
jobs_by_host[host].add(job_key)
hosts_by_job[job_key].add(host)
return tasks_by_job, jobs_by_host, hosts_by_job
class Sla(object):
"""Defines methods for generating job uptime metrics required for monitoring job SLA."""
def __init__(self, scheduler):
self._scheduler = scheduler
def get_job_uptime_vector(self, job_key):
"""Returns a JobUpTimeSlaVector object for the given job key.
Arguments:
job_key -- job to create a task uptime vector for.
"""
return JobUpTimeSlaVector(self._get_tasks(task_query(job_keys=[job_key])))
def get_domain_uptime_vector(self, cluster, min_instance_count, hosts=None):
"""Returns a DomainUpTimeSlaVector object with all available job uptimes.
Arguments:
cluster -- Cluster to get vector for.
min_instance_count -- Minimum job instance count to consider for domain uptime calculations.
hosts -- optional list of hostnames to query by.
"""
tasks = self._get_tasks(task_query(hosts=hosts)) if hosts else None
job_keys = set(job_key_from_scheduled(t, cluster) for t in tasks) if tasks else None
# Avoid full cluster pull if job_keys are missing for any reason but the hosts are specified.
job_tasks = [] if hosts and not job_keys else self._get_tasks(task_query(job_keys=job_keys))
return DomainUpTimeSlaVector(
cluster,
job_tasks,
min_instance_count=min_instance_count,
hosts=hosts)
def _get_tasks(self, task_query):
resp = self._scheduler.getTasksWithoutConfigs(task_query)
log.info(format_response(resp))
if resp.responseCode != ResponseCode.OK:
return []
return resp.result.scheduleStatusResult.tasks
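# Usage sketch (illustrative; scheduler_proxy and job_key are assumed to be
# provided by the surrounding client code):
#
# sla = Sla(scheduler_proxy)
# vector = sla.get_job_uptime_vector(job_key)
# up_2h = vector.get_task_up_count(duration=2 * 60 * 60)
# median_uptime = vector.get_job_uptime(50)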
| apache-2.0 |
Lochlan/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/net/bugzilla/bugzilla_unittest.py | 119 | 23727 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
import datetime
import StringIO
from .bugzilla import Bugzilla, BugzillaQueries, EditUsersParser
from webkitpy.common.config import urls
from webkitpy.common.config.committers import Reviewer, Committer, Contributor, CommitterList
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.net.web_mock import MockBrowser
from webkitpy.thirdparty.mock import Mock
from webkitpy.thirdparty.BeautifulSoup import BeautifulSoup
class BugzillaTest(unittest.TestCase):
_example_attachment = '''
<attachment
isobsolete="1"
ispatch="1"
isprivate="0"
>
<attachid>33721</attachid>
<date>2009-07-29 10:23 PDT</date>
<desc>Fixed whitespace issue</desc>
<filename>patch</filename>
<type>text/plain</type>
<size>9719</size>
<attacher>[email protected]</attacher>
<flag name="review"
id="17931"
status="+"
setter="[email protected]"
/>
<flag name="commit-queue"
id="17932"
status="+"
setter="[email protected]"
/>
</attachment>
'''
_expected_example_attachment_parsing = {
'attach_date': datetime.datetime(2009, 07, 29, 10, 23),
'bug_id' : 100,
'is_obsolete' : True,
'is_patch' : True,
'id' : 33721,
'url' : "https://bugs.webkit.org/attachment.cgi?id=33721",
'name' : "Fixed whitespace issue",
'type' : "text/plain",
'review' : '+',
'reviewer_email' : '[email protected]',
'commit-queue' : '+',
'committer_email' : '[email protected]',
'attacher_email' : '[email protected]',
}
def test_url_creation(self):
# FIXME: These would be all better as doctests
bugs = Bugzilla()
self.assertIsNone(bugs.bug_url_for_bug_id(None))
self.assertIsNone(bugs.short_bug_url_for_bug_id(None))
self.assertIsNone(bugs.attachment_url_for_id(None))
def test_parse_bug_id(self):
# Test that we can parse the urls we produce.
bugs = Bugzilla()
self.assertEqual(12345, urls.parse_bug_id(bugs.short_bug_url_for_bug_id(12345)))
self.assertEqual(12345, urls.parse_bug_id(bugs.bug_url_for_bug_id(12345)))
self.assertEqual(12345, urls.parse_bug_id(bugs.bug_url_for_bug_id(12345, xml=True)))
_bug_xml = """
<bug>
<bug_id>32585</bug_id>
<creation_ts>2009-12-15 15:17 PST</creation_ts>
<short_desc>bug to test webkit-patch's and commit-queue's failures</short_desc>
<delta_ts>2009-12-27 21:04:50 PST</delta_ts>
<reporter_accessible>1</reporter_accessible>
<cclist_accessible>1</cclist_accessible>
<classification_id>1</classification_id>
<classification>Unclassified</classification>
<product>WebKit</product>
<component>Tools / Tests</component>
<version>528+ (Nightly build)</version>
<rep_platform>PC</rep_platform>
<op_sys>Mac OS X 10.5</op_sys>
<bug_status>NEW</bug_status>
<priority>P2</priority>
<bug_severity>Normal</bug_severity>
<target_milestone>---</target_milestone>
<everconfirmed>1</everconfirmed>
<reporter name="Eric Seidel">[email protected]</reporter>
<assigned_to name="Nobody">[email protected]</assigned_to>
<cc>[email protected]</cc>
<cc>[email protected]</cc>
<long_desc isprivate="0">
<who name="Eric Seidel">[email protected]</who>
<bug_when>2009-12-15 15:17:28 PST</bug_when>
<thetext>bug to test webkit-patch and commit-queue failures
Ignore this bug. Just for testing failure modes of webkit-patch and the commit-queue.</thetext>
</long_desc>
<attachment
isobsolete="0"
ispatch="1"
isprivate="0"
>
<attachid>45548</attachid>
<date>2009-12-27 23:51 PST</date>
<desc>Patch</desc>
<filename>bug-32585-20091228005112.patch</filename>
<type>text/plain</type>
<size>10882</size>
<attacher>[email protected]</attacher>
<token>1261988248-dc51409e9c421a4358f365fa8bec8357</token>
<data encoding="base64">SW5kZXg6IFdlYktpdC9tYWMvQ2hhbmdlTG9nCj09PT09PT09PT09PT09PT09PT09PT09PT09PT09
removed-because-it-was-really-long
ZEZpbmlzaExvYWRXaXRoUmVhc29uOnJlYXNvbl07Cit9CisKIEBlbmQKIAogI2VuZGlmCg==
</data>
<flag name="review"
id="27602"
status="?"
setter="[email protected]"
/>
</attachment>
</bug>
"""
_single_bug_xml = """
<?xml version="1.0" encoding="UTF-8" standalone="yes" ?>
<!DOCTYPE bugzilla SYSTEM "https://bugs.webkit.org/bugzilla.dtd">
<bugzilla version="3.2.3"
urlbase="https://bugs.webkit.org/"
maintainer="[email protected]"
exporter="[email protected]"
>
%s
</bugzilla>
""" % _bug_xml
_expected_example_bug_parsing = {
"id" : 32585,
"title" : u"bug to test webkit-patch's and commit-queue's failures",
"cc_emails" : ["[email protected]", "[email protected]"],
"reporter_email" : "[email protected]",
"assigned_to_email" : "[email protected]",
"bug_status": "NEW",
"attachments" : [{
"attach_date": datetime.datetime(2009, 12, 27, 23, 51),
'name': u'Patch',
'url' : "https://bugs.webkit.org/attachment.cgi?id=45548",
'is_obsolete': False,
'review': '?',
'is_patch': True,
'attacher_email': '[email protected]',
'bug_id': 32585,
'type': 'text/plain',
'id': 45548
}],
"comments" : [{
'comment_date': datetime.datetime(2009, 12, 15, 15, 17, 28),
'comment_email': '[email protected]',
'text': """bug to test webkit-patch and commit-queue failures
Ignore this bug. Just for testing failure modes of webkit-patch and the commit-queue.""",
}]
}
# FIXME: This should move to a central location and be shared by more unit tests.
def _assert_dictionaries_equal(self, actual, expected):
# Make sure we aren't parsing more or less than we expect
self.assertItemsEqual(actual.keys(), expected.keys())
for key, expected_value in expected.items():
self.assertEqual(actual[key], expected_value, ("Failure for key: %s: Actual='%s' Expected='%s'" % (key, actual[key], expected_value)))
def test_parse_bug_dictionary_from_xml(self):
bug = Bugzilla()._parse_bug_dictionary_from_xml(self._single_bug_xml)
self._assert_dictionaries_equal(bug, self._expected_example_bug_parsing)
_sample_multi_bug_xml = """
<bugzilla version="3.2.3" urlbase="https://bugs.webkit.org/" maintainer="[email protected]" exporter="[email protected]">
%s
%s
</bugzilla>
""" % (_bug_xml, _bug_xml)
def test_parse_bugs_from_xml(self):
bugzilla = Bugzilla()
bugs = bugzilla._parse_bugs_from_xml(self._sample_multi_bug_xml)
self.assertEqual(len(bugs), 2)
self.assertEqual(bugs[0].id(), self._expected_example_bug_parsing['id'])
bugs = bugzilla._parse_bugs_from_xml("")
self.assertEqual(len(bugs), 0)
# This could be combined into test_bug_parsing later if desired.
def test_attachment_parsing(self):
bugzilla = Bugzilla()
soup = BeautifulSoup(self._example_attachment)
attachment_element = soup.find("attachment")
attachment = bugzilla._parse_attachment_element(attachment_element, self._expected_example_attachment_parsing['bug_id'])
self.assertTrue(attachment)
self._assert_dictionaries_equal(attachment, self._expected_example_attachment_parsing)
_sample_attachment_detail_page = """
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>
Attachment 41073 Details for Bug 27314</title>
<link rel="Top" href="https://bugs.webkit.org/">
<link rel="Up" href="show_bug.cgi?id=27314">
"""
def test_attachment_detail_bug_parsing(self):
bugzilla = Bugzilla()
self.assertEqual(27314, bugzilla._parse_bug_id_from_attachment_page(self._sample_attachment_detail_page))
def test_add_cc_to_bug(self):
bugzilla = Bugzilla()
bugzilla.browser = MockBrowser()
bugzilla.authenticate = lambda: None
expected_logs = "Adding ['[email protected]'] to the CC list for bug 42\n"
OutputCapture().assert_outputs(self, bugzilla.add_cc_to_bug, [42, ["[email protected]"]], expected_logs=expected_logs)
def _mock_control_item(self, name):
mock_item = Mock()
mock_item.name = name
return mock_item
def _mock_find_control(self, item_names=[], selected_index=0):
mock_control = Mock()
mock_control.items = [self._mock_control_item(name) for name in item_names]
mock_control.value = [item_names[selected_index]] if item_names else None
return lambda name, type: mock_control
def _assert_reopen(self, item_names=None, selected_index=None, extra_logs=None):
bugzilla = Bugzilla()
bugzilla.browser = MockBrowser()
bugzilla.authenticate = lambda: None
mock_find_control = self._mock_find_control(item_names, selected_index)
bugzilla.browser.find_control = mock_find_control
expected_logs = "Re-opening bug 42\n['comment']\n"
if extra_logs:
expected_logs += extra_logs
OutputCapture().assert_outputs(self, bugzilla.reopen_bug, [42, ["comment"]], expected_logs=expected_logs)
def test_reopen_bug(self):
self._assert_reopen(item_names=["REOPENED", "RESOLVED", "CLOSED"], selected_index=1)
self._assert_reopen(item_names=["UNCONFIRMED", "RESOLVED", "CLOSED"], selected_index=1)
extra_logs = "Did not reopen bug 42, it appears to already be open with status ['NEW'].\n"
self._assert_reopen(item_names=["NEW", "RESOLVED"], selected_index=0, extra_logs=extra_logs)
def test_file_object_for_upload(self):
bugzilla = Bugzilla()
file_object = StringIO.StringIO()
unicode_tor = u"WebKit \u2661 Tor Arne Vestb\u00F8!"
utf8_tor = unicode_tor.encode("utf-8")
self.assertEqual(bugzilla._file_object_for_upload(file_object), file_object)
self.assertEqual(bugzilla._file_object_for_upload(utf8_tor).read(), utf8_tor)
self.assertEqual(bugzilla._file_object_for_upload(unicode_tor).read(), utf8_tor)
def test_filename_for_upload(self):
bugzilla = Bugzilla()
mock_file = Mock()
mock_file.name = "foo"
self.assertEqual(bugzilla._filename_for_upload(mock_file, 1234), 'foo')
mock_timestamp = lambda: "now"
filename = bugzilla._filename_for_upload(StringIO.StringIO(), 1234, extension="patch", timestamp=mock_timestamp)
self.assertEqual(filename, "bug-1234-now.patch")
def test_commit_queue_flag(self):
bugzilla = Bugzilla()
bugzilla.committers = CommitterList(reviewers=[Reviewer("WebKit Reviewer", "[email protected]")],
committers=[Committer("WebKit Committer", "[email protected]")],
contributors=[Contributor("WebKit Contributor", "[email protected]")])
def assert_commit_queue_flag(mark_for_landing, mark_for_commit_queue, expected, username=None):
bugzilla.username = username
capture = OutputCapture()
capture.capture_output()
try:
self.assertEqual(bugzilla._commit_queue_flag(mark_for_landing=mark_for_landing, mark_for_commit_queue=mark_for_commit_queue), expected)
finally:
capture.restore_output()
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=False, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=False, expected='+', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='+', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=False, expected='X', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=False, mark_for_commit_queue=True, expected='?', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=False, expected='+', username='[email protected]')
assert_commit_queue_flag(mark_for_landing=True, mark_for_commit_queue=True, expected='+', username='[email protected]')
def test__check_create_bug_response(self):
bugzilla = Bugzilla()
title_html_bugzilla_323 = "<title>Bug 101640 Submitted</title>"
self.assertEqual(bugzilla._check_create_bug_response(title_html_bugzilla_323), '101640')
title_html_bugzilla_425 = "<title>Bug 101640 Submitted – Testing webkit-patch again</title>"
self.assertEqual(bugzilla._check_create_bug_response(title_html_bugzilla_425), '101640')
class BugzillaQueriesTest(unittest.TestCase):
_sample_request_page = """
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>Request Queue</title>
</head>
<body>
<h3>Flag: review</h3>
<table class="requests" cellspacing="0" cellpadding="4" border="1">
<tr>
<th>Requester</th>
<th>Requestee</th>
<th>Bug</th>
<th>Attachment</th>
<th>Created</th>
</tr>
<tr>
<td>Shinichiro Hamaji <hamaji@chromium.org></td>
<td></td>
<td><a href="show_bug.cgi?id=30015">30015: text-transform:capitalize is failing in CSS2.1 test suite</a></td>
<td><a href="attachment.cgi?id=40511&action=review">
40511: Patch v0</a></td>
<td>2009-10-02 04:58 PST</td>
</tr>
<tr>
<td>Zan Dobersek <zandobersek@gmail.com></td>
<td></td>
<td><a href="show_bug.cgi?id=26304">26304: [GTK] Add controls for playing html5 video.</a></td>
<td><a href="attachment.cgi?id=40722&action=review">
40722: Media controls, the simple approach</a></td>
<td>2009-10-06 09:13 PST</td>
</tr>
<tr>
<td>Zan Dobersek <zandobersek@gmail.com></td>
<td></td>
<td><a href="show_bug.cgi?id=26304">26304: [GTK] Add controls for playing html5 video.</a></td>
<td><a href="attachment.cgi?id=40723&action=review">
40723: Adjust the media slider thumb size</a></td>
<td>2009-10-06 09:15 PST</td>
</tr>
</table>
</body>
</html>
"""
_sample_quip_page = u"""
<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN"
"http://www.w3.org/TR/html4/loose.dtd">
<html>
<head>
<title>Bugzilla Quip System</title>
</head>
<body>
<h2>
Existing quips:
</h2>
<ul>
<li>Everything should be made as simple as possible, but not simpler. - Albert Einstein</li>
<li>Good artists copy. Great artists steal. - Pablo Picasso</li>
<li>\u00e7gua mole em pedra dura, tanto bate at\u008e que fura.</li>
</ul>
</body>
</html>
"""
def _assert_result_count(self, queries, html, count):
self.assertEqual(queries._parse_result_count(html), count)
def test_parse_result_count(self):
queries = BugzillaQueries(None)
# Pages with results, always list the count at least twice.
self._assert_result_count(queries, '<span class="bz_result_count">314 bugs found.</span><span class="bz_result_count">314 bugs found.</span>', 314)
self._assert_result_count(queries, '<span class="bz_result_count">Zarro Boogs found.</span>', 0)
self._assert_result_count(queries, '<span class="bz_result_count">\n \nOne bug found.</span>', 1)
self.assertRaises(Exception, queries._parse_result_count, ['Invalid'])
def test_request_page_parsing(self):
queries = BugzillaQueries(None)
self.assertEqual([40511, 40722, 40723], queries._parse_attachment_ids_request_query(self._sample_request_page))
def test_quip_page_parsing(self):
queries = BugzillaQueries(None)
expected_quips = ["Everything should be made as simple as possible, but not simpler. - Albert Einstein", "Good artists copy. Great artists steal. - Pablo Picasso", u"\u00e7gua mole em pedra dura, tanto bate at\u008e que fura."]
self.assertEqual(expected_quips, queries._parse_quips(self._sample_quip_page))
def test_load_query(self):
queries = BugzillaQueries(Mock())
queries._load_query("request.cgi?action=queue&type=review&group=type")
class EditUsersParserTest(unittest.TestCase):
_example_user_results = """
<div id="bugzilla-body">
<p>1 user found.</p>
<table id="admin_table" border="1" cellpadding="4" cellspacing="0">
<tr bgcolor="#6666FF">
<th align="left">Edit user...
</th>
<th align="left">Real name
</th>
<th align="left">Account History
</th>
</tr>
<tr>
<td >
<a href="editusers.cgi?action=edit&userid=1234&matchvalue=login_name&groupid=&grouprestrict=&matchtype=substr&matchstr=abarth%40webkit.org">
abarth@webkit.org
</a>
</td>
<td >
Adam Barth
</td>
<td >
<a href="editusers.cgi?action=activity&userid=1234&matchvalue=login_name&groupid=&grouprestrict=&matchtype=substr&matchstr=abarth%40webkit.org">
View
</a>
</td>
</tr>
</table>
"""
_example_empty_user_results = """
<div id="bugzilla-body">
<p>0 users found.</p>
<table id="admin_table" border="1" cellpadding="4" cellspacing="0">
<tr bgcolor="#6666FF">
<th align="left">Edit user...
</th>
<th align="left">Real name
</th>
<th align="left">Account History
</th>
</tr>
<tr><td colspan="3" align="center"><i><none></i></td></tr>
</table>
"""
def _assert_login_userid_pairs(self, results_page, expected_logins):
parser = EditUsersParser()
logins = parser.login_userid_pairs_from_edit_user_results(results_page)
self.assertEqual(logins, expected_logins)
def test_logins_from_editusers_results(self):
self._assert_login_userid_pairs(self._example_user_results, [("[email protected]", 1234)])
self._assert_login_userid_pairs(self._example_empty_user_results, [])
_example_user_page = """<table class="main"><tr>
<th><label for="login">Login name:</label></th>
<td>eric@webkit.org
</td>
</tr>
<tr>
<th><label for="name">Real name:</label></th>
<td>Eric Seidel
</td>
</tr>
<tr>
<th>Group access:</th>
<td>
<table class="groups">
<tr>
</tr>
<tr>
<th colspan="2">User is a member of these groups</th>
</tr>
<tr class="direct">
<td class="checkbox"><input type="checkbox"
id="group_7"
name="group_7"
value="1" checked="checked" /></td>
<td class="groupname">
<label for="group_7">
<strong>canconfirm:</strong>
Can confirm a bug.
</label>
</td>
</tr>
<tr class="direct">
<td class="checkbox"><input type="checkbox"
id="group_6"
name="group_6"
value="1" /></td>
<td class="groupname">
<label for="group_6">
<strong>editbugs:</strong>
Can edit all aspects of any bug.
/label>
</td>
</tr>
</table>
</td>
</tr>
<tr>
<th>Product responsibilities:</th>
<td>
<em>none</em>
</td>
</tr>
</table>"""
def test_user_dict_from_edit_user_page(self):
parser = EditUsersParser()
user_dict = parser.user_dict_from_edit_user_page(self._example_user_page)
expected_user_dict = {u'login': u'[email protected]', u'groups': set(['canconfirm']), u'name': u'Eric Seidel'}
self.assertEqual(expected_user_dict, user_dict)
| bsd-3-clause |
zookeepr/zookeepr | zkpylons/tests/functional/test_ceiling.py | 3 | 6694 | from routes import url_for
from zk.model import Product
from BeautifulSoup import BeautifulSoup
from .fixtures import CeilingFactory, ProductCategoryFactory, ProductFactory, PersonFactory, RoleFactory, RegistrationFactory, InvoiceFactory, InvoiceItemFactory, CompletePersonFactory
from .utils import do_login
from .crud_helper import CrudHelper
class TestCeiling(CrudHelper):
def test_new(self, app, db_session):
cats = [ProductCategoryFactory() for i in range(2)]
products = [ProductFactory(category=cats[0]) for i in range (4)] \
+ [ProductFactory(category=cats[1]) for i in range (3)]
data = {
'name' : 'test_new',
'max_sold' : '23',
'available_from' : '01/02/1945',
'available_until' : '02/03/1956',
'products' : [products[0].id, products[3].id, products[6].id],
}
def extra_form_check(form):
assert len(form.fields['ceiling.products'][0].options) == len(products)
def extra_data_check(new):
# Datetime object and multiple products are too complex for the default check
# So we disable the default data check and replace it with this
assert new.parent is None
assert new.name == 'test_new'
assert new.max_sold == 23
assert new.available_from.date().isoformat() == '1945-02-01'
assert new.available_until.date().isoformat() == '1956-03-02'
selected_ids = data['products']
assert len(new.products) == len(selected_ids)
for pid in selected_ids:
p = Product.find_by_id(pid)
assert p in new.products
CrudHelper.test_new(self, app, db_session, data=data, extra_form_check = extra_form_check, do_data_check=False, extra_data_check = extra_data_check)
# TODO: Invalid content, different date styles
def test_view(self, app, db_session):
# These are the number of special people in a given ceiling group
# Such as number of under 18s or number of special diet folk
pc1 = ProductCategoryFactory()
prods = [ProductFactory(category=pc1) for i in range(2)]
ceil = CeilingFactory(max_sold=4223, available_from='2012-12-01', available_until='1901-06-23', products=prods)
peeps = [CompletePersonFactory() for i in range(3)]
reg1 = RegistrationFactory(person=peeps[0], diet="Wabbits", over18=True)
reg2 = RegistrationFactory(person=peeps[1], diet="Wrascles", over18=False)
reg3 = RegistrationFactory(person=peeps[2], diet="", over18=False)
# need a new invoice item for each invoice
for peep in peeps:
InvoiceFactory(person=peep, items=[InvoiceItemFactory(product=p) for p in prods])
db_session.commit()
expected = [
ceil.name, str(ceil.max_sold),
ceil.available_from.strftime("%d/%m/%y"), ceil.available_until.strftime("%d/%m/%y"),
] + [p.description for p in ceil.products]
resp = CrudHelper.test_view(self, app, db_session, expected=expected, target=ceil)
print resp
soup = BeautifulSoup(resp.body)
def process_table(name):
table = soup.find(id=name).findNext('table')
return [row.findAll('td') for row in table.find('tbody').findAll('tr')]
dietspec_paid = process_table("diet_special_paid")
assert len(dietspec_paid) == 1
assert dietspec_paid[0][0].find(text="No entries")
dietspec_unpaid = process_table("diet_special_invoiced")
assert len(dietspec_unpaid) == 2
for pers in [peeps[0], peeps[1]]:
assert len(filter(None, [c.find(text=pers.fullname) for r in dietspec_unpaid for c in r])) == 1
diet_paid = process_table("diet_paid")
assert len(diet_paid) == 1
assert diet_paid[0][0].find(text="No entries")
diet_unpaid = process_table("diet_invoiced")
assert len(diet_unpaid) == 2
for pers in [peeps[0], peeps[1]]:
assert len(filter(None, [c.find(text=pers.fullname) for r in diet_unpaid for c in r])) == 1
u18_paid = process_table("under18_paid")
assert len(u18_paid) == 1
assert u18_paid[0][0].find(text="No entries")
u18_unpaid = process_table("under18_invoiced")
assert len(u18_unpaid) == 2*2
for pers in [peeps[1], peeps[2]]:
assert len(filter(None, [c.find(text=pers.fullname) for r in u18_unpaid for c in r])) == 2
def test_edit(self, app, db_session):
cats = [ProductCategoryFactory() for i in range(2)]
products = [ProductFactory(category=cats[0]) for i in range (4)] \
+ [ProductFactory(category=cats[1]) for i in range (3)]
c = CeilingFactory(max_sold=4223, available_from='2012-12-01', available_until='1901-06-23', products=[products[0], products[4], products[1]])
initial_values = {
'max_sold' : str(c.max_sold),
'available_from' : '01/12/12',
'available_until' : '23/06/01',
'products' : [str(products[i].id) for i in (0, 1, 4)],
}
new_values = {
'name' : 'test_new',
'max_sold' : '23',
'available_from' : '01/02/1945',
'available_until' : '02/03/1956',
'products' : [products[0].id, products[3].id, products[6].id],
}
db_session.commit()
def extra_form_check(form):
assert len(form.fields['ceiling.products'][0].options) == 7
def extra_data_check(new):
# Datetime object and multiple products are too complex for the default check
# So we disable the default data check and replace it with this
assert new.parent is None
assert new.name == 'test_new'
assert new.max_sold == 23
assert new.available_from.date().isoformat() == '1945-02-01'
assert new.available_until.date().isoformat() == '1956-03-02'
selected_ids = new_values['products']
assert len(new.products) == len(selected_ids)
for pid in selected_ids:
p = Product.find_by_id(pid)
assert p in new.products
# TODO: Invalid content, different date styles
CrudHelper.test_edit(self, app, db_session, initial_values=initial_values, new_values=new_values, extra_form_check=extra_form_check, do_data_check=False, extra_data_check=extra_data_check, pageid=c.id)
| gpl-2.0 |
CMLL/taiga-back | taiga/front/templatetags/functions.py | 14 | 1300 | # Copyright (C) 2015 Andrey Antukh <[email protected]>
# Copyright (C) 2015 Jesús Espino <[email protected]>
# Copyright (C) 2015 David Barragán <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django_jinja import library
from django_sites import get_by_id as get_site_by_id
from taiga.front.urls import urls
register = library.Library()
@register.global_function(name="resolve_front_url")
def resolve(type, *args):
site = get_site_by_id("front")
url_tmpl = "{scheme}//{domain}{url}"
scheme = site.scheme and "{0}:".format(site.scheme) or ""
url = urls[type].format(*args)
return url_tmpl.format(scheme=scheme, domain=site.domain, url=url)
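# Illustrative usage (added note, not part of the original file): in a Jinja2
# template rendered through django_jinja, the global registered above would
# typically be called as
#     {{ resolve_front_url("project", project.slug) }}
# (the "project" key and slug argument are hypothetical examples), which
# formats the matching entry of taiga.front.urls.urls with the given arguments
# and prefixes the scheme and domain of the "front" site.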
| agpl-3.0 |
ehashman/oh-mainline | vendor/packages/Django/django/contrib/gis/tests/relatedapp/tests.py | 198 | 14731 | from __future__ import absolute_import
from datetime import date
from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.tests.utils import mysql, oracle, no_mysql, no_oracle, no_spatialite
from django.test import TestCase
from .models import City, Location, DirectoryEntry, Parcel, Book, Author, Article
class RelatedGeoModelTest(TestCase):
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.all()
qs2 = City.objects.select_related()
qs3 = City.objects.select_related('location')
# Reference data for what's in the fixtures.
cities = (
('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat), c.location.point)
@no_mysql
def test03_transform_related(self):
"Testing the `transform` GeoQuerySet method on related geographic models."
# All the transformations are to state plane coordinate systems using
# US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
tol = 0
def check_pnt(ref, pnt):
self.assertAlmostEqual(ref.x, pnt.x, tol)
self.assertAlmostEqual(ref.y, pnt.y, tol)
self.assertEqual(ref.srid, pnt.srid)
# Each city transformed to the SRID of their state plane coordinate system.
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
)
for name, srid, wkt in transformed:
            # Doing this implicitly uses `select_related` to select the location.
# TODO: Fix why this breaks on Oracle.
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
@no_mysql
@no_spatialite
def test04a_related_extent_aggregate(self):
"Testing the `extent` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Extent('location__point'))
# One for all locations, one that excludes New Mexico (Roswell).
all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
e1 = City.objects.extent(field_name='location__point')
e2 = City.objects.exclude(state='NM').extent(field_name='location__point')
e3 = aggs['location__point__extent']
# The tolerance value is to four decimal places because of differences
# between the Oracle and PostGIS spatial backends on the extent calculation.
tol = 4
for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
for ref_val, e_val in zip(ref, e): self.assertAlmostEqual(ref_val, e_val, tol)
@no_mysql
def test04b_related_union_aggregate(self):
"Testing the `unionagg` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Union('location__point'))
# These are the points that are components of the aggregate geographic
        # union that is returned. Each point corresponds to a City PK.
p1 = Point(-104.528056, 33.387222)
p2 = Point(-97.516111, 33.058333)
p3 = Point(-79.460734, 40.18476)
p4 = Point(-96.801611, 32.782057)
p5 = Point(-95.363151, 29.763374)
# Creating the reference union geometry depending on the spatial backend,
# as Oracle will have a different internal ordering of the component
# geometries than PostGIS. The second union aggregate is for a union
# query that includes limiting information in the WHERE clause (in other
# words a `.filter()` precedes the call to `.unionagg()`).
if oracle:
ref_u1 = MultiPoint(p4, p5, p3, p1, p2, srid=4326)
ref_u2 = MultiPoint(p3, p2, srid=4326)
else:
# Looks like PostGIS points by longitude value.
ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
ref_u2 = MultiPoint(p2, p3, srid=4326)
u1 = City.objects.unionagg(field_name='location__point')
u2 = City.objects.exclude(name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth')).unionagg(field_name='location__point')
u3 = aggs['location__point__union']
self.assertEqual(ref_u1, u1)
self.assertEqual(ref_u2, u2)
self.assertEqual(ref_u1, u3)
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
l = list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
"Testing F() expressions on GeometryFields."
# Constructing a dummy parcel border and getting the City instance for
# assigning the FK.
b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326)
pcity = City.objects.get(name='Aurora')
# First parcel has incorrect center point that is equal to the City;
# it also has a second border that is different from the first as a
# 100ft buffer around the City.
c1 = pcity.location.point
c2 = c1.transform(2276, clone=True)
b2 = c2.buffer(100)
p1 = Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
# Now creating a second Parcel where the borders are the same, just
# in different coordinate systems. The center points are also the
# same (but in different coordinate systems), and this time they
# actually correspond to the centroid of the border.
c1 = b1.centroid
c2 = c1.transform(2276, clone=True)
p2 = Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
# Should return the second Parcel, which has the center within the
# border.
qs = Parcel.objects.filter(center1__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
if not mysql:
# This time center2 is in a different coordinate system and needs
# to be wrapped in transformation SQL.
qs = Parcel.objects.filter(center2__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
# Should return the first Parcel, which has the center point equal
# to the point in the City ForeignKey.
qs = Parcel.objects.filter(center1=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
if not mysql:
# This time the city column should be wrapped in transformation SQL.
qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
def test07_values(self):
"Testing values() and values_list() and GeoQuerySets."
# GeoQuerySet and GeoValuesQuerySet, and GeoValuesListQuerySet respectively.
gqs = Location.objects.all()
gvqs = Location.objects.values()
gvlqs = Location.objects.values_list()
# Incrementing through each of the models, dictionaries, and tuples
# returned by the different types of GeoQuerySets.
for m, d, t in zip(gqs, gvqs, gvlqs):
# The values should be Geometry objects and not raw strings returned
# by the spatial database.
self.assertTrue(isinstance(d['point'], Geometry))
self.assertTrue(isinstance(t[1], Geometry))
self.assertEqual(m.point, d['point'])
self.assertEqual(m.point, t[1])
def test08_defer_only(self):
"Testing defer() and only() on Geographic models."
qs = Location.objects.all()
def_qs = Location.objects.defer('point')
for loc, def_loc in zip(qs, def_qs):
self.assertEqual(loc.point, def_loc.point)
def test09_pk_relations(self):
"Ensuring correct primary key column is selected across relations. See #10757."
# The expected ID values -- notice the last two location IDs
# are out of order. Dallas and Houston have location IDs that differ
# from their PKs -- this is done to ensure that the related location
# ID column is selected instead of ID column for the city.
city_ids = (1, 2, 3, 4, 5)
loc_ids = (1, 2, 3, 5, 4)
ids_qs = City.objects.order_by('id').values('id', 'location__id')
for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
self.assertEqual(val_dict['id'], c_id)
self.assertEqual(val_dict['location__id'], l_id)
def test10_combine(self):
"Testing the combination of two GeoQuerySets. See #10807."
buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
qs1 = City.objects.filter(location__point__within=buf1)
qs2 = City.objects.filter(location__point__within=buf2)
combined = qs1 | qs2
names = [c.name for c in combined]
self.assertEqual(2, len(names))
self.assertTrue('Aurora' in names)
self.assertTrue('Kecksburg' in names)
def test11_geoquery_pickle(self):
"Ensuring GeoQuery objects are unpickled correctly. See #10839."
import pickle
from django.contrib.gis.db.models.sql import GeoQuery
qs = City.objects.all()
q_str = pickle.dumps(qs.query)
q = pickle.loads(q_str)
self.assertEqual(GeoQuery, q.__class__)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test12a_count(self):
"Testing `Count` aggregate use with the `GeoManager` on geo-fields."
# The City, 'Fort Worth' uses the same location as Dallas.
dallas = City.objects.get(name='Dallas')
# Count annotation should be 2 for the Dallas location now.
loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
self.assertEqual(2, loc.num_cities)
def test12b_count(self):
"Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
# Should only be one author (Trevor Paglen) returned by this query, and
# the annotation should have 3 for the number of books, see #11087.
# Also testing with a `GeoValuesQuerySet`, see #11489.
qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
self.assertEqual(1, len(qs))
self.assertEqual(3, qs[0].num_books)
self.assertEqual(1, len(vqs))
self.assertEqual(3, vqs[0]['num_books'])
def test13c_count(self):
"Testing `Count` aggregate with `.values()`. See #15305."
qs = Location.objects.filter(id=5).annotate(num_cities=Count('city')).values('id', 'point', 'num_cities')
self.assertEqual(1, len(qs))
self.assertEqual(2, qs[0]['num_cities'])
self.assertTrue(isinstance(qs[0]['point'], GEOSGeometry))
# TODO: The phantom model does appear on Oracle.
@no_oracle
def test13_select_related_null_fk(self):
"Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
no_author = Book.objects.create(title='Without Author')
b = Book.objects.select_related('author').get(title='Without Author')
# Should be `None`, and not a 'dummy' model.
self.assertEqual(None, b.author)
@no_mysql
@no_oracle
@no_spatialite
def test14_collect(self):
"Testing the `collect` GeoQuerySet method and `Collect` aggregate."
# Reference query:
# SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
# "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
# WHERE "relatedapp_city"."state" = 'TX';
ref_geom = GEOSGeometry('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)')
c1 = City.objects.filter(state='TX').collect(field_name='location__point')
c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
for coll in (c1, c2):
# Even though Dallas and Ft. Worth share same point, Collect doesn't
# consolidate -- that's why 4 points in MultiPoint.
self.assertEqual(4, len(coll))
self.assertEqual(ref_geom, coll)
def test15_invalid_select_related(self):
"Testing doing select_related on the related name manager of a unique FK. See #13934."
qs = Article.objects.select_related('author__article')
# This triggers TypeError when `get_default_columns` has no `local_only`
# keyword. The TypeError is swallowed if QuerySet is actually
# evaluated as list generation swallows TypeError in CPython.
sql = str(qs.query)
def test16_annotated_date_queryset(self):
"Ensure annotated date querysets work if spatial backend is used. See #14648."
birth_years = [dt.year for dt in
list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))]
birth_years.sort()
self.assertEqual([1950, 1974], birth_years)
# TODO: Related tests for KML, GML, and distance lookups.
| agpl-3.0 |
alxgu/ansible | lib/ansible/plugins/action/dellos9.py | 38 | 4095 | #
# (c) 2016 Red Hat Inc.
#
# Copyright (c) 2017 Dell Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import copy
from ansible import constants as C
from ansible.module_utils._text import to_text
from ansible.module_utils.connection import Connection
from ansible.plugins.action.network import ActionModule as ActionNetworkModule
from ansible.module_utils.network.common.utils import load_provider
from ansible.module_utils.network.dellos9.dellos9 import dellos9_provider_spec
from ansible.utils.display import Display
display = Display()
class ActionModule(ActionNetworkModule):
def run(self, tmp=None, task_vars=None):
del tmp # tmp no longer has any effect
self._config_module = True if self._task.action == 'dellos9_config' else False
socket_path = None
if self._play_context.connection == 'network_cli':
provider = self._task.args.get('provider', {})
if any(provider.values()):
display.warning('provider is unnecessary when using network_cli and will be ignored')
del self._task.args['provider']
elif self._play_context.connection == 'local':
provider = load_provider(dellos9_provider_spec, self._task.args)
pc = copy.deepcopy(self._play_context)
pc.connection = 'network_cli'
pc.network_os = 'dellos9'
pc.remote_addr = provider['host'] or self._play_context.remote_addr
pc.port = int(provider['port'] or self._play_context.port or 22)
pc.remote_user = provider['username'] or self._play_context.connection_user
pc.password = provider['password'] or self._play_context.password
pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
pc.become = provider['authorize'] or False
if pc.become:
pc.become_method = 'enable'
pc.become_pass = provider['auth_pass']
display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
connection.set_options(direct={'persistent_command_timeout': command_timeout})
socket_path = connection.run()
display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
if not socket_path:
return {'failed': True,
'msg': 'unable to open shell. Please see: ' +
'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
task_vars['ansible_socket'] = socket_path
# make sure we are in the right cli context which should be
# enable mode and not config module
if socket_path is None:
socket_path = self._connection.socket_path
conn = Connection(socket_path)
out = conn.get_prompt()
while to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
conn.send_command('exit')
out = conn.get_prompt()
result = super(ActionModule, self).run(task_vars=task_vars)
return result
| gpl-3.0 |
smilusingjavascript/blink | Tools/Scripts/webkitpy/layout_tests/breakpad/dump_reader_win.py | 50 | 5429 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import os
import shlex
from webkitpy.layout_tests.breakpad.dump_reader import DumpReader
_log = logging.getLogger(__name__)
class DumpReaderWin(DumpReader):
"""DumpReader for windows breakpad."""
def __init__(self, host, build_dir):
super(DumpReaderWin, self).__init__(host, build_dir)
self._cdb_available = None
def check_is_functional(self):
return self._check_cdb_available()
def _file_extension(self):
return 'txt'
def _get_pid_from_dump(self, dump_file):
with self._host.filesystem.open_text_file_for_reading(dump_file) as f:
crash_keys = dict([l.split(':', 1) for l in f.read().splitlines()])
if 'pid' in crash_keys:
return crash_keys['pid']
return None
def _get_stack_from_dump(self, dump_file):
minidump = dump_file[:-3] + 'dmp'
cmd = [self._cdb_path, '-y', self._build_dir, '-c', '.lines;.ecxr;k30;q', '-z', minidump]
try:
stack = self._host.executive.run_command(cmd)
except:
_log.warning('Failed to execute "%s"' % ' '.join(cmd))
else:
return stack
return None
def _find_depot_tools_path(self):
"""Attempt to find depot_tools location in PATH."""
for i in os.environ['PATH'].split(os.pathsep):
if os.path.isfile(os.path.join(i, 'gclient')):
return i
def _check_cdb_available(self):
"""Checks whether we can use cdb to symbolize minidumps."""
if self._cdb_available != None:
return self._cdb_available
CDB_LOCATION_TEMPLATES = [
'%s\\Debugging Tools For Windows',
'%s\\Debugging Tools For Windows (x86)',
'%s\\Debugging Tools For Windows (x64)',
'%s\\Windows Kits\\8.0\\Debuggers\\x86',
'%s\\Windows Kits\\8.0\\Debuggers\\x64',
'%s\\Windows Kits\\8.1\\Debuggers\\x86',
'%s\\Windows Kits\\8.1\\Debuggers\\x64',
]
program_files_directories = ['C:\\Program Files']
program_files = os.environ.get('ProgramFiles')
if program_files:
program_files_directories.append(program_files)
program_files = os.environ.get('ProgramFiles(x86)')
if program_files:
program_files_directories.append(program_files)
possible_cdb_locations = []
for template in CDB_LOCATION_TEMPLATES:
for program_files in program_files_directories:
possible_cdb_locations.append(template % program_files)
        gyp_defines = os.environ.get('GYP_DEFINES', '')
        if gyp_defines:
            # GYP_DEFINES is a space-separated list of key=value pairs; build a
            # dict so individual defines can be looked up by name.
            gyp_defines = dict(pair.split('=', 1)
                               for pair in shlex.split(gyp_defines) if '=' in pair)
        else:
            gyp_defines = {}
        if 'windows_sdk_path' in gyp_defines:
            possible_cdb_locations.extend([
                '%s\\Debuggers\\x86' % gyp_defines['windows_sdk_path'],
                '%s\\Debuggers\\x64' % gyp_defines['windows_sdk_path'],
            ])
# Look in depot_tools win_toolchain too.
depot_tools = self._find_depot_tools_path()
if depot_tools:
win8sdk = os.path.join(depot_tools, 'win_toolchain', 'vs2013_files', 'win8sdk')
possible_cdb_locations.extend([
'%s\\Debuggers\\x86' % win8sdk,
'%s\\Debuggers\\x64' % win8sdk,
])
for cdb_path in possible_cdb_locations:
cdb = self._host.filesystem.join(cdb_path, 'cdb.exe')
try:
_ = self._host.executive.run_command([cdb, '-version'])
except:
pass
else:
self._cdb_path = cdb
self._cdb_available = True
return self._cdb_available
_log.warning("CDB is not installed; can't symbolize minidumps.")
_log.warning('')
self._cdb_available = False
return self._cdb_available
| bsd-3-clause |
heli522/scikit-learn | sklearn/utils/arpack.py | 265 | 64837 | """
This contains a copy of the future version of
scipy.sparse.linalg.eigen.arpack.eigsh
It's an upgraded wrapper of the ARPACK library which
allows the use of shift-invert mode for symmetric matrices.
Find a few eigenvectors and eigenvalues of a matrix.
Uses ARPACK: http://www.caam.rice.edu/software/ARPACK/
"""
# Wrapper implementation notes
#
# ARPACK Entry Points
# -------------------
# The entry points to ARPACK are
# - (s,d)seupd : single and double precision symmetric matrix
# - (s,d,c,z)neupd: single,double,complex,double complex general matrix
# This wrapper puts the *neupd (general matrix) interfaces in eigs()
# and the *seupd (symmetric matrix) in eigsh().
# There is no Hermitian complex/double complex interface.
# To find eigenvalues of a Hermitian matrix you
# must use eigs() and not eigsh()
# It might be desirable to handle the Hermitian case differently
# and, for example, return real eigenvalues.
# Number of eigenvalues returned and complex eigenvalues
# ------------------------------------------------------
# The ARPACK nonsymmetric real and double interface (s,d)naupd return
# eigenvalues and eigenvectors in real (float,double) arrays.
# Since the eigenvalues and eigenvectors are, in general, complex
# ARPACK puts the real and imaginary parts in consecutive entries
# in real-valued arrays. This wrapper puts the real entries
# into complex data types and attempts to return the requested eigenvalues
# and eigenvectors.
# Solver modes
# ------------
# ARPACK can handle shifted and shift-invert computations
# for eigenvalues by providing a shift (sigma) and a solver
# (see the usage sketch after the imports below).
__docformat__ = "restructuredtext en"
__all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence']
import warnings
from scipy.sparse.linalg.eigen.arpack import _arpack
import numpy as np
from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator
from scipy.sparse import identity, isspmatrix, isspmatrix_csr
from scipy.linalg import lu_factor, lu_solve
from scipy.sparse.sputils import isdense
from scipy.sparse.linalg import gmres, splu
import scipy
from distutils.version import LooseVersion
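# Usage sketch (added for illustration; not part of the original module): a
# minimal, self-contained example of the shift-invert machinery described in
# the wrapper notes above. The helper is never called by this module itself
# and the matrix chosen here is arbitrary.
def _shift_invert_usage_sketch():
    # Dense symmetric matrix with eigenvalues 1..100 on the diagonal.
    A = np.diag(np.arange(1.0, 101.0))
    # Request the 3 eigenvalues of A closest to sigma=50. In shift-invert mode
    # ARPACK works with (A - sigma*I)^-1, whose largest-magnitude eigenvalues
    # map back to the eigenvalues of A nearest sigma.
    vals, vecs = eigsh(A, k=3, sigma=50.0)
    return vals, vecs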
_type_conv = {'f': 's', 'd': 'd', 'F': 'c', 'D': 'z'}
_ndigits = {'f': 5, 'd': 12, 'F': 5, 'D': 12}
DNAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found. IPARAM(5) "
"returns the number of wanted converged Ritz values.",
2: "No longer an informational error. Deprecated starting "
"with release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the "
"Implicitly restarted Arnoldi iteration. One possibility "
"is to increase the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: " WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation;",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible.",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated."
}
SNAUPD_ERRORS = DNAUPD_ERRORS
ZNAUPD_ERRORS = DNAUPD_ERRORS.copy()
ZNAUPD_ERRORS[-10] = "IPARAM(7) must be 1,2,3."
CNAUPD_ERRORS = ZNAUPD_ERRORS
DSAUPD_ERRORS = {
0: "Normal exit.",
1: "Maximum number of iterations taken. "
"All possible eigenvalues of OP has been found.",
2: "No longer an informational error. Deprecated starting with "
"release 2 of ARPACK.",
3: "No shifts could be applied during a cycle of the Implicitly "
"restarted Arnoldi iteration. One possibility is to increase "
"the size of NCV relative to NEV. ",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-4: "The maximum number of Arnoldi update iterations allowed "
"must be greater than zero.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work array WORKL is not sufficient.",
-8: "Error return from trid. eigenvalue calculation; "
"Informational error from LAPACK routine dsteqr .",
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "IPARAM(1) must be equal to 0 or 1.",
-13: "NEV and WHICH = 'BE' are incompatible. ",
-9999: "Could not build an Arnoldi factorization. "
"IPARAM(5) returns the size of the current Arnoldi "
"factorization. The user is advised to check that "
"enough workspace and array storage has been allocated.",
}
SSAUPD_ERRORS = DSAUPD_ERRORS
DNEUPD_ERRORS = {
0: "Normal exit.",
1: "The Schur form computed by LAPACK routine dlahqr "
"could not be reordered by LAPACK routine dtrsen. "
"Re-enter subroutine dneupd with IPARAM(5)NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least NCV "
"columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 2 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from calculation of a real Schur form. "
"Informational error from LAPACK routine dlahqr .",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine dtrevc.",
-10: "IPARAM(7) must be 1,2,3,4.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "DNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "DNEUPD got a different count of the number of converged "
"Ritz values than DNAUPD got. This indicates the user "
"probably made an error in passing data from DNAUPD to "
"DNEUPD or that the data was modified before entering "
"DNEUPD",
}
SNEUPD_ERRORS = DNEUPD_ERRORS.copy()
SNEUPD_ERRORS[1] = ("The Schur form computed by LAPACK routine slahqr "
"could not be reordered by LAPACK routine strsen . "
"Re-enter subroutine dneupd with IPARAM(5)=NCV and "
"increase the size of the arrays DR and DI to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.")
SNEUPD_ERRORS[-14] = ("SNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
SNEUPD_ERRORS[-15] = ("SNEUPD got a different count of the number of "
"converged Ritz values than SNAUPD got. This indicates "
"the user probably made an error in passing data from "
"SNAUPD to SNEUPD or that the data was modified before "
"entering SNEUPD")
ZNEUPD_ERRORS = {0: "Normal exit.",
1: "The Schur form computed by LAPACK routine csheqr "
"could not be reordered by LAPACK routine ztrsen. "
"Re-enter subroutine zneupd with IPARAM(5)=NCV and "
"increase the size of the array D to have "
"dimension at least dimension NCV and allocate at least "
"NCV columns for Z. NOTE: Not necessary if Z and V share "
"the same space. Please notify the authors if this error "
"occurs.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV-NEV >= 1 and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LR', 'SR', 'LI', 'SI'",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: "Error return from LAPACK eigenvalue calculation. "
"This should never happened.",
-9: "Error return from calculation of eigenvectors. "
"Informational error from LAPACK routine ztrevc.",
-10: "IPARAM(7) must be 1,2,3",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "HOWMNY = 'S' not yet implemented",
-13: "HOWMNY must be one of 'A' or 'P' if RVEC = .true.",
-14: "ZNAUPD did not find any eigenvalues to sufficient "
"accuracy.",
-15: "ZNEUPD got a different count of the number of "
"converged Ritz values than ZNAUPD got. This "
"indicates the user probably made an error in passing "
"data from ZNAUPD to ZNEUPD or that the data was "
"modified before entering ZNEUPD"}
CNEUPD_ERRORS = ZNEUPD_ERRORS.copy()
CNEUPD_ERRORS[-14] = ("CNAUPD did not find any eigenvalues to sufficient "
"accuracy.")
CNEUPD_ERRORS[-15] = ("CNEUPD got a different count of the number of "
"converged Ritz values than CNAUPD got. This indicates "
"the user probably made an error in passing data from "
"CNAUPD to CNEUPD or that the data was modified before "
"entering CNEUPD")
DSEUPD_ERRORS = {
0: "Normal exit.",
-1: "N must be positive.",
-2: "NEV must be positive.",
-3: "NCV must be greater than NEV and less than or equal to N.",
-5: "WHICH must be one of 'LM', 'SM', 'LA', 'SA' or 'BE'.",
-6: "BMAT must be one of 'I' or 'G'.",
-7: "Length of private work WORKL array is not sufficient.",
-8: ("Error return from trid. eigenvalue calculation; "
"Information error from LAPACK routine dsteqr."),
-9: "Starting vector is zero.",
-10: "IPARAM(7) must be 1,2,3,4,5.",
-11: "IPARAM(7) = 1 and BMAT = 'G' are incompatible.",
-12: "NEV and WHICH = 'BE' are incompatible.",
-14: "DSAUPD did not find any eigenvalues to sufficient accuracy.",
-15: "HOWMNY must be one of 'A' or 'S' if RVEC = .true.",
-16: "HOWMNY = 'S' not yet implemented",
-17: ("DSEUPD got a different count of the number of converged "
"Ritz values than DSAUPD got. This indicates the user "
"probably made an error in passing data from DSAUPD to "
"DSEUPD or that the data was modified before entering "
"DSEUPD.")
}
SSEUPD_ERRORS = DSEUPD_ERRORS.copy()
SSEUPD_ERRORS[-14] = ("SSAUPD did not find any eigenvalues "
"to sufficient accuracy.")
SSEUPD_ERRORS[-17] = ("SSEUPD got a different count of the number of "
"converged "
"Ritz values than SSAUPD got. This indicates the user "
"probably made an error in passing data from SSAUPD to "
"SSEUPD or that the data was modified before entering "
"SSEUPD.")
_SAUPD_ERRORS = {'d': DSAUPD_ERRORS,
's': SSAUPD_ERRORS}
_NAUPD_ERRORS = {'d': DNAUPD_ERRORS,
's': SNAUPD_ERRORS,
'z': ZNAUPD_ERRORS,
'c': CNAUPD_ERRORS}
_SEUPD_ERRORS = {'d': DSEUPD_ERRORS,
's': SSEUPD_ERRORS}
_NEUPD_ERRORS = {'d': DNEUPD_ERRORS,
's': SNEUPD_ERRORS,
'z': ZNEUPD_ERRORS,
'c': CNEUPD_ERRORS}
# accepted values of parameter WHICH in _SEUPD
_SEUPD_WHICH = ['LM', 'SM', 'LA', 'SA', 'BE']
# accepted values of parameter WHICH in _NAUPD
_NEUPD_WHICH = ['LM', 'SM', 'LR', 'SR', 'LI', 'SI']
class ArpackError(RuntimeError):
"""
ARPACK error
"""
def __init__(self, info, infodict=_NAUPD_ERRORS):
msg = infodict.get(info, "Unknown error")
RuntimeError.__init__(self, "ARPACK error %d: %s" % (info, msg))
class ArpackNoConvergence(ArpackError):
"""
ARPACK iteration did not converge
Attributes
----------
eigenvalues : ndarray
Partial result. Converged eigenvalues.
eigenvectors : ndarray
Partial result. Converged eigenvectors.
"""
def __init__(self, msg, eigenvalues, eigenvectors):
ArpackError.__init__(self, -1, {-1: msg})
self.eigenvalues = eigenvalues
self.eigenvectors = eigenvectors
class _ArpackParams(object):
def __init__(self, n, k, tp, mode=1, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
if k <= 0:
raise ValueError("k must be positive, k=%d" % k)
if maxiter is None:
maxiter = n * 10
if maxiter <= 0:
raise ValueError("maxiter must be positive, maxiter=%d" % maxiter)
if tp not in 'fdFD':
raise ValueError("matrix type must be 'f', 'd', 'F', or 'D'")
if v0 is not None:
# ARPACK overwrites its initial resid, make a copy
self.resid = np.array(v0, copy=True)
info = 1
else:
self.resid = np.zeros(n, tp)
info = 0
if sigma is None:
#sigma not used
self.sigma = 0
else:
self.sigma = sigma
if ncv is None:
ncv = 2 * k + 1
ncv = min(ncv, n)
self.v = np.zeros((n, ncv), tp) # holds Ritz vectors
self.iparam = np.zeros(11, "int")
# set solver mode and parameters
ishfts = 1
self.mode = mode
self.iparam[0] = ishfts
self.iparam[2] = maxiter
self.iparam[3] = 1
self.iparam[6] = mode
self.n = n
self.tol = tol
self.k = k
self.maxiter = maxiter
self.ncv = ncv
self.which = which
self.tp = tp
self.info = info
self.converged = False
self.ido = 0
def _raise_no_convergence(self):
msg = "No convergence (%d iterations, %d/%d eigenvectors converged)"
k_ok = self.iparam[4]
num_iter = self.iparam[2]
try:
ev, vec = self.extract(True)
except ArpackError as err:
msg = "%s [%s]" % (msg, err)
ev = np.zeros((0,))
vec = np.zeros((self.n, 0))
k_ok = 0
raise ArpackNoConvergence(msg % (num_iter, k_ok, self.k), ev, vec)
class _SymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x :
# A - symmetric
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the general eigenvalue problem:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
#
# mode = 4:
# Solve the general eigenvalue problem in Buckling mode:
# A*x = lambda*AG*x
# A - symmetric positive semi-definite
# AG - symmetric indefinite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = left multiplication by [A-sigma*AG]^-1
#
# mode = 5:
# Solve the general eigenvalue problem in Cayley-transformed mode:
# A*x = lambda*M*x
# A - symmetric
# M - symmetric positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
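        # Illustrative mapping (added comment, not in the original source): a
        # caller solving the symmetric generalized problem A*x = lambda*M*x in
        # shift-invert mode (mode=3) would supply callables along the lines of
        #     Minv_matvec = factorized(A - sigma * M)  # e.g. a sparse LU solve
        #     params = _SymmetricArpackParams(n, k, tp, None, mode=3,
        #                                     M_matvec=M.dot,
        #                                     Minv_matvec=Minv_matvec,
        #                                     sigma=sigma)
        # The names above are hypothetical; eigsh() in this module builds these
        # callables automatically via get_inv_matvec / get_OPinv_matvec.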
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 3:
if matvec is not None:
raise ValueError("matvec must not be specified for mode=3")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=3")
if M_matvec is None:
self.OP = Minv_matvec
self.OPa = Minv_matvec
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(M_matvec(x))
self.OPa = Minv_matvec
self.B = M_matvec
self.bmat = 'G'
elif mode == 4:
if matvec is None:
raise ValueError("matvec must be specified for mode=4")
if M_matvec is not None:
raise ValueError("M_matvec must not be specified for mode=4")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=4")
self.OPa = Minv_matvec
self.OP = lambda x: self.OPa(matvec(x))
self.B = matvec
self.bmat = 'G'
elif mode == 5:
if matvec is None:
raise ValueError("matvec must be specified for mode=5")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=5")
self.OPa = Minv_matvec
self.A_matvec = matvec
if M_matvec is None:
self.OP = lambda x: Minv_matvec(matvec(x) + sigma * x)
self.B = lambda x: x
self.bmat = 'I'
else:
self.OP = lambda x: Minv_matvec(matvec(x)
+ sigma * M_matvec(x))
self.B = M_matvec
self.bmat = 'G'
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _SEUPD_WHICH:
raise ValueError("which must be one of %s"
% ' '.join(_SEUPD_WHICH))
if k >= n:
raise ValueError("k must be less than rank(A), k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k:
raise ValueError("ncv must be k<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(self.ncv * (self.ncv + 8), self.tp)
ltr = _type_conv[self.tp]
if ltr not in ["s", "d"]:
raise ValueError("Input matrix is not real-valued.")
self._arpack_solver = _arpack.__dict__[ltr + 'saupd']
self._arpack_extract = _arpack.__dict__[ltr + 'seupd']
self.iterate_infodict = _SAUPD_ERRORS[ltr]
self.extract_infodict = _SEUPD_ERRORS[ltr]
self.ipntr = np.zeros(11, "int")
def iterate(self):
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info = \
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode == 1:
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.mode == 2:
self.workd[xslice] = self.OPb(self.workd[xslice])
self.workd[yslice] = self.OPa(self.workd[xslice])
elif self.mode == 5:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
Ax = self.A_matvec(self.workd[xslice])
self.workd[yslice] = self.OPa(Ax + (self.sigma *
self.workd[Bxslice]))
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
rvec = return_eigenvectors
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
d, z, ierr = self._arpack_extract(rvec, howmny, sselect, self.sigma,
self.bmat, self.which, self.k,
self.tol, self.resid, self.v,
self.iparam[0:7], self.ipntr,
self.workd[0:2 * self.n],
self.workl, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
class _UnsymmetricArpackParams(_ArpackParams):
def __init__(self, n, k, tp, matvec, mode=1, M_matvec=None,
Minv_matvec=None, sigma=None,
ncv=None, v0=None, maxiter=None, which="LM", tol=0):
# The following modes are supported:
# mode = 1:
# Solve the standard eigenvalue problem:
# A*x = lambda*x
# A - square matrix
# Arguments should be
# matvec = left multiplication by A
# M_matvec = None [not used]
# Minv_matvec = None [not used]
#
# mode = 2:
# Solve the generalized eigenvalue problem:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = left multiplication by A
# M_matvec = left multiplication by M
# Minv_matvec = left multiplication by M^-1
#
# mode = 3,4:
# Solve the general eigenvalue problem in shift-invert mode:
# A*x = lambda*M*x
# A - square matrix
# M - symmetric, positive semi-definite
# Arguments should be
# matvec = None [not used]
# M_matvec = left multiplication by M
# or None, if M is the identity
# Minv_matvec = left multiplication by [A-sigma*M]^-1
# if A is real and mode==3, use the real part of Minv_matvec
# if A is real and mode==4, use the imag part of Minv_matvec
# if A is complex and mode==3,
# use real and imag parts of Minv_matvec
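        # Illustrative note (added comment, not in the original source): for the
        # standard problem A*x = lambda*x (mode=1) only a matvec is needed, e.g.
        #     _UnsymmetricArpackParams(n, k, tp, A.dot)
        # In shift-invert mode with a real A and a complex sigma, only the real
        # (mode=3) or the imaginary (mode=4) part of [A-sigma*M]^-1 is applied;
        # eigs() in this module selects between them via its OPpart argument.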
if mode == 1:
if matvec is None:
raise ValueError("matvec must be specified for mode=1")
if M_matvec is not None:
raise ValueError("M_matvec cannot be specified for mode=1")
if Minv_matvec is not None:
raise ValueError("Minv_matvec cannot be specified for mode=1")
self.OP = matvec
self.B = lambda x: x
self.bmat = 'I'
elif mode == 2:
if matvec is None:
raise ValueError("matvec must be specified for mode=2")
if M_matvec is None:
raise ValueError("M_matvec must be specified for mode=2")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified for mode=2")
self.OP = lambda x: Minv_matvec(matvec(x))
self.OPa = Minv_matvec
self.OPb = matvec
self.B = M_matvec
self.bmat = 'G'
elif mode in (3, 4):
if matvec is None:
raise ValueError("matvec must be specified "
"for mode in (3,4)")
if Minv_matvec is None:
raise ValueError("Minv_matvec must be specified "
"for mode in (3,4)")
self.matvec = matvec
if tp in 'DF': # complex type
if mode == 3:
self.OPa = Minv_matvec
else:
raise ValueError("mode=4 invalid for complex A")
else: # real type
if mode == 3:
self.OPa = lambda x: np.real(Minv_matvec(x))
else:
self.OPa = lambda x: np.imag(Minv_matvec(x))
if M_matvec is None:
self.B = lambda x: x
self.bmat = 'I'
self.OP = self.OPa
else:
self.B = M_matvec
self.bmat = 'G'
self.OP = lambda x: self.OPa(M_matvec(x))
else:
raise ValueError("mode=%i not implemented" % mode)
if which not in _NEUPD_WHICH:
raise ValueError("Parameter which must be one of %s"
% ' '.join(_NEUPD_WHICH))
if k >= n - 1:
raise ValueError("k must be less than rank(A)-1, k=%d" % k)
_ArpackParams.__init__(self, n, k, tp, mode, sigma,
ncv, v0, maxiter, which, tol)
if self.ncv > n or self.ncv <= k + 1:
raise ValueError("ncv must be k+1<ncv<=n, ncv=%s" % self.ncv)
self.workd = np.zeros(3 * n, self.tp)
self.workl = np.zeros(3 * self.ncv * (self.ncv + 2), self.tp)
ltr = _type_conv[self.tp]
self._arpack_solver = _arpack.__dict__[ltr + 'naupd']
self._arpack_extract = _arpack.__dict__[ltr + 'neupd']
self.iterate_infodict = _NAUPD_ERRORS[ltr]
self.extract_infodict = _NEUPD_ERRORS[ltr]
self.ipntr = np.zeros(14, "int")
if self.tp in 'FD':
self.rwork = np.zeros(self.ncv, self.tp.lower())
else:
self.rwork = None
def iterate(self):
if self.tp in 'fd':
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.info)
else:
self.ido, self.resid, self.v, self.iparam, self.ipntr, self.info =\
self._arpack_solver(self.ido, self.bmat, self.which, self.k,
self.tol, self.resid, self.v, self.iparam,
self.ipntr, self.workd, self.workl,
self.rwork, self.info)
xslice = slice(self.ipntr[0] - 1, self.ipntr[0] - 1 + self.n)
yslice = slice(self.ipntr[1] - 1, self.ipntr[1] - 1 + self.n)
if self.ido == -1:
# initialization
self.workd[yslice] = self.OP(self.workd[xslice])
elif self.ido == 1:
# compute y = Op*x
if self.mode in (1, 2):
self.workd[yslice] = self.OP(self.workd[xslice])
else:
Bxslice = slice(self.ipntr[2] - 1, self.ipntr[2] - 1 + self.n)
self.workd[yslice] = self.OPa(self.workd[Bxslice])
elif self.ido == 2:
self.workd[yslice] = self.B(self.workd[xslice])
elif self.ido == 3:
raise ValueError("ARPACK requested user shifts. Assure ISHIFT==0")
else:
self.converged = True
if self.info == 0:
pass
elif self.info == 1:
self._raise_no_convergence()
else:
raise ArpackError(self.info, infodict=self.iterate_infodict)
def extract(self, return_eigenvectors):
k, n = self.k, self.n
ierr = 0
howmny = 'A' # return all eigenvectors
sselect = np.zeros(self.ncv, 'int') # unused
sigmar = np.real(self.sigma)
sigmai = np.imag(self.sigma)
workev = np.zeros(3 * self.ncv, self.tp)
if self.tp in 'fd':
dr = np.zeros(k + 1, self.tp)
di = np.zeros(k + 1, self.tp)
zr = np.zeros((n, k + 1), self.tp)
dr, di, zr, ierr = \
self._arpack_extract(
return_eigenvectors, howmny, sselect, sigmar, sigmai,
workev, self.bmat, self.which, k, self.tol, self.resid,
self.v, self.iparam, self.ipntr, self.workd, self.workl,
self.info)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
nreturned = self.iparam[4] # number of good eigenvalues returned
# Build complex eigenvalues from real and imaginary parts
d = dr + 1.0j * di
# Arrange the eigenvectors: complex eigenvectors are stored as
# real,imaginary in consecutive columns
z = zr.astype(self.tp.upper())
# The ARPACK nonsymmetric real and double interface (s,d)naupd
# return eigenvalues and eigenvectors in real (float,double)
# arrays.
# Efficiency: this should check that return_eigenvectors == True
# before going through this construction.
if sigmai == 0:
i = 0
while i <= k:
# check if complex
if abs(d[i].imag) != 0:
# this is a complex conjugate pair with eigenvalues
# in consecutive columns
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
else:
# real matrix, mode 3 or 4, imag(sigma) is nonzero:
# see remark 3 in <s,d>neupd.f
# Build complex eigenvalues from real and imaginary parts
i = 0
while i <= k:
if abs(d[i].imag) == 0:
d[i] = np.dot(zr[:, i], self.matvec(zr[:, i]))
else:
if i < k:
z[:, i] = zr[:, i] + 1.0j * zr[:, i + 1]
z[:, i + 1] = z[:, i].conjugate()
d[i] = ((np.dot(zr[:, i],
self.matvec(zr[:, i]))
+ np.dot(zr[:, i + 1],
self.matvec(zr[:, i + 1])))
+ 1j * (np.dot(zr[:, i],
self.matvec(zr[:, i + 1]))
- np.dot(zr[:, i + 1],
self.matvec(zr[:, i]))))
d[i + 1] = d[i].conj()
i += 1
else:
#last eigenvalue is complex: the imaginary part of
# the eigenvector has not been returned
#this can only happen if nreturned > k, so we'll
# throw out this case.
nreturned -= 1
i += 1
# Now we have k+1 possible eigenvalues and eigenvectors
# Return the ones specified by the keyword "which"
if nreturned <= k:
# we got less or equal as many eigenvalues we wanted
d = d[:nreturned]
z = z[:, :nreturned]
else:
# we got one extra eigenvalue (likely a cc pair, but which?)
# cut at approx precision for sorting
rd = np.round(d, decimals=_ndigits[self.tp])
if self.which in ['LR', 'SR']:
ind = np.argsort(rd.real)
elif self.which in ['LI', 'SI']:
# for LI,SI ARPACK returns largest,smallest
# abs(imaginary) why?
ind = np.argsort(abs(rd.imag))
else:
ind = np.argsort(abs(rd))
if self.which in ['LR', 'LM', 'LI']:
d = d[ind[-k:]]
z = z[:, ind[-k:]]
if self.which in ['SR', 'SM', 'SI']:
d = d[ind[:k]]
z = z[:, ind[:k]]
else:
# complex is so much simpler...
d, z, ierr =\
self._arpack_extract(
return_eigenvectors, howmny, sselect, self.sigma, workev,
self.bmat, self.which, k, self.tol, self.resid, self.v,
self.iparam, self.ipntr, self.workd, self.workl,
self.rwork, ierr)
if ierr != 0:
raise ArpackError(ierr, infodict=self.extract_infodict)
k_ok = self.iparam[4]
d = d[:k_ok]
z = z[:, :k_ok]
if return_eigenvectors:
return d, z
else:
return d
def _aslinearoperator_with_dtype(m):
m = aslinearoperator(m)
if not hasattr(m, 'dtype'):
x = np.zeros(m.shape[1])
m.dtype = (m * x).dtype
return m
class SpLuInv(LinearOperator):
"""
SpLuInv:
helper class to repeatedly solve M*x=b
    using a sparse LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = splu(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
self.isreal = not np.issubdtype(self.dtype, np.complexfloating)
def _matvec(self, x):
# careful here: splu.solve will throw away imaginary
# part of x if M is real
if self.isreal and np.issubdtype(x.dtype, np.complexfloating):
return (self.M_lu.solve(np.real(x))
+ 1j * self.M_lu.solve(np.imag(x)))
else:
return self.M_lu.solve(x)
class LuInv(LinearOperator):
"""
LuInv:
helper class to repeatedly solve M*x=b
using an LU-decomposition of M
"""
def __init__(self, M):
self.M_lu = lu_factor(M)
LinearOperator.__init__(self, M.shape, self._matvec, dtype=M.dtype)
def _matvec(self, x):
return lu_solve(self.M_lu, x)
class IterInv(LinearOperator):
"""
IterInv:
helper class to repeatedly solve M*x=b
using an iterative method.
"""
def __init__(self, M, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(M.dtype).eps
self.M = M
self.ifunc = ifunc
self.tol = tol
if hasattr(M, 'dtype'):
dtype = M.dtype
else:
x = np.zeros(M.shape[1])
dtype = (M * x).dtype
LinearOperator.__init__(self, M.shape, self._matvec, dtype=dtype)
def _matvec(self, x):
b, info = self.ifunc(self.M, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting M: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
class IterOpInv(LinearOperator):
"""
IterOpInv:
helper class to repeatedly solve [A-sigma*M]*x = b
using an iterative method
"""
def __init__(self, A, M, sigma, ifunc=gmres, tol=0):
if tol <= 0:
# when tol=0, ARPACK uses machine tolerance as calculated
# by LAPACK's _LAMCH function. We should match this
tol = np.finfo(A.dtype).eps
self.A = A
self.M = M
self.sigma = sigma
self.ifunc = ifunc
self.tol = tol
x = np.zeros(A.shape[1])
if M is None:
dtype = self.mult_func_M_None(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func_M_None,
dtype=dtype)
else:
dtype = self.mult_func(x).dtype
self.OP = LinearOperator(self.A.shape,
self.mult_func,
dtype=dtype)
LinearOperator.__init__(self, A.shape, self._matvec, dtype=dtype)
def mult_func(self, x):
return self.A.matvec(x) - self.sigma * self.M.matvec(x)
def mult_func_M_None(self, x):
return self.A.matvec(x) - self.sigma * x
def _matvec(self, x):
b, info = self.ifunc(self.OP, x, tol=self.tol)
if info != 0:
raise ValueError("Error in inverting [A-sigma*M]: function "
"%s did not converge (info = %i)."
% (self.ifunc.__name__, info))
return b
def get_inv_matvec(M, symmetric=False, tol=0):
if isdense(M):
return LuInv(M).matvec
elif isspmatrix(M):
if isspmatrix_csr(M) and symmetric:
M = M.T
return SpLuInv(M).matvec
else:
return IterInv(M, tol=tol).matvec
def get_OPinv_matvec(A, M, sigma, symmetric=False, tol=0):
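    # The returned callable applies x -> [A - sigma * M]^-1 * x (M is treated
    # as the identity when None). The branches below pick a dense LU, sparse
    # LU, or iterative solve depending on the operand types; sigma == 0 simply
    # falls back to a plain inverse of A via get_inv_matvec.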
if sigma == 0:
return get_inv_matvec(A, symmetric=symmetric, tol=tol)
if M is None:
#M is the identity matrix
if isdense(A):
if (np.issubdtype(A.dtype, np.complexfloating)
or np.imag(sigma) == 0):
A = np.copy(A)
else:
A = A + 0j
A.flat[::A.shape[1] + 1] -= sigma
return LuInv(A).matvec
elif isspmatrix(A):
A = A - sigma * identity(A.shape[0])
if symmetric and isspmatrix_csr(A):
A = A.T
return SpLuInv(A.tocsc()).matvec
else:
return IterOpInv(_aslinearoperator_with_dtype(A), M, sigma,
tol=tol).matvec
else:
if ((not isdense(A) and not isspmatrix(A)) or
(not isdense(M) and not isspmatrix(M))):
return IterOpInv(_aslinearoperator_with_dtype(A),
_aslinearoperator_with_dtype(M), sigma,
tol=tol).matvec
elif isdense(A) or isdense(M):
return LuInv(A - sigma * M).matvec
else:
OP = A - sigma * M
if symmetric and isspmatrix_csr(OP):
OP = OP.T
return SpLuInv(OP.tocsc()).matvec
def _eigs(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None, OPinv=None,
OPpart=None):
"""
Find k eigenvalues and eigenvectors of the square matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem
for w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing \
the operation A * x, where A is a real or complex square matrix.
k : int, default 6
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
return_eigenvectors : boolean, default True
Whether to return the eigenvectors along with the eigenvalues.
M : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation M*x for the generalized eigenvalue problem
``A * x = w * M * x``
M must represent a real symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma==None, M is positive definite
* If sigma is specified, M is positive semi-definite
If sigma==None, eigs requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real or complex
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] * x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
For a real matrix A, shift-invert can either be done in imaginary
mode or real mode, specified by the parameter OPpart ('r' or 'i').
Note that when sigma is specified, the keyword 'which' (below)
refers to the shifted eigenvalues w'[i] where:
* If A is real and OPpart == 'r' (default),
w'[i] = 1/2 * [ 1/(w[i]-sigma) + 1/(w[i]-conj(sigma)) ]
* If A is real and OPpart == 'i',
w'[i] = 1/2i * [ 1/(w[i]-sigma) - 1/(w[i]-conj(sigma)) ]
* If A is complex,
w'[i] = 1/(w[i]-sigma)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
`ncv` must be greater than `k`; it is recommended that ``ncv > 2*k``.
which : string ['LM' | 'SM' | 'LR' | 'SR' | 'LI' | 'SI']
Which `k` eigenvectors and eigenvalues to find:
- 'LM' : largest magnitude
- 'SM' : smallest magnitude
- 'LR' : largest real part
- 'SR' : smallest real part
- 'LI' : largest imaginary part
- 'SI' : smallest imaginary part
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion)
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or linear operator
See notes in M, above.
OPinv : N x N matrix, array, sparse matrix, or linear operator
See notes in sigma, above.
OPpart : 'r' or 'i'.
See notes in sigma, above
Returns
-------
w : array
Array of k eigenvalues.
v : array
An array of `k` eigenvectors.
``v[:, i]`` is the eigenvector corresponding to the eigenvalue w[i].
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigsh : eigenvalues and eigenvectors for symmetric matrix A
svds : singular value decomposition for a matrix A
Examples
--------
Find 6 eigenvectors of the identity matrix:
>>> from sklearn.utils.arpack import eigs
>>> id = np.identity(13)
>>> vals, vecs = eigs(id, k=6)
>>> vals
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> vecs.shape
(13, 6)
Notes
-----
This function is a wrapper to the ARPACK [1]_ SNEUPD, DNEUPD, CNEUPD,
ZNEUPD, functions which use the Implicitly Restarted Arnoldi Method to
find the eigenvalues and eigenvectors [2]_.
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if OPpart is not None:
raise ValueError("OPpart should not be specified with "
"sigma = None or complex A")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
#sigma is not None: shift-invert mode
if np.issubdtype(A.dtype, np.complexfloating):
if OPpart is not None:
raise ValueError("OPpart should not be specified "
"with sigma=None or complex A")
mode = 3
elif OPpart is None or OPpart.lower() == 'r':
mode = 3
elif OPpart.lower() == 'i':
if np.imag(sigma) == 0:
raise ValueError("OPpart cannot be 'i' if sigma is real")
mode = 4
else:
raise ValueError("OPpart must be one of ('r','i')")
matvec = _aslinearoperator_with_dtype(A).matvec
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=False, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
params = _UnsymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _eigsh(A, k=6, M=None, sigma=None, which='LM', v0=None, ncv=None,
maxiter=None, tol=0, return_eigenvectors=True, Minv=None,
OPinv=None, mode='normal'):
"""
Find k eigenvalues and eigenvectors of the real symmetric square matrix
or complex hermitian matrix A.
Solves ``A * x[i] = w[i] * x[i]``, the standard eigenvalue problem for
w[i] eigenvalues with corresponding eigenvectors x[i].
If M is specified, solves ``A * x[i] = w[i] * M * x[i]``, the
generalized eigenvalue problem for w[i] eigenvalues
with corresponding eigenvectors x[i]
Parameters
----------
A : An N x N matrix, array, sparse matrix, or LinearOperator representing
the operation A * x, where A is a real symmetric matrix
For buckling mode (see below) A must additionally be positive-definite
k : integer
The number of eigenvalues and eigenvectors desired.
`k` must be smaller than N. It is not possible to compute all
eigenvectors of a matrix.
M : An N x N matrix, array, sparse matrix, or linear operator representing
the operation M * x for the generalized eigenvalue problem
``A * x = w * M * x``.
M must represent a real, symmetric matrix. For best results, M should
be of the same type as A. Additionally:
* If sigma == None, M is symmetric positive definite
* If sigma is specified, M is symmetric positive semi-definite
* In buckling mode, M is symmetric indefinite.
If sigma == None, eigsh requires an operator to compute the solution
of the linear equation `M * x = b`. This is done internally via a
(sparse) LU decomposition for an explicit matrix M, or via an
iterative solver for a general linear operator. Alternatively,
the user can supply the matrix or operator Minv, which gives
x = Minv * b = M^-1 * b
sigma : real
Find eigenvalues near sigma using shift-invert mode. This requires
an operator to compute the solution of the linear system
`[A - sigma * M] x = b`, where M is the identity matrix if
unspecified. This is computed internally via a (sparse) LU
decomposition for explicit matrices A & M, or via an iterative
solver if either A or M is a general linear operator.
Alternatively, the user can supply the matrix or operator OPinv,
which gives x = OPinv * b = [A - sigma * M]^-1 * b.
Note that when sigma is specified, the keyword 'which' refers to
the shifted eigenvalues w'[i] where:
- if mode == 'normal',
w'[i] = 1 / (w[i] - sigma)
- if mode == 'cayley',
w'[i] = (w[i] + sigma) / (w[i] - sigma)
- if mode == 'buckling',
w'[i] = w[i] / (w[i] - sigma)
(see further discussion in 'mode' below)
v0 : array
Starting vector for iteration.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k and smaller than n;
it is recommended that ncv > 2*k
which : string ['LM' | 'SM' | 'LA' | 'SA' | 'BE']
If A is a complex hermitian matrix, 'BE' is invalid.
Which `k` eigenvectors and eigenvalues to find
- 'LM' : Largest (in magnitude) eigenvalues
- 'SM' : Smallest (in magnitude) eigenvalues
- 'LA' : Largest (algebraic) eigenvalues
- 'SA' : Smallest (algebraic) eigenvalues
- 'BE' : Half (k/2) from each end of the spectrum
When k is odd, return one more (k/2+1) from the high end
When sigma != None, 'which' refers to the shifted eigenvalues w'[i]
(see discussion in 'sigma', above). ARPACK is generally better
at finding large values than small values. If small eigenvalues are
desired, consider using shift-invert mode for better performance.
maxiter : integer
Maximum number of Arnoldi update iterations allowed
tol : float
Relative accuracy for eigenvalues (stopping criterion).
The default value of 0 implies machine precision.
Minv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in M, above
OPinv : N x N matrix, array, sparse matrix, or LinearOperator
See notes in sigma, above.
return_eigenvectors : boolean
Return eigenvectors (True) in addition to eigenvalues
mode : string ['normal' | 'buckling' | 'cayley']
Specify strategy to use for shift-invert mode. This argument applies
only for real-valued A and sigma != None. For shift-invert mode,
ARPACK internally solves the eigenvalue problem
``OP * x'[i] = w'[i] * B * x'[i]``
and transforms the resulting Ritz vectors x'[i] and Ritz values w'[i]
into the desired eigenvectors and eigenvalues of the problem
``A * x[i] = w[i] * M * x[i]``.
The modes are as follows:
- 'normal' : OP = [A - sigma * M]^-1 * M
B = M
w'[i] = 1 / (w[i] - sigma)
- 'buckling' : OP = [A - sigma * M]^-1 * A
B = A
w'[i] = w[i] / (w[i] - sigma)
- 'cayley' : OP = [A - sigma * M]^-1 * [A + sigma * M]
B = M
w'[i] = (w[i] + sigma) / (w[i] - sigma)
The choice of mode will affect which eigenvalues are selected by
the keyword 'which', and can also impact the stability of
convergence (see [2] for a discussion)
Returns
-------
w : array
Array of k eigenvalues
v : array
An array of k eigenvectors
        ``v[:, i]`` is the eigenvector corresponding to the eigenvalue ``w[i]``.
Raises
------
ArpackNoConvergence
When the requested convergence is not obtained.
The currently converged eigenvalues and eigenvectors can be found
as ``eigenvalues`` and ``eigenvectors`` attributes of the exception
object.
See Also
--------
eigs : eigenvalues and eigenvectors for a general (nonsymmetric) matrix A
svds : singular value decomposition for a matrix A
Notes
-----
This function is a wrapper to the ARPACK [1]_ SSEUPD and DSEUPD
functions which use the Implicitly Restarted Lanczos Method to
find the eigenvalues and eigenvectors [2]_.
Examples
--------
>>> from sklearn.utils.arpack import eigsh
>>> id = np.identity(13)
>>> vals, vecs = eigsh(id, k=6)
>>> vals # doctest: +SKIP
array([ 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j, 1.+0.j])
>>> print(vecs.shape)
(13, 6)
References
----------
.. [1] ARPACK Software, http://www.caam.rice.edu/software/ARPACK/
.. [2] R. B. Lehoucq, D. C. Sorensen, and C. Yang, ARPACK USERS GUIDE:
Solution of Large Scale Eigenvalue Problems by Implicitly Restarted
Arnoldi Methods. SIAM, Philadelphia, PA, 1998.
"""
# complex hermitian matrices should be solved with eigs
if np.issubdtype(A.dtype, np.complexfloating):
if mode != 'normal':
raise ValueError("mode=%s cannot be used with "
"complex matrix A" % mode)
if which == 'BE':
raise ValueError("which='BE' cannot be used with complex matrix A")
elif which == 'LA':
which = 'LR'
elif which == 'SA':
which = 'SR'
ret = eigs(A, k, M=M, sigma=sigma, which=which, v0=v0,
ncv=ncv, maxiter=maxiter, tol=tol,
return_eigenvectors=return_eigenvectors, Minv=Minv,
OPinv=OPinv)
if return_eigenvectors:
return ret[0].real, ret[1]
else:
return ret.real
if A.shape[0] != A.shape[1]:
raise ValueError('expected square matrix (shape=%s)' % (A.shape,))
if M is not None:
if M.shape != A.shape:
raise ValueError('wrong M dimensions %s, should be %s'
% (M.shape, A.shape))
if np.dtype(M.dtype).char.lower() != np.dtype(A.dtype).char.lower():
warnings.warn('M does not have the same type precision as A. '
'This may adversely affect ARPACK convergence')
n = A.shape[0]
if k <= 0 or k >= n:
raise ValueError("k must be between 1 and rank(A)-1")
if sigma is None:
A = _aslinearoperator_with_dtype(A)
matvec = A.matvec
if OPinv is not None:
raise ValueError("OPinv should not be specified "
"with sigma = None.")
if M is None:
#standard eigenvalue problem
mode = 1
M_matvec = None
Minv_matvec = None
if Minv is not None:
raise ValueError("Minv should not be "
"specified with M = None.")
else:
#general eigenvalue problem
mode = 2
if Minv is None:
Minv_matvec = get_inv_matvec(M, symmetric=True, tol=tol)
else:
Minv = _aslinearoperator_with_dtype(Minv)
Minv_matvec = Minv.matvec
M_matvec = _aslinearoperator_with_dtype(M).matvec
else:
# sigma is not None: shift-invert mode
if Minv is not None:
raise ValueError("Minv should not be specified when sigma is")
# normal mode
if mode == 'normal':
mode = 3
matvec = None
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
OPinv = _aslinearoperator_with_dtype(OPinv)
Minv_matvec = OPinv.matvec
if M is None:
M_matvec = None
else:
M = _aslinearoperator_with_dtype(M)
M_matvec = M.matvec
# buckling mode
elif mode == 'buckling':
mode = 4
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
matvec = _aslinearoperator_with_dtype(A).matvec
M_matvec = None
# cayley-transform mode
elif mode == 'cayley':
mode = 5
matvec = _aslinearoperator_with_dtype(A).matvec
if OPinv is None:
Minv_matvec = get_OPinv_matvec(A, M, sigma,
symmetric=True, tol=tol)
else:
Minv_matvec = _aslinearoperator_with_dtype(OPinv).matvec
if M is None:
M_matvec = None
else:
M_matvec = _aslinearoperator_with_dtype(M).matvec
# unrecognized mode
else:
raise ValueError("unrecognized mode '%s'" % mode)
params = _SymmetricArpackParams(n, k, A.dtype.char, matvec, mode,
M_matvec, Minv_matvec, sigma,
ncv, v0, maxiter, which, tol)
while not params.converged:
params.iterate()
return params.extract(return_eigenvectors)
def _svds(A, k=6, ncv=None, tol=0):
"""Compute k singular values/vectors for a sparse matrix using ARPACK.
Parameters
----------
A : sparse matrix
Array to compute the SVD on
k : int, optional
Number of singular values and vectors to compute.
ncv : integer
The number of Lanczos vectors generated
ncv must be greater than k+1 and smaller than n;
it is recommended that ncv > 2*k
tol : float, optional
Tolerance for singular values. Zero (default) means machine precision.
Notes
-----
This is a naive implementation using an eigensolver on A.H * A or
A * A.H, depending on which one is more efficient.
"""
if not (isinstance(A, np.ndarray) or isspmatrix(A)):
A = np.asarray(A)
n, m = A.shape
if np.issubdtype(A.dtype, np.complexfloating):
herm = lambda x: x.T.conjugate()
eigensolver = eigs
else:
herm = lambda x: x.T
eigensolver = eigsh
if n > m:
X = A
XH = herm(A)
else:
XH = A
X = herm(A)
if hasattr(XH, 'dot'):
def matvec_XH_X(x):
return XH.dot(X.dot(x))
else:
def matvec_XH_X(x):
return np.dot(XH, np.dot(X, x))
XH_X = LinearOperator(matvec=matvec_XH_X, dtype=X.dtype,
shape=(X.shape[1], X.shape[1]))
# Ignore deprecation warnings here: dot on matrices is deprecated,
# but this code is a backport anyhow
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
eigvals, eigvec = eigensolver(XH_X, k=k, tol=tol ** 2)
s = np.sqrt(eigvals)
if n > m:
v = eigvec
if hasattr(X, 'dot'):
u = X.dot(v) / s
else:
u = np.dot(X, v) / s
vh = herm(v)
else:
u = eigvec
if hasattr(X, 'dot'):
vh = herm(X.dot(u) / s)
else:
vh = herm(np.dot(X, u) / s)
return u, s, vh
# check if backport is actually needed:
if scipy.version.version >= LooseVersion('0.10'):
from scipy.sparse.linalg import eigs, eigsh, svds
else:
eigs, eigsh, svds = _eigs, _eigsh, _svds
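if __name__ == "__main__":
    # Minimal usage sketch, assuming only numpy (already imported as np): the
    # eigenvalues and singular values of diag(1..13) are known, so the three
    # largest values returned by the wrappers should be 11, 12 and 13.
    demo = np.diag(np.arange(1.0, 14.0))
    vals, vecs = eigsh(demo, k=3, which='LM')
    print(sorted(np.round(vals, 6)))
    u, s, vh = svds(demo, k=3)
    print(sorted(np.round(s, 6)))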
| bsd-3-clause |
Michaelhobo/ee149-final-project | mavlink/pymavlink/tools/mavextract.py | 41 | 2555 | #!/usr/bin/env python
'''
extract one mode type from a log
'''
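# Example invocation (a sketch; the log file name is a placeholder):
#   mavextract.py --mode auto flight.tlog
# This writes auto1.tlog, auto2.tlog, ... - one file per contiguous stretch
# of the log whose flight mode matches --mode.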
import sys, time, os, struct
from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)
parser.add_argument("--no-timestamps", dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_argument("--robust", action='store_true', help="Enable robust parsing (skip over bad data)")
parser.add_argument("--condition", default=None, help="select packets by condition")
parser.add_argument("--mode", default='auto', help="mode to extract")
parser.add_argument("logs", metavar="LOG", nargs="+")
args = parser.parse_args()
from pymavlink import mavutil
def process(filename):
'''process one logfile'''
print("Processing %s" % filename)
mlog = mavutil.mavlink_connection(filename, notimestamps=args.notimestamps,
robust_parsing=args.robust)
ext = os.path.splitext(filename)[1]
isbin = ext in ['.bin', '.BIN']
islog = ext in ['.log', '.LOG']
output = None
count = 1
dirname = os.path.dirname(filename)
if isbin or islog:
extension = "bin"
else:
extension = "tlog"
file_header = ''
while True:
m = mlog.recv_match()
if m is None:
break
if (isbin or islog) and m.get_type() in ["FMT", "PARM", "CMD"]:
file_header += m.get_msgbuf()
if (isbin or islog) and m.get_type() == 'MSG' and m.Message.startswith("Ardu"):
file_header += m.get_msgbuf()
if m.get_type() in ['PARAM_VALUE','MISSION_ITEM']:
timestamp = getattr(m, '_timestamp', None)
file_header += struct.pack('>Q', timestamp*1.0e6) + m.get_msgbuf()
if not mavutil.evaluate_condition(args.condition, mlog.messages):
continue
if mlog.flightmode.upper() == args.mode.upper():
if output is None:
path = os.path.join(dirname, "%s%u.%s" % (args.mode, count, extension))
count += 1
print("Creating %s" % path)
output = open(path, mode='wb')
output.write(file_header)
else:
if output is not None:
output.close()
output = None
if output and m.get_type() != 'BAD_DATA':
timestamp = getattr(m, '_timestamp', None)
if not isbin:
output.write(struct.pack('>Q', timestamp*1.0e6))
output.write(m.get_msgbuf())
for filename in args.logs:
process(filename)
| gpl-3.0 |
trueblue2704/AskMeAnything | lib/python2.7/site-packages/pip/_vendor/html5lib/utils.py | 982 | 2545 | from __future__ import absolute_import, division, unicode_literals
from types import ModuleType
try:
import xml.etree.cElementTree as default_etree
except ImportError:
import xml.etree.ElementTree as default_etree
__all__ = ["default_etree", "MethodDispatcher", "isSurrogatePair",
"surrogatePairToCodepoint", "moduleFactoryFactory"]
class MethodDispatcher(dict):
"""Dict with 2 special properties:
On initiation, keys that are lists, sets or tuples are converted to
multiple keys so accessing any one of the items in the original
list-like object returns the matching value
md = MethodDispatcher({("foo", "bar"):"baz"})
md["foo"] == "baz"
A default value which can be set through the default attribute.
"""
def __init__(self, items=()):
# Using _dictEntries instead of directly assigning to self is about
# twice as fast. Please do careful performance testing before changing
# anything here.
_dictEntries = []
for name, value in items:
if type(name) in (list, tuple, frozenset, set):
for item in name:
_dictEntries.append((item, value))
else:
_dictEntries.append((name, value))
dict.__init__(self, _dictEntries)
self.default = None
def __getitem__(self, key):
return dict.get(self, key, self.default)
# Some utility functions to deal with weirdness around UCS2 vs UCS4
# python builds
def isSurrogatePair(data):
return (len(data) == 2 and
ord(data[0]) >= 0xD800 and ord(data[0]) <= 0xDBFF and
ord(data[1]) >= 0xDC00 and ord(data[1]) <= 0xDFFF)
def surrogatePairToCodepoint(data):
char_val = (0x10000 + (ord(data[0]) - 0xD800) * 0x400 +
(ord(data[1]) - 0xDC00))
return char_val
# Module Factory Factory (no, this isn't Java, I know)
# Here to stop this being duplicated all over the place.
def moduleFactoryFactory(factory):
moduleCache = {}
def moduleFactory(baseModule, *args, **kwargs):
if isinstance(ModuleType.__name__, type("")):
name = "_%s_factory" % baseModule.__name__
else:
name = b"_%s_factory" % baseModule.__name__
if name in moduleCache:
return moduleCache[name]
else:
mod = ModuleType(name)
objs = factory(baseModule, *args, **kwargs)
mod.__dict__.update(objs)
moduleCache[name] = mod
return mod
return moduleFactory
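if __name__ == "__main__":
    # Minimal usage sketch: MethodDispatcher expands tuple keys so that each
    # member maps to the same value, and unknown keys fall back to `default`;
    # surrogatePairToCodepoint recombines a UTF-16 surrogate pair (U+1F600).
    md = MethodDispatcher([(("foo", "bar"), "baz")])
    md.default = "missing"
    print("%s %s %s" % (md["foo"], md["bar"], md["quux"]))
    print(hex(surrogatePairToCodepoint("\ud83d\ude00")))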
| mit |
ampax/edx-platform-backup | common/lib/capa/capa/tests/__init__.py | 45 | 2261 | """Tools for helping with testing capa."""
import gettext
import os
import os.path
import fs.osfs
from capa.capa_problem import LoncapaProblem, LoncapaSystem
from capa.inputtypes import Status
from mock import Mock, MagicMock
import xml.sax.saxutils as saxutils
TEST_DIR = os.path.dirname(os.path.realpath(__file__))
def tst_render_template(template, context):
"""
A test version of render to template. Renders to the repr of the context, completely ignoring
the template name. To make the output valid xml, quotes the content, and wraps it in a <div>
"""
return '<div>{0}</div>'.format(saxutils.escape(repr(context)))
def calledback_url(dispatch='score_update'):
return dispatch
xqueue_interface = MagicMock()
xqueue_interface.send_to_queue.return_value = (0, 'Success!')
def test_capa_system():
"""
Construct a mock LoncapaSystem instance.
"""
the_system = Mock(
spec=LoncapaSystem,
ajax_url='/dummy-ajax-url',
anonymous_student_id='student',
cache=None,
can_execute_unsafe_code=lambda: False,
get_python_lib_zip=lambda: None,
DEBUG=True,
filestore=fs.osfs.OSFS(os.path.join(TEST_DIR, "test_files")),
i18n=gettext.NullTranslations(),
node_path=os.environ.get("NODE_PATH", "/usr/local/lib/node_modules"),
render_template=tst_render_template,
seed=0,
STATIC_URL='/dummy-static/',
STATUS_CLASS=Status,
xqueue={'interface': xqueue_interface, 'construct_callback': calledback_url, 'default_queuename': 'testqueue', 'waittime': 10},
)
return the_system
def new_loncapa_problem(xml, capa_system=None, seed=723):
"""Construct a `LoncapaProblem` suitable for unit tests."""
return LoncapaProblem(xml, id='1', seed=seed, capa_system=capa_system or test_capa_system())
def load_fixture(relpath):
"""
Return a `unicode` object representing the contents
of the fixture file at the given path within a test_files directory
in the same directory as the test file.
"""
abspath = os.path.join(os.path.dirname(__file__), 'test_files', relpath)
with open(abspath) as fixture_file:
contents = fixture_file.read()
return contents.decode('utf8')
| agpl-3.0 |
lmazuel/azure-sdk-for-python | azure-mgmt-datafactory/azure/mgmt/datafactory/models/eloqua_source.py | 2 | 2076 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .copy_source import CopySource
class EloquaSource(CopySource):
"""A copy activity Eloqua server source.
:param additional_properties: Unmatched properties from the message are
     deserialized to this collection
:type additional_properties: dict[str, object]
:param source_retry_count: Source retry count. Type: integer (or
Expression with resultType integer).
:type source_retry_count: object
:param source_retry_wait: Source retry wait. Type: string (or Expression
with resultType string), pattern:
((\\d+)\\.)?(\\d\\d):(60|([0-5][0-9])):(60|([0-5][0-9])).
:type source_retry_wait: object
:param type: Constant filled by server.
:type type: str
:param query: A query to retrieve data from source. Type: string (or
Expression with resultType string).
:type query: object
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'additional_properties': {'key': '', 'type': '{object}'},
'source_retry_count': {'key': 'sourceRetryCount', 'type': 'object'},
'source_retry_wait': {'key': 'sourceRetryWait', 'type': 'object'},
'type': {'key': 'type', 'type': 'str'},
'query': {'key': 'query', 'type': 'object'},
}
def __init__(self, additional_properties=None, source_retry_count=None, source_retry_wait=None, query=None):
super(EloquaSource, self).__init__(additional_properties=additional_properties, source_retry_count=source_retry_count, source_retry_wait=source_retry_wait)
self.query = query
self.type = 'EloquaSource'
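if __name__ == "__main__":
    # Minimal construction sketch; the query string is a placeholder, not a
    # statement in any real Eloqua query language.
    source = EloquaSource(query="contacts_query_placeholder", source_retry_count=3)
    print("%s %s" % (source.type, source.query))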
| mit |
mrgloom/HPOlib | HPOlib/config_parser/parse.py | 5 | 2553 | ##
# wrapping: A program making it easy to use hyperparameter
# optimization software.
# Copyright (C) 2013 Katharina Eggensperger and Matthias Feurer
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
import ConfigParser
logger = logging.getLogger("HPOlib.config_parser.parse")
def parse_config(config_files, allow_no_value=True, optimizer_version="",
cli_values=None):
if type(config_files) is str:
if not os.path.isfile(config_files):
raise Exception('%s is not a valid file\n' % os.path.join(
os.getcwd(), config_files))
else:
for config_file in config_files:
if not os.path.isfile(config_file):
raise Exception('%s is not a valid file\n' % os.path.join(
os.getcwd(), config_file))
config = ConfigParser.SafeConfigParser(allow_no_value=allow_no_value)
config.read(config_files)
if cli_values is not None:
config.readfp(cli_values)
return config
def check_config(config):
# --------------------------------------------------------------------------
# Check critical values
# --------------------------------------------------------------------------
if not config.has_option('HPOLIB', 'number_of_jobs') or \
config.get('HPOLIB', 'number_of_jobs') == '':
raise Exception('number_of_jobs not specified in .cfg')
if not config.has_option('HPOLIB', 'result_on_terminate') or \
config.get('HPOLIB', 'result_on_terminate') == '':
raise Exception('No result_on_terminate specified in .cfg')
if not config.has_option('HPOLIB', 'function') or \
config.get('HPOLIB', 'function') == '':
raise Exception('No function specified in .cfg')
if config.getint('HPOLIB', "number_cv_folds") < 1:
raise Exception("The number of crossvalidation folds must be at least one!")
return True
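if __name__ == "__main__":
    # Minimal sketch of check_config on an in-memory configuration; the option
    # values are placeholders, and StringIO/ConfigParser follow the Python 2
    # style already used by this module.
    import StringIO
    minimal_cfg = ("[HPOLIB]\n"
                   "number_of_jobs = 10\n"
                   "result_on_terminate = 1000\n"
                   "function = python dummy_algo.py\n"
                   "number_cv_folds = 1\n")
    config = ConfigParser.SafeConfigParser(allow_no_value=True)
    config.readfp(StringIO.StringIO(minimal_cfg))
    print(check_config(config))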
| gpl-3.0 |
loandy/billy | billy/web/api/tests/test_committees.py | 4 | 1386 | from .base import BaseTestCase
class CommitteesSearchTestCase(BaseTestCase):
url_tmpl = '/api/v1/committees/'
data = dict(state='ex', chamber='lower')
def test_count(self):
self.assertEquals(
len(self.json),
self.db.committees.find(self.data).count())
def test_correct_keys_present(self):
expected_keys = set([
u'level', u'country', u'updated_at', u'parent_id',
u'state', u'subcommittee', u'committee', u'chamber', u'id', 'all_ids'])
self.assertEquals(set(self.json[0]), expected_keys)
def test_status(self):
self.assert_200()
class CommitteeLookupTestCase(BaseTestCase):
url_tmpl = '/api/v1/committees/{committee_id}/'
url_args = dict(committee_id='EXC000001')
def test_state(self):
'''Make sure the returned data has the correct
level field value.
'''
self.assertEquals(self.json['state'], 'ex')
def test_correct_keys_present(self):
expected_keys = set([
u'members', u'level', u'country', u'updated_at',
u'parent_id', u'state', u'subcommittee',
u'committee', u'chamber', u'id', 'all_ids'])
self.assertEquals(set(self.json), expected_keys)
def test_id(self):
self.assertEquals(self.json['id'], 'EXC000001')
def test_status(self):
self.assert_200()
| bsd-3-clause |
lepistone/babel | setup.py | 7 | 2969 | # -*- coding: utf-8 -*-
import os
import sys
import subprocess
from setuptools import setup
sys.path.append(os.path.join('doc', 'common'))
try:
from doctools import build_doc, test_doc
except ImportError:
build_doc = test_doc = None
from distutils.cmd import Command
class import_cldr(Command):
description = 'imports and converts the CLDR data'
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
c = subprocess.Popen([sys.executable, 'scripts/download_import_cldr.py'])
c.wait()
setup(
name='Babel',
version='3.0-dev',
description='Internationalization utilities',
long_description=\
"""A collection of tools for internationalizing Python applications.""",
author='Armin Ronacher',
author_email='[email protected]',
license='BSD',
url='http://babel.pocoo.org/',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=['babel', 'babel.messages', 'babel.localtime'],
package_data={'babel': ['global.dat', 'localedata/*.dat']},
install_requires=[
# This version identifier is currently necessary as
# pytz otherwise does not install on pip 1.4 or
# higher.
'pytz>=0a',
],
cmdclass={'build_doc': build_doc, 'test_doc': test_doc,
'import_cldr': import_cldr},
zip_safe=False,
# Note when adding extractors: builtin extractors we also want to
# work if packages are not installed to simplify testing. If you
# add an extractor here also manually add it to the "extract"
# function in babel.messages.extract.
entry_points="""
[console_scripts]
pybabel = babel.messages.frontend:main
[distutils.commands]
compile_catalog = babel.messages.frontend:compile_catalog
extract_messages = babel.messages.frontend:extract_messages
init_catalog = babel.messages.frontend:init_catalog
update_catalog = babel.messages.frontend:update_catalog
[distutils.setup_keywords]
message_extractors = babel.messages.frontend:check_message_extractors
[babel.checkers]
num_plurals = babel.messages.checkers:num_plurals
python_format = babel.messages.checkers:python_format
[babel.extractors]
ignore = babel.messages.extract:extract_nothing
python = babel.messages.extract:extract_python
javascript = babel.messages.extract:extract_javascript
"""
)
| bsd-3-clause |
mozillazg/django-cron | django_cron/tests.py | 2 | 6953 | import threading
from time import sleep
from datetime import timedelta
from django import db
from django.utils import unittest
from django.core.management import call_command
from django.test.utils import override_settings
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from freezegun import freeze_time
from django_cron.helpers import humanize_duration
from django_cron.models import CronJobLog
class OutBuffer(object):
content = []
modified = False
_str_cache = ''
def write(self, *args):
self.content.extend(args)
self.modified = True
def str_content(self):
if self.modified:
self._str_cache = ''.join((str(x) for x in self.content))
self.modified = False
return self._str_cache
class TestCase(unittest.TestCase):
success_cron = 'test_crons.TestSucessCronJob'
error_cron = 'test_crons.TestErrorCronJob'
five_mins_cron = 'test_crons.Test5minsCronJob'
run_at_times_cron = 'test_crons.TestRunAtTimesCronJob'
wait_3sec_cron = 'test_crons.Wait3secCronJob'
does_not_exist_cron = 'ThisCronObviouslyDoesntExist'
test_failed_runs_notification_cron = 'django_cron.cron.FailedRunsNotificationCronJob'
def setUp(self):
CronJobLog.objects.all().delete()
def test_success_cron(self):
logs_count = CronJobLog.objects.all().count()
call_command('runcrons', self.success_cron, force=True)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
def test_failed_cron(self):
logs_count = CronJobLog.objects.all().count()
call_command('runcrons', self.error_cron, force=True)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
def test_not_exists_cron(self):
logs_count = CronJobLog.objects.all().count()
out_buffer = OutBuffer()
call_command('runcrons', self.does_not_exist_cron, force=True, stdout=out_buffer)
self.assertIn('Make sure these are valid cron class names', out_buffer.str_content())
self.assertIn(self.does_not_exist_cron, out_buffer.str_content())
self.assertEqual(CronJobLog.objects.all().count(), logs_count)
@override_settings(DJANGO_CRON_LOCK_BACKEND='django_cron.backends.lock.file.FileLock')
def test_file_locking_backend(self):
logs_count = CronJobLog.objects.all().count()
call_command('runcrons', self.success_cron, force=True)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
def test_runs_every_mins(self):
logs_count = CronJobLog.objects.all().count()
with freeze_time("2014-01-01 00:00:00"):
call_command('runcrons', self.five_mins_cron)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
with freeze_time("2014-01-01 00:04:59"):
call_command('runcrons', self.five_mins_cron)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
with freeze_time("2014-01-01 00:05:01"):
call_command('runcrons', self.five_mins_cron)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 2)
def test_runs_at_time(self):
logs_count = CronJobLog.objects.all().count()
with freeze_time("2014-01-01 00:00:01"):
call_command('runcrons', self.run_at_times_cron)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
with freeze_time("2014-01-01 00:04:50"):
call_command('runcrons', self.run_at_times_cron)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
with freeze_time("2014-01-01 00:05:01"):
call_command('runcrons', self.run_at_times_cron)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 2)
def test_admin(self):
password = 'test'
user = User.objects.create_superuser(
'test',
'[email protected]',
password
)
self.client = Client()
self.client.login(username=user.username, password=password)
# edit CronJobLog object
call_command('runcrons', self.success_cron, force=True)
log = CronJobLog.objects.all()[0]
url = reverse('admin:django_cron_cronjoblog_change', args=(log.id,))
response = self.client.get(url)
self.assertIn('Cron job logs', str(response.content))
def run_cronjob_in_thread(self, logs_count):
call_command('runcrons', self.wait_3sec_cron)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
db.close_old_connections()
def test_cache_locking_backend(self):
"""
with cache locking backend
"""
logs_count = CronJobLog.objects.all().count()
t = threading.Thread(target=self.run_cronjob_in_thread, args=(logs_count,))
t.daemon = True
t.start()
# this shouldn't get running
sleep(0.1) # to avoid race condition
call_command('runcrons', self.wait_3sec_cron)
t.join(10)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
# TODO: this test doesn't pass - seems that second cronjob is locking file
    # however it should throw an exception that the file is locked by another cronjob
# @override_settings(
# DJANGO_CRON_LOCK_BACKEND='django_cron.backends.lock.file.FileLock',
# DJANGO_CRON_LOCKFILE_PATH=os.path.join(os.getcwd())
# )
# def test_file_locking_backend_in_thread(self):
# """
# with file locking backend
# """
# logs_count = CronJobLog.objects.all().count()
# t = threading.Thread(target=self.run_cronjob_in_thread, args=(logs_count,))
# t.daemon = True
# t.start()
# # this shouldn't get running
# sleep(1) # to avoid race condition
# call_command('runcrons', self.wait_3sec_cron)
# t.join(10)
# self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
def test_failed_runs_notification(self):
CronJobLog.objects.all().delete()
logs_count = CronJobLog.objects.all().count()
for i in range(10):
call_command('runcrons', self.error_cron, force=True)
call_command('runcrons', self.test_failed_runs_notification_cron)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 11)
def test_humanize_duration(self):
test_subjects = (
(timedelta(days=1, hours=1, minutes=1, seconds=1), '1 day, 1 hour, 1 minute, 1 second'),
(timedelta(days=2), '2 days'),
(timedelta(days=15, minutes=4), '15 days, 4 minutes'),
(timedelta(), '< 1 second'),
)
for duration, humanized in test_subjects:
self.assertEqual(
humanize_duration(duration),
humanized
)
| mit |
7kbird/chrome | third_party/cython/src/Cython/Debugger/Cygdb.py | 90 | 4855 | #!/usr/bin/env python
"""
The Cython debugger
The current directory should contain a directory named 'cython_debug', or a
path to the cython project directory should be given (the parent directory of
cython_debug).
Additional gdb args can be provided only if a path to the project directory is
given.
"""
import os
import sys
import glob
import tempfile
import textwrap
import subprocess
import optparse
import logging
logger = logging.getLogger(__name__)
def make_command_file(path_to_debug_info, prefix_code='', no_import=False):
if not no_import:
pattern = os.path.join(path_to_debug_info,
'cython_debug',
'cython_debug_info_*')
debug_files = glob.glob(pattern)
if not debug_files:
sys.exit('%s.\nNo debug files were found in %s. Aborting.' % (
usage, os.path.abspath(path_to_debug_info)))
fd, tempfilename = tempfile.mkstemp()
f = os.fdopen(fd, 'w')
try:
f.write(prefix_code)
f.write('set breakpoint pending on\n')
f.write("set print pretty on\n")
f.write('python from Cython.Debugger import libcython, libpython\n')
if no_import:
# don't do this, this overrides file command in .gdbinit
# f.write("file %s\n" % sys.executable)
pass
else:
path = os.path.join(path_to_debug_info, "cython_debug", "interpreter")
interpreter_file = open(path)
try:
interpreter = interpreter_file.read()
finally:
interpreter_file.close()
f.write("file %s\n" % interpreter)
f.write('\n'.join('cy import %s\n' % fn for fn in debug_files))
f.write(textwrap.dedent('''\
python
import sys
try:
gdb.lookup_type('PyModuleObject')
except RuntimeError:
sys.stderr.write(
'Python was not compiled with debug symbols (or it was '
'stripped). Some functionality may not work (properly).\\n')
end
source .cygdbinit
'''))
finally:
f.close()
return tempfilename
usage = "Usage: cygdb [options] [PATH [-- GDB_ARGUMENTS]]"
def main(path_to_debug_info=None, gdb_argv=None, no_import=False):
"""
Start the Cython debugger. This tells gdb to import the Cython and Python
extensions (libcython.py and libpython.py) and it enables gdb's pending
breakpoints.
path_to_debug_info is the path to the Cython build directory
gdb_argv is the list of options to gdb
no_import tells cygdb whether it should import debug information
"""
parser = optparse.OptionParser(usage=usage)
parser.add_option("--gdb-executable",
dest="gdb", default='gdb',
help="gdb executable to use [default: gdb]")
parser.add_option("--verbose", "-v",
dest="verbosity", action="count", default=0,
help="Verbose mode. Multiple -v options increase the verbosity")
(options, args) = parser.parse_args()
if path_to_debug_info is None:
if len(args) > 1:
path_to_debug_info = args[0]
else:
path_to_debug_info = os.curdir
if gdb_argv is None:
gdb_argv = args[1:]
if path_to_debug_info == '--':
no_import = True
logging_level = logging.WARN
if options.verbosity == 1:
logging_level = logging.INFO
if options.verbosity == 2:
logging_level = logging.DEBUG
logging.basicConfig(level=logging_level)
logger.info("verbosity = %r", options.verbosity)
logger.debug("options = %r; args = %r", options, args)
logger.debug("Done parsing command-line options. path_to_debug_info = %r, gdb_argv = %r",
path_to_debug_info, gdb_argv)
tempfilename = make_command_file(path_to_debug_info, no_import=no_import)
logger.info("Launching %s with command file: %s and gdb_argv: %s",
options.gdb, tempfilename, gdb_argv)
logger.debug('Command file (%s) contains: """\n%s"""', tempfilename, open(tempfilename).read())
logger.info("Spawning %s...", options.gdb)
p = subprocess.Popen([options.gdb, '-command', tempfilename] + gdb_argv)
logger.info("Spawned %s (pid %d)", options.gdb, p.pid)
while True:
try:
logger.debug("Waiting for gdb (pid %d) to exit...", p.pid)
ret = p.wait()
logger.debug("Wait for gdb (pid %d) to exit is done. Returned: %r", p.pid, ret)
except KeyboardInterrupt:
pass
else:
break
logger.debug("Removing temp command file: %s", tempfilename)
os.remove(tempfilename)
logger.debug("Removed temp command file: %s", tempfilename)
| bsd-3-clause |
g-vidal/mraa | tests/mock/i2c_checks_write_byte.py | 21 | 2079 | #!/usr/bin/env python
# Author: Alex Tereschenko <[email protected]>
# Copyright (c) 2016 Alex Tereschenko.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import mraa as m
import unittest as u
from i2c_checks_shared import *
class I2cChecksWriteByte(u.TestCase):
def setUp(self):
self.i2c = m.I2c(MRAA_I2C_BUS_NUM)
def tearDown(self):
del self.i2c
def test_i2c_write_byte(self):
self.i2c.address(MRAA_MOCK_I2C_ADDR)
test_byte = 0xEE
self.assertEqual(self.i2c.writeByte(test_byte),
m.SUCCESS,
"I2C writeByte() did not return success")
self.assertEqual(self.i2c.readByte(),
test_byte,
"I2C readByte() after writeByte() returned unexpected data")
def test_i2c_write_byte_invalid_addr(self):
self.i2c.address(MRAA_MOCK_I2C_ADDR - 1)
self.assertEqual(self.i2c.writeByte(0xEE),
m.ERROR_UNSPECIFIED,
"I2C writeByte() to invalid address did not return error")
if __name__ == "__main__":
u.main()
| mit |
redreamality/learning-to-rank | lerot/evaluation/LetorNdcgEval.py | 1 | 1546 | # This file is part of Lerot.
#
# Lerot is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Lerot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Lerot. If not, see <http://www.gnu.org/licenses/>.
# KH, 2012/06/20
from numpy import log2
from .AbstractEval import AbstractEval
class LetorNdcgEval(AbstractEval):
"""Compute NDCG as implemented in the Letor toolkit."""
def get_dcg(self, labels, cutoff=-1):
if (cutoff == -1):
cutoff = len(labels)
dcg = 0
# [0:cutoff] returns the labels up to min(len(labels), cutoff)
for r, label in enumerate(labels[0:cutoff]):
# use log2(1 + r), to be consistent with the implementation in the
# letor 4 evaluation tools (and wikipedia, on 6/27/2012), even
# though this makes discounting slightly inconsistent (indices are
# zero-based, so using log2(2 + r) would be more consistent)
if r == 0:
dcg += 2 ** label - 1
else:
dcg += (2 ** label - 1) / log2(1 + r)
return dcg
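if __name__ == "__main__":
    # Minimal sketch comparing the Letor-style DCG of a perfect graded ranking
    # with its reverse. It assumes AbstractEval can be instantiated without
    # arguments; adjust the construction if the base class needs configuration.
    evaluator = LetorNdcgEval()
    print("DCG perfect ranking:  %.4f" % evaluator.get_dcg([2, 1, 0]))
    print("DCG reversed ranking: %.4f" % evaluator.get_dcg([0, 1, 2]))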
| gpl-3.0 |
Changaco/oh-mainline | vendor/packages/whoosh/src/whoosh/lang/porter2.py | 117 | 8314 | """An implementation of the Porter2 stemming algorithm.
See http://snowball.tartarus.org/algorithms/english/stemmer.html
Adapted from pyporter2 by Michael Dirolf.
This algorithm is more correct but (at least in this implementation)
several times slower than the original porter algorithm as implemented
in stemming.porter.
"""
import re
r_exp = re.compile(r"[^aeiouy]*[aeiouy]+[^aeiouy](\w*)")
ewss_exp1 = re.compile(r"^[aeiouy][^aeiouy]$")
ewss_exp2 = re.compile(r".*[^aeiouy][aeiouy][^aeiouywxY]$")
ccy_exp = re.compile(r"([aeiouy])y")
s1a_exp = re.compile(r"[aeiouy].")
s1b_exp = re.compile(r"[aeiouy]")
def get_r1(word):
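    # R1 is the region after the first non-vowel that follows a vowel, per the
    # Snowball definition; a few prefixes below get hard-coded region starts.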
# exceptional forms
if word.startswith('gener') or word.startswith('arsen'):
return 5
if word.startswith('commun'):
return 6
# normal form
match = r_exp.match(word)
if match:
return match.start(1)
return len(word)
def get_r2(word):
match = r_exp.match(word, get_r1(word))
if match:
return match.start(1)
return len(word)
def ends_with_short_syllable(word):
if len(word) == 2:
if ewss_exp1.match(word):
return True
if ewss_exp2.match(word):
return True
return False
def is_short_word(word):
if ends_with_short_syllable(word):
if get_r1(word) == len(word):
return True
return False
def remove_initial_apostrophe(word):
if word.startswith("'"):
return word[1:]
return word
def capitalize_consonant_ys(word):
if word.startswith('y'):
word = 'Y' + word[1:]
    return ccy_exp.sub(r'\g<1>Y', word)
def step_0(word):
if word.endswith("'s'"):
return word[:-3]
if word.endswith("'s"):
return word[:-2]
if word.endswith("'"):
return word[:-1]
return word
def step_1a(word):
if word.endswith('sses'):
return word[:-4] + 'ss'
if word.endswith('ied') or word.endswith('ies'):
if len(word) > 4:
return word[:-3] + 'i'
else:
return word[:-3] + 'ie'
if word.endswith('us') or word.endswith('ss'):
return word
if word.endswith('s'):
preceding = word[:-1]
if s1a_exp.search(preceding):
return preceding
return word
return word
doubles = ('bb', 'dd', 'ff', 'gg', 'mm', 'nn', 'pp', 'rr', 'tt')
def ends_with_double(word):
for double in doubles:
if word.endswith(double):
return True
return False
def step_1b_helper(word):
if word.endswith('at') or word.endswith('bl') or word.endswith('iz'):
return word + 'e'
if ends_with_double(word):
return word[:-1]
if is_short_word(word):
return word + 'e'
return word
s1b_suffixes = ('ed', 'edly', 'ing', 'ingly')
def step_1b(word, r1):
if word.endswith('eedly'):
if len(word) - 5 >= r1:
return word[:-3]
return word
if word.endswith('eed'):
if len(word) - 3 >= r1:
return word[:-1]
return word
for suffix in s1b_suffixes:
if word.endswith(suffix):
preceding = word[:-len(suffix)]
if s1b_exp.search(preceding):
return step_1b_helper(preceding)
return word
return word
def step_1c(word):
if word.endswith('y') or word.endswith('Y') and len(word) > 1:
if word[-2] not in 'aeiouy':
if len(word) > 2:
return word[:-1] + 'i'
return word
def step_2_helper(word, r1, end, repl, prev):
if word.endswith(end):
if len(word) - len(end) >= r1:
if prev == []:
return word[:-len(end)] + repl
for p in prev:
if word[:-len(end)].endswith(p):
return word[:-len(end)] + repl
return word
return None
s2_triples = (('ization', 'ize', []),
('ational', 'ate', []),
('fulness', 'ful', []),
('ousness', 'ous', []),
('iveness', 'ive', []),
('tional', 'tion', []),
('biliti', 'ble', []),
('lessli', 'less', []),
('entli', 'ent', []),
('ation', 'ate', []),
('alism', 'al', []),
('aliti', 'al', []),
('ousli', 'ous', []),
('iviti', 'ive', []),
('fulli', 'ful', []),
('enci', 'ence', []),
('anci', 'ance', []),
('abli', 'able', []),
('izer', 'ize', []),
('ator', 'ate', []),
('alli', 'al', []),
('bli', 'ble', []),
('ogi', 'og', ['l']),
('li', '', ['c', 'd', 'e', 'g', 'h', 'k', 'm', 'n', 'r', 't']))
def step_2(word, r1):
for trip in s2_triples:
attempt = step_2_helper(word, r1, trip[0], trip[1], trip[2])
if attempt:
return attempt
return word
def step_3_helper(word, r1, r2, end, repl, r2_necessary):
if word.endswith(end):
if len(word) - len(end) >= r1:
if not r2_necessary:
return word[:-len(end)] + repl
else:
if len(word) - len(end) >= r2:
return word[:-len(end)] + repl
return word
return None
s3_triples = (('ational', 'ate', False),
('tional', 'tion', False),
('alize', 'al', False),
('icate', 'ic', False),
('iciti', 'ic', False),
('ative', '', True),
('ical', 'ic', False),
('ness', '', False),
('ful', '', False))
def step_3(word, r1, r2):
for trip in s3_triples:
attempt = step_3_helper(word, r1, r2, trip[0], trip[1], trip[2])
if attempt:
return attempt
return word
s4_delete_list = ('al', 'ance', 'ence', 'er', 'ic', 'able', 'ible', 'ant', 'ement',
'ment', 'ent', 'ism', 'ate', 'iti', 'ous', 'ive', 'ize')
def step_4(word, r2):
for end in s4_delete_list:
if word.endswith(end):
if len(word) - len(end) >= r2:
return word[:-len(end)]
return word
if word.endswith('sion') or word.endswith('tion'):
if len(word) - 3 >= r2:
return word[:-3]
return word
def step_5(word, r1, r2):
if word.endswith('l'):
if len(word) - 1 >= r2 and word[-2] == 'l':
return word[:-1]
return word
if word.endswith('e'):
if len(word) - 1 >= r2:
return word[:-1]
if len(word) - 1 >= r1 and not ends_with_short_syllable(word[:-1]):
return word[:-1]
return word
def normalize_ys(word):
return word.replace('Y', 'y')
exceptional_forms = {'skis': 'ski',
'skies': 'sky',
'dying': 'die',
'lying': 'lie',
'tying': 'tie',
'idly': 'idl',
'gently': 'gentl',
'ugly': 'ugli',
'early': 'earli',
'only': 'onli',
'singly': 'singl',
'sky': 'sky',
'news': 'news',
'howe': 'howe',
'atlas': 'atlas',
'cosmos': 'cosmos',
'bias': 'bias',
'andes': 'andes'}
exceptional_early_exit_post_1a = frozenset(['inning', 'outing', 'canning', 'herring',
'earring', 'proceed', 'exceed', 'succeed'])
def stem(word):
if len(word) <= 2:
return word
word = remove_initial_apostrophe(word)
# handle some exceptional forms
if word in exceptional_forms:
return exceptional_forms[word]
word = capitalize_consonant_ys(word)
r1 = get_r1(word)
r2 = get_r2(word)
word = step_0(word)
word = step_1a(word)
# handle some more exceptional forms
if word in exceptional_early_exit_post_1a:
return word
word = step_1b(word, r1)
word = step_1c(word)
word = step_2(word, r1)
word = step_3(word, r1, r2)
word = step_4(word, r2)
word = step_5(word, r1, r2)
word = normalize_ys(word)
return word
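if __name__ == "__main__":
    # Minimal sketch: print the stems produced by the rules above for a few
    # words, including two of the hard-coded exceptional forms.
    for example in ("caresses", "knightly", "nationality", "skies", "dying"):
        print("%s -> %s" % (example, stem(example)))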
| agpl-3.0 |
lmacken/moksha | moksha/tests/functional/test_csrf.py | 2 | 4881 | # -*- coding: utf-8 -*-
# This file is part of Moksha.
# Copyright (C) 2008-2010 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Integration tests for Moksha's CSRF protection.
These tests are meant to ensure the validity of Moksha's CSRF WSGI middleware
and repoze.who metadata provider plugin.
"""
from moksha.tests import TestController
class TestCSRFProtection(TestController):
application_under_test = 'main'
def test_csrf_protection(self):
# Test for anonymous requests
resp = self.app.get('/', status=200)
assert 'moksha_base_url = "/"' in resp
assert 'moksha_csrf_token = ""' in resp
assert 'moksha_userid = ""' in resp
# Requesting a protected area
resp = self.app.get('/moksha_admin/', status=302)
assert resp.location.startswith('http://localhost/login') or \
resp.location.startswith('/login'), resp.location
# Getting the login form:
resp = resp.follow(status=200)
form = resp.form
# Submitting the login form:
form['login'] = u'manager'
form['password'] = 'managepass'
post_login = form.submit(status=302)
# Being redirected to the initially requested page:
assert post_login.location.startswith('http://localhost/post_login') or\
post_login.location.startswith('/post_login')
initial_page = post_login.follow(status=302)
assert 'authtkt' in initial_page.request.cookies, \
"Session cookie wasn't defined: %s" % initial_page.request.cookies
assert initial_page.location.startswith('http://localhost/moksha_admin/'), \
initial_page.location
assert '_csrf_token=' in initial_page.location, "Login not redirected with CSRF token"
token = initial_page.location.split('_csrf_token=')[1]
# Now ensure that the token also also being injected in the page
resp = initial_page.follow(status=200)
assert 'moksha_csrf_token' in resp
assert token == resp.body.split('moksha_csrf_token')[1].split(';')[0].split('"')[1], \
"CSRF token not set in response body!"
# Make sure we can get to the page with the token
resp = self.app.post('/moksha_admin/', {'_csrf_token': token}, status=200)
assert 'moksha_csrf_token' in resp, resp
assert 'moksha_csrf_token = ""' not in resp, "CSRF token not set!"
assert token == resp.body.split('moksha_csrf_token')[1].split(';')[0].split('"')[1], \
"CSRF token not set in response body!"
# Make sure we can't get back to the page without the token
resp = self.app.get('/moksha_admin/', status=302)
assert 'The resource was found at /post_logout' in resp or \
'The resource was found at /login' in resp or \
'The resource was found at http://localhost/login' in resp
# Make sure that we can't get back after we got rejected once
resp = self.app.post('/moksha_admin/', {'_csrf_token': token}, status=302)
assert 'The resource was found at /login' in resp or \
'The resource was found at http://localhost/login' in resp
# Ensure the token gets removed
resp = self.app.get('/', status=200)
assert 'moksha_base_url = "/"' in resp
assert 'moksha_csrf_token = ""' in resp
assert 'moksha_userid = ""' in resp
# Ok, now log back in...
resp = self.app.get('/moksha_admin/', status=302)
resp = resp.follow(status=200)
form = resp.form
form['login'] = u'manager'
form['password'] = 'managepass'
post_login = form.submit(status=302)
initial_page = post_login.follow(status=302)
assert '_csrf_token=' in initial_page.location, "Login not redirected with CSRF token"
newtoken = initial_page.location.split('_csrf_token=')[1]
# For some reason logging out sometimes doesn't give us a new session cookie
        #assert newtoken != token, "Did not receive a new token!!"
# Now, make sure we reject invalid tokens
resp = self.app.post('/moksha_admin/', {'_csrf_token': token + ' '}, status=302)
assert 'The resource was found at /post_logout' in resp or \
'The resource was found at http://localhost/post_logout' in resp
| apache-2.0 |
lahosken/pants | src/python/pants/backend/jvm/subsystems/jar_tool.py | 8 | 1493 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants.backend.jvm.subsystems.jvm_tool_mixin import JvmToolMixin
from pants.base.workunit import WorkUnitLabel
from pants.java.jar.jar_dependency import JarDependency
from pants.subsystem.subsystem import Subsystem
class JarTool(JvmToolMixin, Subsystem):
options_scope = 'jar-tool'
@classmethod
def register_options(cls, register):
super(JarTool, cls).register_options(register)
cls.register_jvm_tool(register,
'jar-tool',
classpath=[
JarDependency(org='org.pantsbuild', name='jar-tool', rev='0.0.10'),
])
def run(self, context, runjava, args):
return runjava(self.tool_classpath_from_products(context.products, 'jar-tool',
scope=self.options_scope),
'org.pantsbuild.tools.jar.Main',
jvm_options=self.get_options().jvm_options,
args=args,
workunit_name='jar-tool',
workunit_labels=[WorkUnitLabel.TOOL, WorkUnitLabel.JVM,
WorkUnitLabel.NAILGUN, WorkUnitLabel.SUPPRESS_LABEL])
| apache-2.0 |
ioram7/keystone-federado-pgid2013 | build/lib.linux-x86_64-2.7/keystone/common/sql/migrate_repo/versions/019_fixup_role.py | 4 | 1070 | import json
import uuid
import sqlalchemy as sql
from sqlalchemy import orm
from keystone import config
from keystone import exception
CONF = config.CONF
def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
role_table = sql.Table('role', meta, autoload=True)
# name should be 255 characters to match fresh database
role_table.c.name.alter(type=sql.String(length=255))
# blank 'extra' field should be "{}"
none = None
update = role_table.update().where(role_table.c.extra == none).values(
{role_table.c.extra: "{}"})
migrate_engine.execute(update)
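    # Roughly equivalent SQL (illustrative only; the exact ALTER statement is
    # dialect-dependent, PostgreSQL-style shown):
    #   ALTER TABLE role ALTER COLUMN name TYPE VARCHAR(255);
    #   UPDATE role SET extra = '{}' WHERE extra IS NULL;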
def downgrade(migrate_engine):
# this fixes bugs in migration 001 and 007 that result in discrepancies
# between fresh databases and databases updated from 004 (folsom).
# the changes fixing 007 will be rolled back in 007's rollback if
# the user desires to return to a state before the existence of the extra
# column.
# the name length change reflects the current default and should not be
# rolled back.
pass
| apache-2.0 |
dabiboo/youtube-dl | test/test_cache.py | 177 | 1575 | #!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
import shutil
# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from test.helper import FakeYDL
from youtube_dl.cache import Cache
def _is_empty(d):
return not bool(os.listdir(d))
def _mkdir(d):
if not os.path.exists(d):
os.mkdir(d)
class TestCache(unittest.TestCase):
def setUp(self):
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
TESTDATA_DIR = os.path.join(TEST_DIR, 'testdata')
_mkdir(TESTDATA_DIR)
self.test_dir = os.path.join(TESTDATA_DIR, 'cache_test')
self.tearDown()
def tearDown(self):
if os.path.exists(self.test_dir):
shutil.rmtree(self.test_dir)
def test_cache(self):
ydl = FakeYDL({
'cachedir': self.test_dir,
})
c = Cache(ydl)
obj = {'x': 1, 'y': ['ä', '\\a', True]}
self.assertEqual(c.load('test_cache', 'k.'), None)
c.store('test_cache', 'k.', obj)
self.assertEqual(c.load('test_cache', 'k2'), None)
self.assertFalse(_is_empty(self.test_dir))
self.assertEqual(c.load('test_cache', 'k.'), obj)
self.assertEqual(c.load('test_cache', 'y'), None)
self.assertEqual(c.load('test_cache2', 'k.'), None)
c.remove()
self.assertFalse(os.path.exists(self.test_dir))
self.assertEqual(c.load('test_cache', 'k.'), None)
if __name__ == '__main__':
unittest.main()
| unlicense |
mszewczy/odoo | addons/stock_invoice_directly/__openerp__.py | 260 | 1618 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Invoice Picking Directly',
'version': '1.0',
'category' : 'Warehouse Management',
'description': """
Invoice Wizard for Delivery.
============================
When you send or deliver goods, this module automatically launches the invoicing
wizard if the delivery is to be invoiced.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/warehouse',
'depends': ['delivery', 'stock'],
'data': [],
'demo': [],
'test': ['test/stock_invoice_directly.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
alfonsodev/ansible-modules-extras | monitoring/zabbix_screen.py | 59 | 16469 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013-2014, Epic Games, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: zabbix_screen
short_description: Zabbix screen creates/updates/deletes
description:
- This module allows you to create, modify and delete Zabbix screens and associated graph data.
version_added: "2.0"
author:
- "(@cove)"
- "Tony Minfei Ding"
- "Harrison Gu (@harrisongu)"
requirements:
- "python >= 2.6"
- zabbix-api
options:
server_url:
description:
- Url of Zabbix server, with protocol (http or https).
required: true
aliases: [ "url" ]
login_user:
description:
- Zabbix user name.
required: true
login_password:
description:
- Zabbix user password.
required: true
timeout:
description:
- The timeout of API request (seconds).
default: 10
zabbix_screens:
description:
- List of screens to be created/updated/deleted(see example).
            - If the screen(s) have already been added, the screen(s) name won't be updated.
- When creating or updating screen(s), C(screen_name), C(host_group) are required.
- When deleting screen(s), the C(screen_name) is required.
- 'The available states are: C(present) (default) and C(absent). If the screen(s) already exists, and the state is not C(absent), the screen(s) will just be updated as needed.'
required: true
notes:
    - Too many concurrent updates to the same screen may cause Zabbix to return errors; see examples for a workaround if needed.
'''
EXAMPLES = '''
# Create/update a screen.
- name: Create a new screen or update an existing screen's items
local_action:
module: zabbix_screen
server_url: http://monitor.example.com
login_user: username
login_password: password
screens:
- screen_name: ExampleScreen1
host_group: Example group1
state: present
graph_names:
- Example graph1
- Example graph2
graph_width: 200
graph_height: 100
# Create/update multi-screen
- name: Create two of new screens or update the existing screens' items
local_action:
module: zabbix_screen
server_url: http://monitor.example.com
login_user: username
login_password: password
screens:
- screen_name: ExampleScreen1
host_group: Example group1
state: present
graph_names:
- Example graph1
- Example graph2
graph_width: 200
graph_height: 100
- screen_name: ExampleScreen2
host_group: Example group2
state: present
graph_names:
- Example graph1
- Example graph2
graph_width: 200
graph_height: 100
# Limit the Zabbix screen creation to one host since Zabbix can return an error when doing concurrent updates
- name: Create a new screen or update an existing screen's items
local_action:
module: zabbix_screen
server_url: http://monitor.example.com
login_user: username
login_password: password
state: present
screens:
- screen_name: ExampleScreen
host_group: Example group
state: present
graph_names:
- Example graph1
- Example graph2
graph_width: 200
graph_height: 100
when: inventory_hostname==groups['group_name'][0]
'''
try:
from zabbix_api import ZabbixAPI, ZabbixAPISubClass
from zabbix_api import ZabbixAPIException
from zabbix_api import Already_Exists
HAS_ZABBIX_API = True
except ImportError:
HAS_ZABBIX_API = False
# Extend the ZabbixAPI
# Since the zabbix-api python module is too old (version 1.0, and there's no higher version so far), it doesn't support the 'screenitem' api call,
# we have to inherit the ZabbixAPI class to add 'screenitem' support.
class ZabbixAPIExtends(ZabbixAPI):
screenitem = None
def __init__(self, server, timeout, **kwargs):
ZabbixAPI.__init__(self, server, timeout=timeout)
self.screenitem = ZabbixAPISubClass(self, dict({"prefix": "screenitem"}, **kwargs))
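# Illustrative use of the extended API (values follow this module's own examples
# and are not part of the original file):
#   zbx = ZabbixAPIExtends('http://monitor.example.com', timeout=10)
#   zbx.login('username', 'password')
#   items = zbx.screenitem.get({'output': 'extend', 'screenids': screen_id})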
class Screen(object):
def __init__(self, module, zbx):
self._module = module
self._zapi = zbx
# get group id by group name
def get_host_group_id(self, group_name):
if group_name == "":
self._module.fail_json(msg="group_name is required")
hostGroup_list = self._zapi.hostgroup.get({'output': 'extend', 'filter': {'name': group_name}})
if len(hostGroup_list) < 1:
self._module.fail_json(msg="Host group not found: %s" % group_name)
else:
hostGroup_id = hostGroup_list[0]['groupid']
return hostGroup_id
# get monitored host_id by host_group_id
def get_host_ids_by_group_id(self, group_id):
host_list = self._zapi.host.get({'output': 'extend', 'groupids': group_id, 'monitored_hosts': 1})
if len(host_list) < 1:
self._module.fail_json(msg="No host in the group.")
else:
host_ids = []
for i in host_list:
host_id = i['hostid']
host_ids.append(host_id)
return host_ids
# get screen
def get_screen_id(self, screen_name):
if screen_name == "":
self._module.fail_json(msg="screen_name is required")
try:
screen_id_list = self._zapi.screen.get({'output': 'extend', 'search': {"name": screen_name}})
if len(screen_id_list) >= 1:
screen_id = screen_id_list[0]['screenid']
return screen_id
return None
except Exception as e:
self._module.fail_json(msg="Failed to get screen %s from Zabbix: %s" % (screen_name, e))
# create screen
def create_screen(self, screen_name, h_size, v_size):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
screen = self._zapi.screen.create({'name': screen_name, 'hsize': h_size, 'vsize': v_size})
return screen['screenids'][0]
except Exception as e:
self._module.fail_json(msg="Failed to create screen %s: %s" % (screen_name, e))
# update screen
def update_screen(self, screen_id, screen_name, h_size, v_size):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.screen.update({'screenid': screen_id, 'hsize': h_size, 'vsize': v_size})
except Exception as e:
self._module.fail_json(msg="Failed to update screen %s: %s" % (screen_name, e))
# delete screen
def delete_screen(self, screen_id, screen_name):
try:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.screen.delete([screen_id])
except Exception as e:
self._module.fail_json(msg="Failed to delete screen %s: %s" % (screen_name, e))
# get graph ids
def get_graph_ids(self, hosts, graph_name_list):
graph_id_lists = []
vsize = 1
for host in hosts:
graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
size = len(graph_id_list)
if size > 0:
graph_id_lists.extend(graph_id_list)
if vsize < size:
vsize = size
return graph_id_lists, vsize
# getGraphs
def get_graphs_by_host_id(self, graph_name_list, host_id):
graph_ids = []
for graph_name in graph_name_list:
graphs_list = self._zapi.graph.get({'output': 'extend', 'search': {'name': graph_name}, 'hostids': host_id})
graph_id_list = []
if len(graphs_list) > 0:
for graph in graphs_list:
graph_id = graph['graphid']
graph_id_list.append(graph_id)
if len(graph_id_list) > 0:
graph_ids.extend(graph_id_list)
return graph_ids
# get screen items
def get_screen_items(self, screen_id):
screen_item_list = self._zapi.screenitem.get({'output': 'extend', 'screenids': screen_id})
return screen_item_list
# delete screen items
def delete_screen_items(self, screen_id, screen_item_id_list):
try:
if len(screen_item_id_list) == 0:
return True
screen_item_list = self.get_screen_items(screen_id)
if len(screen_item_list) > 0:
if self._module.check_mode:
self._module.exit_json(changed=True)
self._zapi.screenitem.delete(screen_item_id_list)
return True
return False
except ZabbixAPIException:
pass
# get screen's hsize and vsize
def get_hsize_vsize(self, hosts, v_size):
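        # Layout heuristic (descriptive note): with multiple hosts, use one
        # column per host and keep v_size as passed in; with a single host,
        # pick 1-3 columns based on how many graphs (v_size) must be placed,
        # then recompute v_size as the resulting row count.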
h_size = len(hosts)
if h_size == 1:
if v_size == 1:
h_size = 1
elif v_size in range(2, 9):
h_size = 2
else:
h_size = 3
v_size = (v_size - 1) / h_size + 1
return h_size, v_size
# create screen_items
def create_screen_items(self, screen_id, hosts, graph_name_list, width, height, h_size):
if len(hosts) < 4:
if width is None or width < 0:
width = 500
else:
if width is None or width < 0:
width = 200
if height is None or height < 0:
height = 100
try:
            # when there is only one host, putting all graphs in a single row is not good.
if len(hosts) == 1:
graph_id_list = self.get_graphs_by_host_id(graph_name_list, hosts[0])
for i, graph_id in enumerate(graph_id_list):
if graph_id is not None:
self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
'width': width, 'height': height,
'x': i % h_size, 'y': i / h_size, 'colspan': 1, 'rowspan': 1,
'elements': 0, 'valign': 0, 'halign': 0,
'style': 0, 'dynamic': 0, 'sort_triggers': 0})
else:
for i, host in enumerate(hosts):
graph_id_list = self.get_graphs_by_host_id(graph_name_list, host)
for j, graph_id in enumerate(graph_id_list):
if graph_id is not None:
self._zapi.screenitem.create({'screenid': screen_id, 'resourcetype': 0, 'resourceid': graph_id,
'width': width, 'height': height,
'x': i, 'y': j, 'colspan': 1, 'rowspan': 1,
'elements': 0, 'valign': 0, 'halign': 0,
'style': 0, 'dynamic': 0, 'sort_triggers': 0})
except Already_Exists:
pass
def main():
module = AnsibleModule(
argument_spec=dict(
server_url=dict(required=True, aliases=['url']),
login_user=dict(required=True),
login_password=dict(required=True, no_log=True),
timeout=dict(type='int', default=10),
screens=dict(type='dict', required=True)
),
supports_check_mode=True
)
if not HAS_ZABBIX_API:
module.fail_json(msg="Missing requried zabbix-api module (check docs or install with: pip install zabbix-api)")
server_url = module.params['server_url']
login_user = module.params['login_user']
login_password = module.params['login_password']
timeout = module.params['timeout']
screens = module.params['screens']
zbx = None
# login to zabbix
try:
zbx = ZabbixAPIExtends(server_url, timeout=timeout)
zbx.login(login_user, login_password)
    except Exception as e:
module.fail_json(msg="Failed to connect to Zabbix server: %s" % e)
screen = Screen(module, zbx)
created_screens = []
changed_screens = []
deleted_screens = []
for zabbix_screen in screens:
screen_name = zabbix_screen['screen_name']
screen_id = screen.get_screen_id(screen_name)
state = "absent" if "state" in zabbix_screen and zabbix_screen['state'] == "absent" else "present"
if state == "absent":
if screen_id:
screen_item_list = screen.get_screen_items(screen_id)
screen_item_id_list = []
for screen_item in screen_item_list:
screen_item_id = screen_item['screenitemid']
screen_item_id_list.append(screen_item_id)
screen.delete_screen_items(screen_id, screen_item_id_list)
screen.delete_screen(screen_id, screen_name)
deleted_screens.append(screen_name)
else:
host_group = zabbix_screen['host_group']
graph_names = zabbix_screen['graph_names']
graph_width = None
if 'graph_width' in zabbix_screen:
graph_width = zabbix_screen['graph_width']
graph_height = None
if 'graph_height' in zabbix_screen:
graph_height = zabbix_screen['graph_height']
host_group_id = screen.get_host_group_id(host_group)
hosts = screen.get_host_ids_by_group_id(host_group_id)
screen_item_id_list = []
resource_id_list = []
graph_ids, v_size = screen.get_graph_ids(hosts, graph_names)
h_size, v_size = screen.get_hsize_vsize(hosts, v_size)
if not screen_id:
# create screen
screen_id = screen.create_screen(screen_name, h_size, v_size)
screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size)
created_screens.append(screen_name)
else:
screen_item_list = screen.get_screen_items(screen_id)
for screen_item in screen_item_list:
screen_item_id = screen_item['screenitemid']
resource_id = screen_item['resourceid']
screen_item_id_list.append(screen_item_id)
resource_id_list.append(resource_id)
# when the screen items changed, then update
if graph_ids != resource_id_list:
deleted = screen.delete_screen_items(screen_id, screen_item_id_list)
if deleted:
screen.update_screen(screen_id, screen_name, h_size, v_size)
screen.create_screen_items(screen_id, hosts, graph_names, graph_width, graph_height, h_size)
changed_screens.append(screen_name)
if created_screens and changed_screens:
module.exit_json(changed=True, result="Successfully created screen(s): %s, and updated screen(s): %s" % (",".join(created_screens), ",".join(changed_screens)))
elif created_screens:
module.exit_json(changed=True, result="Successfully created screen(s): %s" % ",".join(created_screens))
elif changed_screens:
module.exit_json(changed=True, result="Successfully updated screen(s): %s" % ",".join(changed_screens))
elif deleted_screens:
module.exit_json(changed=True, result="Successfully deleted screen(s): %s" % ",".join(deleted_screens))
else:
module.exit_json(changed=False)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
proxysh/Safejumper-for-Mac | buildmac/Resources/env/lib/python2.7/site-packages/pyparsing.py | 213 | 229867 | # module pyparsing.py
#
# Copyright (c) 2003-2016 Paul T. McGuire
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = \
"""
pyparsing module - Classes and methods to define and execute parsing grammars
The pyparsing module is an alternative approach to creating and executing simple grammars,
vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you
don't need to learn a new syntax for defining grammars or matching expressions - the parsing module
provides a library of classes that you use to construct the grammar directly in Python.
Here is a program to parse "Hello, World!" (or any greeting of the form
C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements
(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to
L{Literal} expressions)::
from pyparsing import Word, alphas
# define grammar of a greeting
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
The program outputs the following::
Hello, World! -> ['Hello', ',', 'World', '!']
The Python representation of the grammar is quite readable, owing to the self-explanatory
class names, and the use of '+', '|' and '^' operators.
The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an
object with named attributes.
The pyparsing module handles some of the problems that are typically vexing when writing text parsers:
- extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.)
- quoted strings
- embedded comments
"""
__version__ = "2.1.10"
__versionTime__ = "07 Oct 2016 01:31 UTC"
__author__ = "Paul McGuire <[email protected]>"
import string
from weakref import ref as wkref
import copy
import sys
import warnings
import re
import sre_constants
import collections
import pprint
import traceback
import types
from datetime import datetime
try:
from _thread import RLock
except ImportError:
from threading import RLock
try:
from collections import OrderedDict as _OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict as _OrderedDict
except ImportError:
_OrderedDict = None
#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) )
__all__ = [
'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty',
'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal',
'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or',
'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException',
'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException',
'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter',
'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore',
'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col',
'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString',
'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums',
'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno',
'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral',
'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables',
'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity',
'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd',
'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute',
'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass',
'CloseMatch', 'tokenMap', 'pyparsing_common',
]
system_version = tuple(sys.version_info)[:3]
PY_3 = system_version[0] == 3
if PY_3:
_MAX_INT = sys.maxsize
basestring = str
unichr = chr
_ustr = str
# build list of single arg builtins, that can be used as parse actions
singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max]
else:
_MAX_INT = sys.maxint
range = xrange
def _ustr(obj):
"""Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries
str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It
then < returns the unicode object | encodes it with the default encoding | ... >.
"""
if isinstance(obj,unicode):
return obj
try:
# If this works, then _ustr(obj) has the same behaviour as str(obj), so
# it won't break any existing code.
return str(obj)
except UnicodeEncodeError:
# Else encode it
ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace')
xmlcharref = Regex('&#\d+;')
xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:])
return xmlcharref.transformString(ret)
# build list of single arg builtins, tolerant of Python version, that can be used as parse actions
singleArgBuiltins = []
import __builtin__
for fname in "sum len sorted reversed list tuple set any all min max".split():
try:
singleArgBuiltins.append(getattr(__builtin__,fname))
except AttributeError:
continue
_generatorType = type((y for y in range(1)))
def _xml_escape(data):
"""Escape &, <, >, ", ', etc. in a string of data."""
# ampersand must be replaced first
from_symbols = '&><"\''
to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split())
for from_,to_ in zip(from_symbols, to_symbols):
data = data.replace(from_, to_)
return data
class _Constants(object):
pass
alphas = string.ascii_uppercase + string.ascii_lowercase
nums = "0123456789"
hexnums = nums + "ABCDEFabcdef"
alphanums = alphas + nums
_bslash = chr(92)
printables = "".join(c for c in string.printable if c not in string.whitespace)
class ParseBaseException(Exception):
"""base exception class for all parsing runtime exceptions"""
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, pstr, loc=0, msg=None, elem=None ):
self.loc = loc
if msg is None:
self.msg = pstr
self.pstr = ""
else:
self.msg = msg
self.pstr = pstr
self.parserElement = elem
self.args = (pstr, loc, msg)
@classmethod
def _from_exception(cls, pe):
"""
internal factory method to simplify creating one type of ParseException
from another - avoids having __init__ signature conflicts among subclasses
"""
return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement)
def __getattr__( self, aname ):
"""supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
"""
if( aname == "lineno" ):
return lineno( self.loc, self.pstr )
elif( aname in ("col", "column") ):
return col( self.loc, self.pstr )
elif( aname == "line" ):
return line( self.loc, self.pstr )
else:
raise AttributeError(aname)
def __str__( self ):
return "%s (at char %d), (line:%d, col:%d)" % \
( self.msg, self.loc, self.lineno, self.column )
def __repr__( self ):
return _ustr(self)
def markInputline( self, markerString = ">!<" ):
"""Extracts the exception line from the input string, and marks
the location of the exception with a special symbol.
"""
line_str = self.line
line_column = self.column - 1
if markerString:
line_str = "".join((line_str[:line_column],
markerString, line_str[line_column:]))
return line_str.strip()
def __dir__(self):
return "lineno col line".split() + dir(type(self))
class ParseException(ParseBaseException):
"""
Exception thrown when parse expressions don't match class;
supported attributes by name are:
- lineno - returns the line number of the exception text
- col - returns the column number of the exception text
- line - returns the line containing the exception text
Example::
try:
Word(nums).setName("integer").parseString("ABC")
except ParseException as pe:
print(pe)
print("column: {}".format(pe.col))
prints::
Expected integer (at char 0), (line:1, col:1)
column: 1
"""
pass
class ParseFatalException(ParseBaseException):
"""user-throwable exception thrown when inconsistent parse content
is found; stops all parsing immediately"""
pass
class ParseSyntaxException(ParseFatalException):
"""just like L{ParseFatalException}, but thrown internally when an
L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop
immediately because an unbacktrackable syntax error has been found"""
pass
#~ class ReparseException(ParseBaseException):
#~ """Experimental class - parse actions can raise this exception to cause
#~ pyparsing to reparse the input string:
#~ - with a modified input string, and/or
#~ - with a modified start location
#~ Set the values of the ReparseException in the constructor, and raise the
#~ exception in a parse action to cause pyparsing to use the new string/location.
#~ Setting the values as None causes no change to be made.
#~ """
#~ def __init_( self, newstring, restartLoc ):
#~ self.newParseText = newstring
#~ self.reparseLoc = restartLoc
class RecursiveGrammarException(Exception):
"""exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive"""
def __init__( self, parseElementList ):
self.parseElementTrace = parseElementList
def __str__( self ):
return "RecursiveGrammarException: %s" % self.parseElementTrace
class _ParseResultsWithOffset(object):
def __init__(self,p1,p2):
self.tup = (p1,p2)
def __getitem__(self,i):
return self.tup[i]
def __repr__(self):
return repr(self.tup[0])
def setOffset(self,i):
self.tup = (self.tup[0],i)
class ParseResults(object):
"""
Structured parse results, to provide multiple means of access to the parsed data:
- as a list (C{len(results)})
- by list index (C{results[0], results[1]}, etc.)
- by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName})
Example::
integer = Word(nums)
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
# date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
# parseString returns a ParseResults object
result = date_str.parseString("1999/12/31")
def test(s, fn=repr):
print("%s -> %s" % (s, fn(eval(s))))
test("list(result)")
test("result[0]")
test("result['month']")
test("result.day")
test("'month' in result")
test("'minutes' in result")
test("result.dump()", str)
prints::
list(result) -> ['1999', '/', '12', '/', '31']
result[0] -> '1999'
result['month'] -> '12'
result.day -> '31'
'month' in result -> True
'minutes' in result -> False
result.dump() -> ['1999', '/', '12', '/', '31']
- day: 31
- month: 12
- year: 1999
"""
def __new__(cls, toklist=None, name=None, asList=True, modal=True ):
if isinstance(toklist, cls):
return toklist
retobj = object.__new__(cls)
retobj.__doinit = True
return retobj
# Performance tuning: we construct a *lot* of these, so keep this
# constructor as small and fast as possible
def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ):
if self.__doinit:
self.__doinit = False
self.__name = None
self.__parent = None
self.__accumNames = {}
self.__asList = asList
self.__modal = modal
if toklist is None:
toklist = []
if isinstance(toklist, list):
self.__toklist = toklist[:]
elif isinstance(toklist, _generatorType):
self.__toklist = list(toklist)
else:
self.__toklist = [toklist]
self.__tokdict = dict()
if name is not None and name:
if not modal:
self.__accumNames[name] = 0
if isinstance(name,int):
name = _ustr(name) # will always return a str, but use _ustr for consistency
self.__name = name
if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])):
if isinstance(toklist,basestring):
toklist = [ toklist ]
if asList:
if isinstance(toklist,ParseResults):
self[name] = _ParseResultsWithOffset(toklist.copy(),0)
else:
self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0)
self[name].__name = name
else:
try:
self[name] = toklist[0]
except (KeyError,TypeError,IndexError):
self[name] = toklist
def __getitem__( self, i ):
if isinstance( i, (int,slice) ):
return self.__toklist[i]
else:
if i not in self.__accumNames:
return self.__tokdict[i][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[i] ])
def __setitem__( self, k, v, isinstance=isinstance ):
if isinstance(v,_ParseResultsWithOffset):
self.__tokdict[k] = self.__tokdict.get(k,list()) + [v]
sub = v[0]
elif isinstance(k,(int,slice)):
self.__toklist[k] = v
sub = v
else:
self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)]
sub = v
if isinstance(sub,ParseResults):
sub.__parent = wkref(self)
def __delitem__( self, i ):
if isinstance(i,(int,slice)):
mylen = len( self.__toklist )
del self.__toklist[i]
# convert int to slice
if isinstance(i, int):
if i < 0:
i += mylen
i = slice(i, i+1)
# get removed indices
removed = list(range(*i.indices(mylen)))
removed.reverse()
# fixup indices in token dictionary
for name,occurrences in self.__tokdict.items():
for j in removed:
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position - (position > j))
else:
del self.__tokdict[i]
def __contains__( self, k ):
return k in self.__tokdict
def __len__( self ): return len( self.__toklist )
def __bool__(self): return ( not not self.__toklist )
__nonzero__ = __bool__
def __iter__( self ): return iter( self.__toklist )
def __reversed__( self ): return iter( self.__toklist[::-1] )
def _iterkeys( self ):
if hasattr(self.__tokdict, "iterkeys"):
return self.__tokdict.iterkeys()
else:
return iter(self.__tokdict)
def _itervalues( self ):
return (self[k] for k in self._iterkeys())
def _iteritems( self ):
return ((k, self[k]) for k in self._iterkeys())
if PY_3:
keys = _iterkeys
"""Returns an iterator of all named result keys (Python 3.x only)."""
values = _itervalues
"""Returns an iterator of all named result values (Python 3.x only)."""
items = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 3.x only)."""
else:
iterkeys = _iterkeys
"""Returns an iterator of all named result keys (Python 2.x only)."""
itervalues = _itervalues
"""Returns an iterator of all named result values (Python 2.x only)."""
iteritems = _iteritems
"""Returns an iterator of all named result key-value tuples (Python 2.x only)."""
def keys( self ):
"""Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iterkeys())
def values( self ):
"""Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x)."""
return list(self.itervalues())
def items( self ):
"""Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x)."""
return list(self.iteritems())
def haskeys( self ):
"""Since keys() returns an iterator, this method is helpful in bypassing
code that looks for the existence of any defined results names."""
return bool(self.__tokdict)
def pop( self, *args, **kwargs):
"""
Removes and returns item at specified index (default=C{last}).
Supports both C{list} and C{dict} semantics for C{pop()}. If passed no
argument or an integer argument, it will use C{list} semantics
and pop tokens from the list of parsed tokens. If passed a
non-integer argument (most likely a string), it will use C{dict}
semantics and pop the corresponding value from any defined
results names. A second default return value argument is
supported, just as in C{dict.pop()}.
Example::
def remove_first(tokens):
tokens.pop(0)
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321']
label = Word(alphas)
patt = label("LABEL") + OneOrMore(Word(nums))
print(patt.parseString("AAB 123 321").dump())
# Use pop() in a parse action to remove named result (note that corresponding value is not
# removed from list form of results)
def remove_LABEL(tokens):
tokens.pop("LABEL")
return tokens
patt.addParseAction(remove_LABEL)
print(patt.parseString("AAB 123 321").dump())
prints::
['AAB', '123', '321']
- LABEL: AAB
['AAB', '123', '321']
"""
if not args:
args = [-1]
for k,v in kwargs.items():
if k == 'default':
args = (args[0], v)
else:
raise TypeError("pop() got an unexpected keyword argument '%s'" % k)
if (isinstance(args[0], int) or
len(args) == 1 or
args[0] in self):
index = args[0]
ret = self[index]
del self[index]
return ret
else:
defaultvalue = args[1]
return defaultvalue
def get(self, key, defaultValue=None):
"""
Returns named result matching the given key, or if there is no
such name, then returns the given C{defaultValue} or C{None} if no
C{defaultValue} is specified.
Similar to C{dict.get()}.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString("1999/12/31")
print(result.get("year")) # -> '1999'
print(result.get("hour", "not specified")) # -> 'not specified'
print(result.get("hour")) # -> None
"""
if key in self:
return self[key]
else:
return defaultValue
def insert( self, index, insStr ):
"""
Inserts new element at location index in the list of parsed tokens.
Similar to C{list.insert()}.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to insert the parse location in the front of the parsed results
def insert_locn(locn, tokens):
tokens.insert(0, locn)
print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321']
"""
self.__toklist.insert(index, insStr)
# fixup indices in token dictionary
for name,occurrences in self.__tokdict.items():
for k, (value, position) in enumerate(occurrences):
occurrences[k] = _ParseResultsWithOffset(value, position + (position > index))
def append( self, item ):
"""
Add single element to end of ParseResults list of elements.
Example::
print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321']
# use a parse action to compute the sum of the parsed integers, and add it to the end
def append_sum(tokens):
tokens.append(sum(map(int, tokens)))
print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444]
"""
self.__toklist.append(item)
def extend( self, itemseq ):
"""
Add sequence of elements to end of ParseResults list of elements.
Example::
patt = OneOrMore(Word(alphas))
# use a parse action to append the reverse of the matched strings, to make a palindrome
def make_palindrome(tokens):
tokens.extend(reversed([t[::-1] for t in tokens]))
return ''.join(tokens)
print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
"""
if isinstance(itemseq, ParseResults):
self += itemseq
else:
self.__toklist.extend(itemseq)
def clear( self ):
"""
Clear all elements and results names.
"""
del self.__toklist[:]
self.__tokdict.clear()
def __getattr__( self, name ):
try:
return self[name]
except KeyError:
return ""
if name in self.__tokdict:
if name not in self.__accumNames:
return self.__tokdict[name][-1][0]
else:
return ParseResults([ v[0] for v in self.__tokdict[name] ])
else:
return ""
def __add__( self, other ):
ret = self.copy()
ret += other
return ret
def __iadd__( self, other ):
if other.__tokdict:
offset = len(self.__toklist)
addoffset = lambda a: offset if a<0 else a+offset
otheritems = other.__tokdict.items()
otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) )
for (k,vlist) in otheritems for v in vlist]
for k,v in otherdictitems:
self[k] = v
if isinstance(v[0],ParseResults):
v[0].__parent = wkref(self)
self.__toklist += other.__toklist
self.__accumNames.update( other.__accumNames )
return self
def __radd__(self, other):
if isinstance(other,int) and other == 0:
# useful for merging many ParseResults using sum() builtin
return self.copy()
else:
# this may raise a TypeError - so be it
return other + self
def __repr__( self ):
return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) )
def __str__( self ):
return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']'
def _asStringList( self, sep='' ):
out = []
for item in self.__toklist:
if out and sep:
out.append(sep)
if isinstance( item, ParseResults ):
out += item._asStringList()
else:
out.append( _ustr(item) )
return out
def asList( self ):
"""
Returns the parse results as a nested list of matching tokens, all converted to strings.
Example::
patt = OneOrMore(Word(alphas))
result = patt.parseString("sldkj lsdkj sldkj")
# even though the result prints in string-like form, it is actually a pyparsing ParseResults
print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']
# Use asList() to create an actual list
result_list = result.asList()
print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
"""
return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist]
def asDict( self ):
"""
Returns the named parse results as a nested dictionary.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})
result_dict = result.asDict()
print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}
# even though a ParseResults supports dict-like access, sometime you just need to have a dict
import json
print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"}
"""
if PY_3:
item_fn = self.items
else:
item_fn = self.iteritems
def toItem(obj):
if isinstance(obj, ParseResults):
if obj.haskeys():
return obj.asDict()
else:
return [toItem(v) for v in obj]
else:
return obj
return dict((k,toItem(v)) for k,v in item_fn())
def copy( self ):
"""
Returns a new copy of a C{ParseResults} object.
"""
ret = ParseResults( self.__toklist )
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update( self.__accumNames )
ret.__name = self.__name
return ret
def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ):
"""
(Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names.
"""
nl = "\n"
out = []
namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items()
for v in vlist)
nextLevelIndent = indent + " "
# collapse out indents if formatting is not desired
if not formatted:
indent = ""
nextLevelIndent = ""
nl = ""
selfTag = None
if doctag is not None:
selfTag = doctag
else:
if self.__name:
selfTag = self.__name
if not selfTag:
if namedItemsOnly:
return ""
else:
selfTag = "ITEM"
out += [ nl, indent, "<", selfTag, ">" ]
for i,res in enumerate(self.__toklist):
if isinstance(res,ParseResults):
if i in namedItems:
out += [ res.asXML(namedItems[i],
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
out += [ res.asXML(None,
namedItemsOnly and doctag is None,
nextLevelIndent,
formatted)]
else:
# individual token, see if there is a name for it
resTag = None
if i in namedItems:
resTag = namedItems[i]
if not resTag:
if namedItemsOnly:
continue
else:
resTag = "ITEM"
xmlBodyText = _xml_escape(_ustr(res))
out += [ nl, nextLevelIndent, "<", resTag, ">",
xmlBodyText,
"</", resTag, ">" ]
out += [ nl, indent, "</", selfTag, ">" ]
return "".join(out)
def __lookup(self,sub):
for k,vlist in self.__tokdict.items():
for v,loc in vlist:
if sub is v:
return k
return None
def getName(self):
"""
Returns the results name for this token expression. Useful when several
different expressions might match at a particular location.
Example::
integer = Word(nums)
ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
house_number_expr = Suppress('#') + Word(nums, alphanums)
user_data = (Group(house_number_expr)("house_number")
| Group(ssn_expr)("ssn")
| Group(integer)("age"))
user_info = OneOrMore(user_data)
result = user_info.parseString("22 111-22-3333 #221B")
for item in result:
print(item.getName(), ':', item[0])
prints::
age : 22
ssn : 111-22-3333
house_number : 221B
"""
if self.__name:
return self.__name
elif self.__parent:
par = self.__parent()
if par:
return par.__lookup(self)
else:
return None
elif (len(self) == 1 and
len(self.__tokdict) == 1 and
next(iter(self.__tokdict.values()))[0][1] in (0,-1)):
return next(iter(self.__tokdict.keys()))
else:
return None
def dump(self, indent='', depth=0, full=True):
"""
Diagnostic method for listing out the contents of a C{ParseResults}.
Accepts an optional C{indent} argument so that this string can be embedded
in a nested display of other data.
Example::
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
result = date_str.parseString('12/31/1999')
print(result.dump())
prints::
['12', '/', '31', '/', '1999']
- day: 1999
- month: 31
- year: 12
"""
out = []
NL = '\n'
out.append( indent+_ustr(self.asList()) )
if full:
if self.haskeys():
items = sorted((str(k), v) for k,v in self.items())
for k,v in items:
if out:
out.append(NL)
out.append( "%s%s- %s: " % (indent,(' '*depth), k) )
if isinstance(v,ParseResults):
if v:
out.append( v.dump(indent,depth+1) )
else:
out.append(_ustr(v))
else:
out.append(repr(v))
elif any(isinstance(vv,ParseResults) for vv in self):
v = self
for i,vv in enumerate(v):
if isinstance(vv,ParseResults):
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) ))
else:
out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv)))
return "".join(out)
def pprint(self, *args, **kwargs):
"""
Pretty-printer for parsed results as a list, using the C{pprint} module.
Accepts additional positional or keyword args as defined for the
C{pprint.pprint} method. (U{http://docs.python.org/3/library/pprint.html#pprint.pprint})
Example::
ident = Word(alphas, alphanums)
num = Word(nums)
func = Forward()
term = ident | num | Group('(' + func + ')')
func <<= ident + Group(Optional(delimitedList(term)))
result = func.parseString("fna a,b,(fnb c,d,200),100")
result.pprint(width=40)
prints::
['fna',
['a',
'b',
['(', 'fnb', ['c', 'd', '200'], ')'],
'100']]
"""
pprint.pprint(self.asList(), *args, **kwargs)
# add support for pickle protocol
def __getstate__(self):
return ( self.__toklist,
( self.__tokdict.copy(),
self.__parent is not None and self.__parent() or None,
self.__accumNames,
self.__name ) )
def __setstate__(self,state):
self.__toklist = state[0]
(self.__tokdict,
par,
inAccumNames,
self.__name) = state[1]
self.__accumNames = {}
self.__accumNames.update(inAccumNames)
if par is not None:
self.__parent = wkref(par)
else:
self.__parent = None
def __getnewargs__(self):
return self.__toklist, self.__name, self.__asList, self.__modal
def __dir__(self):
return (dir(type(self)) + list(self.keys()))
collections.MutableMapping.register(ParseResults)
def col (loc,strg):
"""Returns current column within a string, counting newlines as line separators.
The first column is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
s = strg
return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc)
def lineno(loc,strg):
"""Returns current line number within a string, counting newlines as line separators.
The first line is number 1.
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
"""
return strg.count("\n",0,loc) + 1
def line( loc, strg ):
"""Returns the line of text containing loc within a string, counting newlines as line separators.
"""
lastCR = strg.rfind("\n", 0, loc)
nextCR = strg.find("\n", loc)
if nextCR >= 0:
return strg[lastCR+1:nextCR]
else:
return strg[lastCR+1:]
def _defaultStartDebugAction( instring, loc, expr ):
print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )))
def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ):
print ("Matched " + _ustr(expr) + " -> " + str(toks.asList()))
def _defaultExceptionDebugAction( instring, loc, expr, exc ):
print ("Exception raised:" + _ustr(exc))
def nullDebugAction(*args):
"""'Do-nothing' debug action, to suppress debugging output during parsing."""
pass
# Only works on Python 3.x - nonlocal is toxic to Python 2 installs
#~ 'decorator to trim function calls to match the arity of the target'
#~ def _trim_arity(func, maxargs=3):
#~ if func in singleArgBuiltins:
#~ return lambda s,l,t: func(t)
#~ limit = 0
#~ foundArity = False
#~ def wrapper(*args):
#~ nonlocal limit,foundArity
#~ while 1:
#~ try:
#~ ret = func(*args[limit:])
#~ foundArity = True
#~ return ret
#~ except TypeError:
#~ if limit == maxargs or foundArity:
#~ raise
#~ limit += 1
#~ continue
#~ return wrapper
# this version is Python 2.x-3.x cross-compatible
'decorator to trim function calls to match the arity of the target'
def _trim_arity(func, maxargs=2):
if func in singleArgBuiltins:
return lambda s,l,t: func(t)
limit = [0]
foundArity = [False]
# traceback return data structure changed in Py3.5 - normalize back to plain tuples
if system_version[:2] >= (3,5):
def extract_stack(limit=0):
# special handling for Python 3.5.0 - extra deep call stack by 1
offset = -3 if system_version == (3,5,0) else -2
frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset]
return [(frame_summary.filename, frame_summary.lineno)]
def extract_tb(tb, limit=0):
frames = traceback.extract_tb(tb, limit=limit)
frame_summary = frames[-1]
return [(frame_summary.filename, frame_summary.lineno)]
else:
extract_stack = traceback.extract_stack
extract_tb = traceback.extract_tb
# synthesize what would be returned by traceback.extract_stack at the call to
# user's parse action 'func', so that we don't incur call penalty at parse time
LINE_DIFF = 6
# IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND
# THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!!
this_line = extract_stack(limit=2)[-1]
pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF)
def wrapper(*args):
while 1:
try:
ret = func(*args[limit[0]:])
foundArity[0] = True
return ret
except TypeError:
# re-raise TypeErrors if they did not come from our arity testing
if foundArity[0]:
raise
else:
try:
tb = sys.exc_info()[-1]
if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth:
raise
finally:
del tb
if limit[0] <= maxargs:
limit[0] += 1
continue
raise
# copy func name to wrapper for sensible debug output
func_name = "<parse action>"
try:
func_name = getattr(func, '__name__',
getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
wrapper.__name__ = func_name
return wrapper
class ParserElement(object):
"""Abstract base level parser element class."""
DEFAULT_WHITE_CHARS = " \n\t\r"
verbose_stacktrace = False
@staticmethod
def setDefaultWhitespaceChars( chars ):
r"""
Overrides the default whitespace chars
Example::
# default whitespace chars are space, <TAB> and newline
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl']
# change to just treat newline as significant
ParserElement.setDefaultWhitespaceChars(" \t")
OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def']
"""
ParserElement.DEFAULT_WHITE_CHARS = chars
@staticmethod
def inlineLiteralsUsing(cls):
"""
Set class to be used for inclusion of string literals into a parser.
Example::
# default literal class used is Literal
integer = Word(nums)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# change to Suppress
ParserElement.inlineLiteralsUsing(Suppress)
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
date_str.parseString("1999/12/31") # -> ['1999', '12', '31']
"""
ParserElement._literalStringClass = cls
def __init__( self, savelist=False ):
self.parseAction = list()
self.failAction = None
#~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall
self.strRepr = None
self.resultsName = None
self.saveAsList = savelist
self.skipWhitespace = True
self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
self.copyDefaultWhiteChars = True
self.mayReturnEmpty = False # used when checking for left-recursion
self.keepTabs = False
self.ignoreExprs = list()
self.debug = False
self.streamlined = False
self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index
self.errmsg = ""
self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all)
self.debugActions = ( None, None, None ) #custom debug actions
self.re = None
self.callPreparse = True # used to avoid redundant calls to preParse
self.callDuringTry = False
def copy( self ):
"""
Make a copy of this C{ParserElement}. Useful for defining different parse actions
for the same parsing pattern, using copies of the original parse element.
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K")
integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M"))
prints::
[5120, 100, 655360, 268435456]
Equivalent form of C{expr.copy()} is just C{expr()}::
integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M")
"""
cpy = copy.copy( self )
cpy.parseAction = self.parseAction[:]
cpy.ignoreExprs = self.ignoreExprs[:]
if self.copyDefaultWhiteChars:
cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS
return cpy
def setName( self, name ):
"""
Define name for this expression, makes debugging and exception messages clearer.
Example::
Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1)
Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1)
"""
self.name = name
self.errmsg = "Expected " + self.name
if hasattr(self,"exception"):
self.exception.msg = self.errmsg
return self
def setResultsName( self, name, listAllMatches=False ):
"""
Define name for referencing matching tokens as a nested attribute
of the returned parse results.
NOTE: this returns a *copy* of the original C{ParserElement} object;
this is so that the client can define a basic element, such as an
integer, and reference it in multiple places with different names.
You can also set results names using the abbreviated syntax,
C{expr("name")} in place of C{expr.setResultsName("name")} -
see L{I{__call__}<__call__>}.
Example::
date_str = (integer.setResultsName("year") + '/'
+ integer.setResultsName("month") + '/'
+ integer.setResultsName("day"))
# equivalent form:
date_str = integer("year") + '/' + integer("month") + '/' + integer("day")
"""
newself = self.copy()
if name.endswith("*"):
name = name[:-1]
listAllMatches=True
newself.resultsName = name
newself.modalResults = not listAllMatches
return newself
def setBreak(self,breakFlag = True):
"""Method to invoke the Python pdb debugger when this element is
about to be parsed. Set C{breakFlag} to True to enable, False to
disable.
"""
if breakFlag:
_parseMethod = self._parse
def breaker(instring, loc, doActions=True, callPreParse=True):
import pdb
pdb.set_trace()
return _parseMethod( instring, loc, doActions, callPreParse )
breaker._originalParseMethod = _parseMethod
self._parse = breaker
else:
if hasattr(self._parse,"_originalParseMethod"):
self._parse = self._parse._originalParseMethod
return self
def setParseAction( self, *fns, **kwargs ):
"""
Define action to perform when successfully matching parse element definition.
Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)},
C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where:
- s = the original string being parsed (see note below)
- loc = the location of the matching substring
- toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object
If the functions in fns modify the tokens, they can return them as the return
value from fn, and the modified list of tokens will replace the original.
Otherwise, fn does not need to return any value.
Optional keyword arguments:
- callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing
Note: the default parsing behavior is to expand tabs in the input string
before starting the parsing process. See L{I{parseString}<parseString>} for more information
on parsing strings containing C{<TAB>}s, and suggested methods to maintain a
consistent view of the parsed string, the parse location, and line and column
positions within the parsed string.
Example::
integer = Word(nums)
date_str = integer + '/' + integer + '/' + integer
date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31']
# use parse action to convert to ints at parse time
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
date_str = integer + '/' + integer + '/' + integer
# note that integer fields are now ints, not strings
date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31]
"""
self.parseAction = list(map(_trim_arity, list(fns)))
self.callDuringTry = kwargs.get("callDuringTry", False)
return self
def addParseAction( self, *fns, **kwargs ):
"""
Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}.
See examples in L{I{copy}<copy>}.
"""
self.parseAction += list(map(_trim_arity, list(fns)))
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def addCondition(self, *fns, **kwargs):
"""Add a boolean predicate function to expression's list of parse actions. See
L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction},
functions passed to C{addCondition} need to return boolean success/fail of the condition.
Optional keyword arguments:
- message = define a custom message to be used in the raised exception
- fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException
Example::
integer = Word(nums).setParseAction(lambda toks: int(toks[0]))
year_int = integer.copy()
year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later")
date_str = year_int + '/' + integer + '/' + integer
result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1)
"""
msg = kwargs.get("message", "failed user-defined condition")
exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException
for fn in fns:
            def pa(s,l,t,fn=fn):
                # bind fn as a default argument so each condition keeps its own callable
                if not bool(_trim_arity(fn)(s,l,t)):
                    raise exc_type(s,l,msg)
self.parseAction.append(pa)
self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False)
return self
def setFailAction( self, fn ):
"""Define action to perform if parsing fails at this expression.
           Fail action fn is a callable function that takes the arguments
C{fn(s,loc,expr,err)} where:
- s = string being parsed
- loc = location where expression match was attempted and failed
- expr = the parse expression that failed
- err = the exception thrown
The function returns no value. It may throw C{L{ParseFatalException}}
if it is desired to stop parsing immediately."""
self.failAction = fn
return self
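    # Editor's note: an illustrative sketch of a fail action, not part of the original
    # docstring. The helper name report_failure and the sample input are assumptions;
    # lineno() and col() are the module-level helpers defined earlier in this file.
    #
    #   def report_failure(s, loc, expr, err):
    #       print("no match for %s at line %d, col %d" % (expr, lineno(loc, s), col(loc, s)))
    #
    #   integer = Word(nums).setFailAction(report_failure)
    #   (integer + integer).parseString("123 abc")  # prints the diagnostic, then raises ParseException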
def _skipIgnorables( self, instring, loc ):
exprsFound = True
while exprsFound:
exprsFound = False
for e in self.ignoreExprs:
try:
while 1:
loc,dummy = e._parse( instring, loc )
exprsFound = True
except ParseException:
pass
return loc
def preParse( self, instring, loc ):
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
if self.skipWhitespace:
wt = self.whiteChars
instrlen = len(instring)
while loc < instrlen and instring[loc] in wt:
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
return loc, []
def postParse( self, instring, loc, tokenlist ):
return tokenlist
#~ @profile
def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ):
debugging = ( self.debug ) #and doActions )
if debugging or self.failAction:
#~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))
if (self.debugActions[0] ):
self.debugActions[0]( instring, loc, self )
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
try:
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
except ParseBaseException as err:
#~ print ("Exception raised:", err)
if self.debugActions[2]:
self.debugActions[2]( instring, tokensStart, self, err )
if self.failAction:
self.failAction( instring, tokensStart, self, err )
raise
else:
if callPreParse and self.callPreparse:
preloc = self.preParse( instring, loc )
else:
preloc = loc
tokensStart = preloc
if self.mayIndexError or loc >= len(instring):
try:
loc,tokens = self.parseImpl( instring, preloc, doActions )
except IndexError:
raise ParseException( instring, len(instring), self.errmsg, self )
else:
loc,tokens = self.parseImpl( instring, preloc, doActions )
tokens = self.postParse( instring, loc, tokens )
retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults )
if self.parseAction and (doActions or self.callDuringTry):
if debugging:
try:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
except ParseBaseException as err:
#~ print "Exception raised in user parse action:", err
if (self.debugActions[2] ):
self.debugActions[2]( instring, tokensStart, self, err )
raise
else:
for fn in self.parseAction:
tokens = fn( instring, tokensStart, retTokens )
if tokens is not None:
retTokens = ParseResults( tokens,
self.resultsName,
asList=self.saveAsList and isinstance(tokens,(ParseResults,list)),
modal=self.modalResults )
if debugging:
#~ print ("Matched",self,"->",retTokens.asList())
if (self.debugActions[1] ):
self.debugActions[1]( instring, tokensStart, loc, self, retTokens )
return loc, retTokens
def tryParse( self, instring, loc ):
try:
return self._parse( instring, loc, doActions=False )[0]
except ParseFatalException:
raise ParseException( instring, loc, self.errmsg, self)
def canParseNext(self, instring, loc):
try:
self.tryParse(instring, loc)
except (ParseException, IndexError):
return False
else:
return True
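    # Editor's note: a brief hedged example of canParseNext as a non-consuming test
    # (not in the original source); the sample string and results are assumptions.
    #
    #   comma = Literal(",")
    #   comma.canParseNext("a,b", 1)   # -> True  (a ',' can be parsed at position 1)
    #   comma.canParseNext("a,b", 0)   # -> False (position 0 holds 'a')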
class _UnboundedCache(object):
def __init__(self):
cache = {}
self.not_in_cache = not_in_cache = object()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
def clear(self):
cache.clear()
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
if _OrderedDict is not None:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = _OrderedDict()
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
if len(cache) > size:
cache.popitem(False)
def clear(self):
cache.clear()
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
else:
class _FifoCache(object):
def __init__(self, size):
self.not_in_cache = not_in_cache = object()
cache = {}
key_fifo = collections.deque([], size)
def get(self, key):
return cache.get(key, not_in_cache)
def set(self, key, value):
cache[key] = value
if len(cache) > size:
cache.pop(key_fifo.popleft(), None)
key_fifo.append(key)
def clear(self):
cache.clear()
key_fifo.clear()
self.get = types.MethodType(get, self)
self.set = types.MethodType(set, self)
self.clear = types.MethodType(clear, self)
# argument cache for optimizing repeated calls when backtracking through recursive expressions
    packrat_cache = {} # this is set later by enablePackrat(); this is here so that resetCache() doesn't fail
packrat_cache_lock = RLock()
packrat_cache_stats = [0, 0]
# this method gets repeatedly called during backtracking with the same arguments -
# we can cache these arguments and save ourselves the trouble of re-parsing the contained expression
def _parseCache( self, instring, loc, doActions=True, callPreParse=True ):
HIT, MISS = 0, 1
lookup = (self, instring, loc, callPreParse, doActions)
with ParserElement.packrat_cache_lock:
cache = ParserElement.packrat_cache
value = cache.get(lookup)
if value is cache.not_in_cache:
ParserElement.packrat_cache_stats[MISS] += 1
try:
value = self._parseNoCache(instring, loc, doActions, callPreParse)
except ParseBaseException as pe:
# cache a copy of the exception, without the traceback
cache.set(lookup, pe.__class__(*pe.args))
raise
else:
cache.set(lookup, (value[0], value[1].copy()))
return value
else:
ParserElement.packrat_cache_stats[HIT] += 1
if isinstance(value, Exception):
raise value
return (value[0], value[1].copy())
_parse = _parseNoCache
@staticmethod
def resetCache():
ParserElement.packrat_cache.clear()
ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats)
_packratEnabled = False
@staticmethod
def enablePackrat(cache_size_limit=128):
"""Enables "packrat" parsing, which adds memoizing to the parsing logic.
Repeated parse attempts at the same string location (which happens
often in many complex grammars) can immediately return a cached value,
           instead of re-executing parsing/validating code. Memoizing is done for
both valid results and parsing exceptions.
Parameters:
- cache_size_limit - (default=C{128}) - if an integer value is provided
will limit the size of the packrat cache; if None is passed, then
the cache size will be unbounded; if 0 is passed, the cache will
be effectively disabled.
This speedup may break existing programs that use parse actions that
have side-effects. For this reason, packrat parsing is disabled when
you first import pyparsing. To activate the packrat feature, your
program must call the class method C{ParserElement.enablePackrat()}. If
your program uses C{psyco} to "compile as you go", you must call
C{enablePackrat} before calling C{psyco.full()}. If you do not do this,
Python will crash. For best results, call C{enablePackrat()} immediately
after importing pyparsing.
Example::
import pyparsing
pyparsing.ParserElement.enablePackrat()
"""
if not ParserElement._packratEnabled:
ParserElement._packratEnabled = True
if cache_size_limit is None:
ParserElement.packrat_cache = ParserElement._UnboundedCache()
else:
ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit)
ParserElement._parse = ParserElement._parseCache
def parseString( self, instring, parseAll=False ):
"""
Execute the parse expression with the given string.
This is the main interface to the client code, once the complete
expression has been built.
If you want the grammar to require that the entire input string be
successfully parsed, then set C{parseAll} to True (equivalent to ending
the grammar with C{L{StringEnd()}}).
Note: C{parseString} implicitly calls C{expandtabs()} on the input string,
in order to report proper column numbers in parse actions.
If the input string contains tabs and
the grammar uses parse actions that use the C{loc} argument to index into the
string being parsed, you can ensure you have a consistent view of the input
string by:
- calling C{parseWithTabs} on your grammar before calling C{parseString}
(see L{I{parseWithTabs}<parseWithTabs>})
- define your parse action using the full C{(s,loc,toks)} signature, and
reference the input string using the parse action's C{s} argument
         - explicitly expand the tabs in your input string before calling
C{parseString}
Example::
Word('a').parseString('aaaaabaaa') # -> ['aaaaa']
Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text
"""
ParserElement.resetCache()
if not self.streamlined:
self.streamline()
#~ self.saveAsList = True
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = instring.expandtabs()
try:
loc, tokens = self._parse( instring, 0 )
if parseAll:
loc = self.preParse( instring, loc )
se = Empty() + StringEnd()
se._parse( instring, loc )
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
else:
return tokens
def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ):
"""
Scan the input string for expression matches. Each match will return the
matching tokens, start location, and end location. May be called with optional
C{maxMatches} argument, to clip scanning after 'n' matches are found. If
C{overlap} is specified, then overlapping matches will be reported.
Note that the start and end locations are reported relative to the string
being parsed. See L{I{parseString}<parseString>} for more information on parsing
strings with embedded tabs.
Example::
source = "sldjf123lsdjjkf345sldkjf879lkjsfd987"
print(source)
for tokens,start,end in Word(alphas).scanString(source):
print(' '*start + '^'*(end-start))
print(' '*start + tokens[0])
prints::
sldjf123lsdjjkf345sldkjf879lkjsfd987
^^^^^
sldjf
^^^^^^^
lsdjjkf
^^^^^^
sldkjf
^^^^^^
lkjsfd
"""
if not self.streamlined:
self.streamline()
for e in self.ignoreExprs:
e.streamline()
if not self.keepTabs:
instring = _ustr(instring).expandtabs()
instrlen = len(instring)
loc = 0
preparseFn = self.preParse
parseFn = self._parse
ParserElement.resetCache()
matches = 0
try:
while loc <= instrlen and matches < maxMatches:
try:
preloc = preparseFn( instring, loc )
nextLoc,tokens = parseFn( instring, preloc, callPreParse=False )
except ParseException:
loc = preloc+1
else:
if nextLoc > loc:
matches += 1
yield tokens, preloc, nextLoc
if overlap:
nextloc = preparseFn( instring, loc )
if nextloc > loc:
                                loc = nextloc
else:
loc += 1
else:
loc = nextLoc
else:
loc = preloc+1
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def transformString( self, instring ):
"""
Extension to C{L{scanString}}, to modify matching text with modified tokens that may
be returned from a parse action. To use C{transformString}, define a grammar and
attach a parse action to it that modifies the returned token list.
Invoking C{transformString()} on a target string will then scan for matches,
and replace the matched text patterns according to the logic in the parse
action. C{transformString()} returns the resulting transformed string.
Example::
wd = Word(alphas)
wd.setParseAction(lambda toks: toks[0].title())
print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york."))
Prints::
Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York.
"""
out = []
lastE = 0
# force preservation of <TAB>s, to minimize unwanted transformation of string, and to
# keep string locs straight between transformString and scanString
self.keepTabs = True
try:
for t,s,e in self.scanString( instring ):
out.append( instring[lastE:s] )
if t:
if isinstance(t,ParseResults):
out += t.asList()
elif isinstance(t,list):
out += t
else:
out.append(t)
lastE = e
out.append(instring[lastE:])
out = [o for o in out if o]
return "".join(map(_ustr,_flatten(out)))
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def searchString( self, instring, maxMatches=_MAX_INT ):
"""
Another extension to C{L{scanString}}, simplifying the access to the tokens found
to match the given parse expression. May be called with optional
C{maxMatches} argument, to clip searching after 'n' matches are found.
Example::
# a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters
cap_word = Word(alphas.upper(), alphas.lower())
print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity"))
prints::
['More', 'Iron', 'Lead', 'Gold', 'I']
"""
try:
return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ])
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False):
"""
Generator method to split a string using the given expression as a separator.
May be called with optional C{maxsplit} argument, to limit the number of splits;
        and the optional C{includeSeparators} argument (default=C{False}), to indicate whether
        the separating matching text should be included in the split results.
Example::
punc = oneOf(list(".,;:/-!?"))
print(list(punc.split("This, this?, this sentence, is badly punctuated!")))
prints::
['This', ' this', '', ' this sentence', ' is badly punctuated', '']
"""
splits = 0
last = 0
for t,s,e in self.scanString(instring, maxMatches=maxsplit):
yield instring[last:s]
if includeSeparators:
yield t[0]
last = e
yield instring[last:]
def __add__(self, other ):
"""
Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement
converts them to L{Literal}s by default.
Example::
greet = Word(alphas) + "," + Word(alphas) + "!"
hello = "Hello, World!"
print (hello, "->", greet.parseString(hello))
Prints::
Hello, World! -> ['Hello', ',', 'World', '!']
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, other ] )
def __radd__(self, other ):
"""
Implementation of + operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other + self
def __sub__(self, other):
"""
Implementation of - operator, returns C{L{And}} with error stop
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return And( [ self, And._ErrorStop(), other ] )
def __rsub__(self, other ):
"""
Implementation of - operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other - self
def __mul__(self,other):
"""
Implementation of * operator, allows use of C{expr * 3} in place of
        C{expr + expr + expr}. Expressions may also be multiplied by a 2-integer
tuple, similar to C{{min,max}} multipliers in regular expressions. Tuples
may also include C{None} as in:
- C{expr*(n,None)} or C{expr*(n,)} is equivalent
to C{expr*n + L{ZeroOrMore}(expr)}
(read as "at least n instances of C{expr}")
- C{expr*(None,n)} is equivalent to C{expr*(0,n)}
(read as "0 to n instances of C{expr}")
- C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)}
- C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)}
Note that C{expr*(None,n)} does not raise an exception if
more than n exprs exist in the input stream; that is,
C{expr*(None,n)} does not enforce a maximum number of expr
occurrences. If this behavior is desired, then write
C{expr*(None,n) + ~expr}
"""
if isinstance(other,int):
minElements, optElements = other,0
elif isinstance(other,tuple):
other = (other + (None, None))[:2]
if other[0] is None:
other = (0, other[1])
if isinstance(other[0],int) and other[1] is None:
if other[0] == 0:
return ZeroOrMore(self)
if other[0] == 1:
return OneOrMore(self)
else:
return self*other[0] + ZeroOrMore(self)
elif isinstance(other[0],int) and isinstance(other[1],int):
minElements, optElements = other
optElements -= minElements
else:
raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1]))
else:
raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other))
if minElements < 0:
raise ValueError("cannot multiply ParserElement by negative value")
if optElements < 0:
raise ValueError("second tuple value must be greater or equal to first tuple value")
if minElements == optElements == 0:
raise ValueError("cannot multiply ParserElement by 0 or (0,0)")
if (optElements):
def makeOptionalList(n):
if n>1:
return Optional(self + makeOptionalList(n-1))
else:
return Optional(self)
if minElements:
if minElements == 1:
ret = self + makeOptionalList(optElements)
else:
ret = And([self]*minElements) + makeOptionalList(optElements)
else:
ret = makeOptionalList(optElements)
else:
if minElements == 1:
ret = self
else:
ret = And([self]*minElements)
return ret
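    # Editor's note: a short illustrative sketch of the * operator described above
    # (not part of the original docstring); the grammar and results are assumptions.
    #
    #   ip_field = Word(nums, max=3)
    #   ip_addr  = ip_field + ('.' + ip_field) * 3
    #   ip_addr.parseString("192.168.0.1")   # -> ['192', '.', '168', '.', '0', '.', '1']
    #
    #   Word(alphas) * (2, None)   # at least two words
    #   Word(alphas) * (None, 3)   # zero to three words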
def __rmul__(self, other):
return self.__mul__(other)
def __or__(self, other ):
"""
Implementation of | operator - returns C{L{MatchFirst}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return MatchFirst( [ self, other ] )
def __ror__(self, other ):
"""
Implementation of | operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other | self
def __xor__(self, other ):
"""
Implementation of ^ operator - returns C{L{Or}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Or( [ self, other ] )
def __rxor__(self, other ):
"""
Implementation of ^ operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other ^ self
def __and__(self, other ):
"""
Implementation of & operator - returns C{L{Each}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return Each( [ self, other ] )
def __rand__(self, other ):
"""
Implementation of & operator when left operand is not a C{L{ParserElement}}
"""
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
if not isinstance( other, ParserElement ):
warnings.warn("Cannot combine element of type %s with ParserElement" % type(other),
SyntaxWarning, stacklevel=2)
return None
return other & self
def __invert__( self ):
"""
Implementation of ~ operator - returns C{L{NotAny}}
"""
return NotAny( self )
def __call__(self, name=None):
"""
Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}.
If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be
passed as C{True}.
If C{name} is omitted, same as calling C{L{copy}}.
Example::
# these are equivalent
userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno")
userdata = Word(alphas)("name") + Word(nums+"-")("socsecno")
"""
if name is not None:
return self.setResultsName(name)
else:
return self.copy()
def suppress( self ):
"""
Suppresses the output of this C{ParserElement}; useful to keep punctuation from
cluttering up returned output.
"""
return Suppress( self )
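    # Editor's note: hedged usage sketch for suppress(), not part of the original
    # docstring; the sample grammar and expected tokens are assumptions.
    #
    #   pair = Word(alphas) + Suppress(',') + Word(alphas)
    #   pair.parseString("red, green")   # -> ['red', 'green']
    #
    #   # equivalent, using the method form on a Literal:
    #   pair = Word(alphas) + Literal(',').suppress() + Word(alphas)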
def leaveWhitespace( self ):
"""
Disables the skipping of whitespace before matching the characters in the
C{ParserElement}'s defined pattern. This is normally only used internally by
the pyparsing module, but may be needed in some whitespace-sensitive grammars.
"""
self.skipWhitespace = False
return self
def setWhitespaceChars( self, chars ):
"""
Overrides the default whitespace chars
"""
self.skipWhitespace = True
self.whiteChars = chars
self.copyDefaultWhiteChars = False
return self
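    # Editor's note: hedged illustration of overriding whitespace for a single expression
    # (not in the original docstring); the sample input and result are assumptions.
    #
    #   # stop treating newlines as skippable whitespace for this Word only
    #   word = Word(alphas).setWhitespaceChars(" \t")
    #   OneOrMore(word).parseString("abc def\nghi")   # -> ['abc', 'def']
    #
    # Contrast with setDefaultWhitespaceChars, which changes the default for all
    # expressions created afterwards.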
def parseWithTabs( self ):
"""
Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string.
Must be called before C{parseString} when the input grammar contains elements that
match C{<TAB>} characters.
"""
self.keepTabs = True
return self
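    # Editor's note: a hedged sketch of when parseWithTabs is needed (not part of the
    # original docstring); the sample input is an assumption.
    #
    #   # by default parseString calls expandtabs(), so reported locations refer to the
    #   # expanded text; keep the raw tabs when locations must index the original input
    #   label = Word(alphas)
    #   label.parseWithTabs()
    #   label.parseString("\tkey")   # locations now refer to the tab-containing string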
def ignore( self, other ):
"""
Define expression to be ignored (e.g., comments) while doing pattern
matching; may be called repeatedly, to define multiple comment or other
ignorable patterns.
Example::
patt = OneOrMore(Word(alphas))
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj']
patt.ignore(cStyleComment)
patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd']
"""
if isinstance(other, basestring):
other = Suppress(other)
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
self.ignoreExprs.append(other)
else:
self.ignoreExprs.append( Suppress( other.copy() ) )
return self
def setDebugActions( self, startAction, successAction, exceptionAction ):
"""
Enable display of debugging messages while doing pattern matching.
"""
self.debugActions = (startAction or _defaultStartDebugAction,
successAction or _defaultSuccessDebugAction,
exceptionAction or _defaultExceptionDebugAction)
self.debug = True
return self
def setDebug( self, flag=True ):
"""
Enable display of debugging messages while doing pattern matching.
Set C{flag} to True to enable, False to disable.
Example::
wd = Word(alphas).setName("alphaword")
integer = Word(nums).setName("numword")
term = wd | integer
# turn on debugging for wd
wd.setDebug()
OneOrMore(term).parseString("abc 123 xyz 890")
prints::
Match alphaword at loc 0(1,1)
Matched alphaword -> ['abc']
Match alphaword at loc 3(1,4)
Exception raised:Expected alphaword (at char 4), (line:1, col:5)
Match alphaword at loc 7(1,8)
Matched alphaword -> ['xyz']
Match alphaword at loc 11(1,12)
Exception raised:Expected alphaword (at char 12), (line:1, col:13)
Match alphaword at loc 15(1,16)
Exception raised:Expected alphaword (at char 15), (line:1, col:16)
The output shown is that produced by the default debug actions - custom debug actions can be
specified using L{setDebugActions}. Prior to attempting
to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"}
is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"}
message is shown. Also note the use of L{setName} to assign a human-readable name to the expression,
which makes debugging and exception messages easier to understand - for instance, the default
name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}.
"""
if flag:
self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction )
else:
self.debug = False
return self
def __str__( self ):
return self.name
def __repr__( self ):
return _ustr(self)
def streamline( self ):
self.streamlined = True
self.strRepr = None
return self
def checkRecursion( self, parseElementList ):
pass
def validate( self, validateTrace=[] ):
"""
Check defined expressions for valid structure, check for infinite recursive definitions.
"""
self.checkRecursion( [] )
def parseFile( self, file_or_filename, parseAll=False ):
"""
Execute the parse expression on the given file or filename.
If a filename is specified (instead of a file object),
the entire file is opened, read, and closed before parsing.
"""
try:
file_contents = file_or_filename.read()
except AttributeError:
with open(file_or_filename, "r") as f:
file_contents = f.read()
try:
return self.parseString(file_contents, parseAll)
except ParseBaseException as exc:
if ParserElement.verbose_stacktrace:
raise
else:
# catch and re-raise exception from here, clears out pyparsing internal stack trace
raise exc
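    # Editor's note: hedged usage sketch for parseFile (not in the original docstring);
    # the file name 'settings.txt' and the grammar are hypothetical.
    #
    #   assignment = Group(Word(alphas) + Suppress('=') + Word(nums))
    #   results = OneOrMore(assignment).parseFile("settings.txt")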
def __eq__(self,other):
if isinstance(other, ParserElement):
return self is other or vars(self) == vars(other)
elif isinstance(other, basestring):
return self.matches(other)
else:
return super(ParserElement,self)==other
def __ne__(self,other):
return not (self == other)
def __hash__(self):
return hash(id(self))
def __req__(self,other):
return self == other
def __rne__(self,other):
return not (self == other)
def matches(self, testString, parseAll=True):
"""
Method for quick testing of a parser against a test string. Good for simple
        inline microtests of sub expressions while building up a larger parser.
Parameters:
- testString - to test against this expression for a match
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
Example::
expr = Word(nums)
assert expr.matches("100")
"""
try:
self.parseString(_ustr(testString), parseAll=parseAll)
return True
except ParseBaseException:
return False
def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False):
"""
Execute the parse expression on a series of test strings, showing each
test, the parsed results or where the parse failed. Quick and easy way to
run a parse expression against a list of sample strings.
Parameters:
- tests - a list of separate test strings, or a multiline string of test strings
- parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests
- comment - (default=C{'#'}) - expression for indicating embedded comments in the test
string; pass None to disable comment filtering
- fullDump - (default=C{True}) - dump results as list followed by results names in nested outline;
if False, only dump nested list
- printResults - (default=C{True}) prints test output to stdout
- failureTests - (default=C{False}) indicates if these tests are expected to fail parsing
Returns: a (success, results) tuple, where success indicates that all tests succeeded
(or failed if C{failureTests} is True), and the results contain a list of lines of each
test's output
Example::
number_expr = pyparsing_common.number.copy()
result = number_expr.runTests('''
# unsigned integer
100
# negative integer
-100
# float with scientific notation
6.02e23
# integer with scientific notation
1e-12
''')
print("Success" if result[0] else "Failed!")
result = number_expr.runTests('''
# stray character
100Z
# missing leading digit before '.'
-.100
# too many '.'
3.14.159
''', failureTests=True)
print("Success" if result[0] else "Failed!")
prints::
# unsigned integer
100
[100]
# negative integer
-100
[-100]
# float with scientific notation
6.02e23
[6.02e+23]
# integer with scientific notation
1e-12
[1e-12]
Success
# stray character
100Z
^
FAIL: Expected end of text (at char 3), (line:1, col:4)
# missing leading digit before '.'
-.100
^
FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1)
# too many '.'
3.14.159
^
FAIL: Expected end of text (at char 4), (line:1, col:5)
Success
Each test string must be on a single line. If you want to test a string that spans multiple
lines, create a test like this::
            expr.runTests(r"this is a test\\n of strings that spans \\n 3 lines")
(Note that this is a raw string literal, you must include the leading 'r'.)
"""
if isinstance(tests, basestring):
tests = list(map(str.strip, tests.rstrip().splitlines()))
if isinstance(comment, basestring):
comment = Literal(comment)
allResults = []
comments = []
success = True
for t in tests:
if comment is not None and comment.matches(t, False) or comments and not t:
comments.append(t)
continue
if not t:
continue
out = ['\n'.join(comments), t]
comments = []
try:
t = t.replace(r'\n','\n')
result = self.parseString(t, parseAll=parseAll)
out.append(result.dump(full=fullDump))
success = success and not failureTests
except ParseBaseException as pe:
fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else ""
if '\n' in t:
out.append(line(pe.loc, t))
out.append(' '*(col(pe.loc,t)-1) + '^' + fatal)
else:
out.append(' '*pe.loc + '^' + fatal)
out.append("FAIL: " + str(pe))
success = success and failureTests
result = pe
except Exception as exc:
out.append("FAIL-EXCEPTION: " + str(exc))
success = success and failureTests
result = exc
if printResults:
if fullDump:
out.append('')
print('\n'.join(out))
allResults.append((t, result))
return success, allResults
class Token(ParserElement):
"""
Abstract C{ParserElement} subclass, for defining atomic matching patterns.
"""
def __init__( self ):
super(Token,self).__init__( savelist=False )
class Empty(Token):
"""
An empty token, will always match.
"""
def __init__( self ):
super(Empty,self).__init__()
self.name = "Empty"
self.mayReturnEmpty = True
self.mayIndexError = False
class NoMatch(Token):
"""
A token that will never match.
"""
def __init__( self ):
super(NoMatch,self).__init__()
self.name = "NoMatch"
self.mayReturnEmpty = True
self.mayIndexError = False
self.errmsg = "Unmatchable token"
def parseImpl( self, instring, loc, doActions=True ):
raise ParseException(instring, loc, self.errmsg, self)
class Literal(Token):
"""
Token to exactly match a specified string.
Example::
Literal('blah').parseString('blah') # -> ['blah']
Literal('blah').parseString('blahfooblah') # -> ['blah']
Literal('blah').parseString('bla') # -> Exception: Expected "blah"
For case-insensitive matching, use L{CaselessLiteral}.
For keyword matching (force word break before and after the matched string),
use L{Keyword} or L{CaselessKeyword}.
"""
def __init__( self, matchString ):
super(Literal,self).__init__()
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Literal; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.__class__ = Empty
self.name = '"%s"' % _ustr(self.match)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
# Performance tuning: this routine gets called a *lot*
# if this is a single character match string and the first character matches,
# short-circuit as quickly as possible, and avoid calling startswith
#~ @profile
def parseImpl( self, instring, loc, doActions=True ):
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
_L = Literal
ParserElement._literalStringClass = Literal
class Keyword(Token):
"""
Token to exactly match a specified string as a keyword, that is, it must be
immediately followed by a non-keyword character. Compare with C{L{Literal}}:
- C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}.
- C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'}
Accepts two optional constructor arguments in addition to the keyword string:
- C{identChars} is a string of characters that would be valid identifier characters,
defaulting to all alphanumerics + "_" and "$"
- C{caseless} allows case-insensitive matching, default is C{False}.
Example::
Keyword("start").parseString("start") # -> ['start']
Keyword("start").parseString("starting") # -> Exception
For case-insensitive matching, use L{CaselessKeyword}.
"""
DEFAULT_KEYWORD_CHARS = alphanums+"_$"
def __init__( self, matchString, identChars=None, caseless=False ):
super(Keyword,self).__init__()
if identChars is None:
identChars = Keyword.DEFAULT_KEYWORD_CHARS
self.match = matchString
self.matchLen = len(matchString)
try:
self.firstMatchChar = matchString[0]
except IndexError:
warnings.warn("null string passed to Keyword; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.name = '"%s"' % self.match
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = False
self.mayIndexError = False
self.caseless = caseless
if caseless:
self.caselessmatch = matchString.upper()
identChars = identChars.upper()
self.identChars = set(identChars)
def parseImpl( self, instring, loc, doActions=True ):
if self.caseless:
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and
(loc == 0 or instring[loc-1].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
else:
if (instring[loc] == self.firstMatchChar and
(self.matchLen==1 or instring.startswith(self.match,loc)) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and
(loc == 0 or instring[loc-1] not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
def copy(self):
c = super(Keyword,self).copy()
c.identChars = Keyword.DEFAULT_KEYWORD_CHARS
return c
@staticmethod
def setDefaultKeywordChars( chars ):
"""Overrides the default Keyword chars
"""
Keyword.DEFAULT_KEYWORD_CHARS = chars
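# Editor's note: hedged example of changing the default keyword characters (not part
# of the original source); the sample strings and outcomes are assumptions.
#
#   Keyword("end").parseString("end-of-data")          # -> ['end']  ('-' is not a keyword char)
#   Keyword.setDefaultKeywordChars(alphanums + "_$-")
#   Keyword("end").parseString("end-of-data")          # now raises ParseException, since '-'
#                                                      # counts as part of an identifier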
class CaselessLiteral(Literal):
"""
Token to match a specified string, ignoring case of letters.
Note: the matched results will always be in the case of the given
match string, NOT the case of the input text.
Example::
OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD']
(Contrast with example for L{CaselessKeyword}.)
"""
def __init__( self, matchString ):
super(CaselessLiteral,self).__init__( matchString.upper() )
# Preserve the defining literal.
self.returnString = matchString
self.name = "'%s'" % self.returnString
self.errmsg = "Expected " + self.name
def parseImpl( self, instring, loc, doActions=True ):
if instring[ loc:loc+self.matchLen ].upper() == self.match:
return loc+self.matchLen, self.returnString
raise ParseException(instring, loc, self.errmsg, self)
class CaselessKeyword(Keyword):
"""
Caseless version of L{Keyword}.
Example::
OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD']
(Contrast with example for L{CaselessLiteral}.)
"""
def __init__( self, matchString, identChars=None ):
super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True )
def parseImpl( self, instring, loc, doActions=True ):
if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and
(loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ):
return loc+self.matchLen, self.match
raise ParseException(instring, loc, self.errmsg, self)
class CloseMatch(Token):
"""
A variation on L{Literal} which matches "close" matches, that is,
strings with at most 'n' mismatching characters. C{CloseMatch} takes parameters:
- C{match_string} - string to be matched
- C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match
The results from a successful parse will contain the matched text from the input string and the following named results:
- C{mismatches} - a list of the positions within the match_string where mismatches were found
- C{original} - the original match_string used to compare against the input string
If C{mismatches} is an empty list, then the match was an exact match.
Example::
patt = CloseMatch("ATCATCGAATGGA")
patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']})
patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1)
# exact match
patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']})
# close match allowing up to 2 mismatches
patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2)
patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']})
"""
def __init__(self, match_string, maxMismatches=1):
super(CloseMatch,self).__init__()
self.name = match_string
self.match_string = match_string
self.maxMismatches = maxMismatches
self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches)
self.mayIndexError = False
self.mayReturnEmpty = False
def parseImpl( self, instring, loc, doActions=True ):
start = loc
instrlen = len(instring)
maxloc = start + len(self.match_string)
if maxloc <= instrlen:
match_string = self.match_string
match_stringloc = 0
mismatches = []
maxMismatches = self.maxMismatches
for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)):
src,mat = s_m
if src != mat:
mismatches.append(match_stringloc)
if len(mismatches) > maxMismatches:
break
else:
loc = match_stringloc + 1
results = ParseResults([instring[start:loc]])
results['original'] = self.match_string
results['mismatches'] = mismatches
return loc, results
raise ParseException(instring, loc, self.errmsg, self)
class Word(Token):
"""
Token for matching words composed of allowed character sets.
Defined with string containing all allowed initial characters,
an optional string containing allowed body characters (if omitted,
defaults to the initial character set), and an optional minimum,
maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction. An optional
C{excludeChars} parameter can list characters that might be found in
the input C{bodyChars} string; useful to define a word of all printables
except for one or two characters, for instance.
L{srange} is useful for defining custom character set strings for defining
C{Word} expressions, using range notation from regular expression character sets.
A common mistake is to use C{Word} to match a specific literal string, as in
C{Word("Address")}. Remember that C{Word} uses the string argument to define
I{sets} of matchable characters. This expression would match "Add", "AAA",
"dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'.
To match an exact literal string, use L{Literal} or L{Keyword}.
pyparsing includes helper strings for building Words:
- L{alphas}
- L{nums}
- L{alphanums}
- L{hexnums}
- L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.)
- L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.)
- L{printables} (any non-whitespace character)
Example::
# a word composed of digits
integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9"))
# a word with a leading capital, and zero or more lowercase
capital_word = Word(alphas.upper(), alphas.lower())
# hostnames are alphanumeric, with leading alpha, and '-'
hostname = Word(alphas, alphanums+'-')
# roman numeral (not a strict parser, accepts invalid mix of characters)
roman = Word("IVXLCDM")
# any string of non-whitespace characters, except for ','
csv_value = Word(printables, excludeChars=",")
"""
def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ):
super(Word,self).__init__()
if excludeChars:
initChars = ''.join(c for c in initChars if c not in excludeChars)
if bodyChars:
bodyChars = ''.join(c for c in bodyChars if c not in excludeChars)
self.initCharsOrig = initChars
self.initChars = set(initChars)
if bodyChars :
self.bodyCharsOrig = bodyChars
self.bodyChars = set(bodyChars)
else:
self.bodyCharsOrig = initChars
self.bodyChars = set(initChars)
self.maxSpecified = max > 0
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.asKeyword = asKeyword
if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0):
if self.bodyCharsOrig == self.initCharsOrig:
self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig)
elif len(self.initCharsOrig) == 1:
self.reString = "%s[%s]*" % \
(re.escape(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
else:
self.reString = "[%s][%s]*" % \
(_escapeRegexRangeChars(self.initCharsOrig),
_escapeRegexRangeChars(self.bodyCharsOrig),)
if self.asKeyword:
self.reString = r"\b"+self.reString+r"\b"
try:
self.re = re.compile( self.reString )
except Exception:
self.re = None
def parseImpl( self, instring, loc, doActions=True ):
if self.re:
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
if not(instring[ loc ] in self.initChars):
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
bodychars = self.bodyChars
maxloc = start + self.maxLen
maxloc = min( maxloc, instrlen )
while loc < maxloc and instring[loc] in bodychars:
loc += 1
throwException = False
if loc - start < self.minLen:
throwException = True
if self.maxSpecified and loc < instrlen and instring[loc] in bodychars:
throwException = True
if self.asKeyword:
if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars):
throwException = True
if throwException:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(Word,self).__str__()
except Exception:
pass
if self.strRepr is None:
def charsAsStr(s):
if len(s)>4:
return s[:4]+"..."
else:
return s
if ( self.initCharsOrig != self.bodyCharsOrig ):
self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) )
else:
self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig)
return self.strRepr
class Regex(Token):
"""
Token for matching strings that match a given regular expression.
Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module.
If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as
named parse results.
Example::
realnum = Regex(r"[+-]?\d+\.\d*")
date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)')
# ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression
roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})")
"""
compiledREtype = type(re.compile("[A-Z]"))
def __init__( self, pattern, flags=0):
"""The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags."""
super(Regex,self).__init__()
if isinstance(pattern, basestring):
if not pattern:
warnings.warn("null string passed to Regex; use Empty() instead",
SyntaxWarning, stacklevel=2)
self.pattern = pattern
self.flags = flags
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % pattern,
SyntaxWarning, stacklevel=2)
raise
elif isinstance(pattern, Regex.compiledREtype):
self.re = pattern
self.pattern = \
self.reString = str(pattern)
self.flags = flags
else:
raise ValueError("Regex may only be constructed with a string or a compiled RE object")
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = self.re.match(instring,loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
d = result.groupdict()
ret = ParseResults(result.group())
if d:
for k in d:
ret[k] = d[k]
return loc,ret
def __str__( self ):
try:
return super(Regex,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "Re:(%s)" % repr(self.pattern)
return self.strRepr
class QuotedString(Token):
r"""
Token for matching strings that are delimited by quoting characters.
Defined with the following parameters:
- quoteChar - string of one or more characters defining the quote delimiting string
- escChar - character to escape quotes, typically backslash (default=C{None})
- escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None})
- multiline - boolean indicating whether quotes can span multiple lines (default=C{False})
- unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True})
- endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar)
- convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True})
Example::
qs = QuotedString('"')
print(qs.searchString('lsjdf "This is the quote" sldjf'))
complex_qs = QuotedString('{{', endQuoteChar='}}')
print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf'))
sql_qs = QuotedString('"', escQuote='""')
print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf'))
prints::
[['This is the quote']]
[['This is the "quote"']]
[['This is the quote with "embedded" quotes']]
"""
def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True):
super(QuotedString,self).__init__()
        # remove white space from quote chars - won't work anyway
quoteChar = quoteChar.strip()
if not quoteChar:
warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
if endQuoteChar is None:
endQuoteChar = quoteChar
else:
endQuoteChar = endQuoteChar.strip()
if not endQuoteChar:
warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2)
raise SyntaxError()
self.quoteChar = quoteChar
self.quoteCharLen = len(quoteChar)
self.firstQuoteChar = quoteChar[0]
self.endQuoteChar = endQuoteChar
self.endQuoteCharLen = len(endQuoteChar)
self.escChar = escChar
self.escQuote = escQuote
self.unquoteResults = unquoteResults
self.convertWhitespaceEscapes = convertWhitespaceEscapes
if multiline:
self.flags = re.MULTILINE | re.DOTALL
self.pattern = r'%s(?:[^%s%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
else:
self.flags = 0
self.pattern = r'%s(?:[^%s\n\r%s]' % \
( re.escape(self.quoteChar),
_escapeRegexRangeChars(self.endQuoteChar[0]),
(escChar is not None and _escapeRegexRangeChars(escChar) or '') )
if len(self.endQuoteChar) > 1:
self.pattern += (
'|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]),
_escapeRegexRangeChars(self.endQuoteChar[i]))
for i in range(len(self.endQuoteChar)-1,0,-1)) + ')'
)
if escQuote:
self.pattern += (r'|(?:%s)' % re.escape(escQuote))
if escChar:
self.pattern += (r'|(?:%s.)' % re.escape(escChar))
self.escCharReplacePattern = re.escape(self.escChar)+"(.)"
self.pattern += (r')*%s' % re.escape(self.endQuoteChar))
try:
self.re = re.compile(self.pattern, self.flags)
self.reString = self.pattern
except sre_constants.error:
warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern,
SyntaxWarning, stacklevel=2)
raise
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayIndexError = False
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
ret = result.group()
if self.unquoteResults:
# strip off quotes
ret = ret[self.quoteCharLen:-self.endQuoteCharLen]
if isinstance(ret,basestring):
# replace escaped whitespace
if '\\' in ret and self.convertWhitespaceEscapes:
ws_map = {
r'\t' : '\t',
r'\n' : '\n',
r'\f' : '\f',
r'\r' : '\r',
}
for wslit,wschar in ws_map.items():
ret = ret.replace(wslit, wschar)
# replace escaped characters
if self.escChar:
                    ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret)
# replace escaped quotes
if self.escQuote:
ret = ret.replace(self.escQuote, self.endQuoteChar)
return loc, ret
def __str__( self ):
try:
return super(QuotedString,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar)
return self.strRepr
class CharsNotIn(Token):
"""
Token for matching words composed of characters I{not} in a given set (will
include whitespace in matched characters if not listed in the provided exclusion set - see example).
Defined with string containing all disallowed characters, and an optional
minimum, maximum, and/or exact length. The default value for C{min} is 1 (a
minimum value < 1 is not valid); the default values for C{max} and C{exact}
are 0, meaning no maximum or exact length restriction.
Example::
# define a comma-separated-value as anything that is not a ','
csv_value = CharsNotIn(',')
print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213"))
prints::
['dkls', 'lsdkjf', 's12 34', '@!#', '213']
"""
def __init__( self, notChars, min=1, max=0, exact=0 ):
super(CharsNotIn,self).__init__()
self.skipWhitespace = False
self.notChars = notChars
if min < 1:
raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted")
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
self.name = _ustr(self)
self.errmsg = "Expected " + self.name
self.mayReturnEmpty = ( self.minLen == 0 )
self.mayIndexError = False
def parseImpl( self, instring, loc, doActions=True ):
if instring[loc] in self.notChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
notchars = self.notChars
maxlen = min( start+self.maxLen, len(instring) )
while loc < maxlen and \
(instring[loc] not in notchars):
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def __str__( self ):
try:
return super(CharsNotIn, self).__str__()
except Exception:
pass
if self.strRepr is None:
if len(self.notChars) > 4:
self.strRepr = "!W:(%s...)" % self.notChars[:4]
else:
self.strRepr = "!W:(%s)" % self.notChars
return self.strRepr
class White(Token):
"""
Special matching class for matching whitespace. Normally, whitespace is ignored
by pyparsing grammars. This class is included when some whitespace structures
are significant. Define with a string containing the whitespace characters to be
matched; default is C{" \\t\\r\\n"}. Also takes optional C{min}, C{max}, and C{exact} arguments,
as defined for the C{L{Word}} class.
"""
whiteStrs = {
" " : "<SPC>",
"\t": "<TAB>",
"\n": "<LF>",
"\r": "<CR>",
"\f": "<FF>",
}
def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0):
super(White,self).__init__()
self.matchWhite = ws
self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) )
#~ self.leaveWhitespace()
self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite))
self.mayReturnEmpty = True
self.errmsg = "Expected " + self.name
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
self.maxLen = exact
self.minLen = exact
def parseImpl( self, instring, loc, doActions=True ):
        if instring[loc] not in self.matchWhite:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
maxloc = start + self.maxLen
maxloc = min( maxloc, len(instring) )
while loc < maxloc and instring[loc] in self.matchWhite:
loc += 1
if loc - start < self.minLen:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
class _PositionToken(Token):
def __init__( self ):
super(_PositionToken,self).__init__()
self.name=self.__class__.__name__
self.mayReturnEmpty = True
self.mayIndexError = False
class GoToColumn(_PositionToken):
"""
Token to advance to a specific column of input text; useful for tabular report scraping.
"""
def __init__( self, colno ):
super(GoToColumn,self).__init__()
self.col = colno
def preParse( self, instring, loc ):
if col(loc,instring) != self.col:
instrlen = len(instring)
if self.ignoreExprs:
loc = self._skipIgnorables( instring, loc )
while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col :
loc += 1
return loc
def parseImpl( self, instring, loc, doActions=True ):
thiscol = col( loc, instring )
if thiscol > self.col:
raise ParseException( instring, loc, "Text not in expected column", self )
newloc = loc + self.col - thiscol
ret = instring[ loc: newloc ]
return newloc, ret
class LineStart(_PositionToken):
"""
Matches if current position is at the beginning of a line within the parse string
Example::
test = '''\
AAA this line
AAA and this line
AAA but not this one
B AAA and definitely not this one
'''
for t in (LineStart() + 'AAA' + restOfLine).searchString(test):
print(t)
Prints::
['AAA', ' this line']
['AAA', ' and this line']
"""
def __init__( self ):
super(LineStart,self).__init__()
self.errmsg = "Expected start of line"
def parseImpl( self, instring, loc, doActions=True ):
if col(loc, instring) == 1:
return loc, []
raise ParseException(instring, loc, self.errmsg, self)
class LineEnd(_PositionToken):
"""
Matches if current position is at the end of a line within the parse string
"""
def __init__( self ):
super(LineEnd,self).__init__()
self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") )
self.errmsg = "Expected end of line"
def parseImpl( self, instring, loc, doActions=True ):
if loc<len(instring):
if instring[loc] == "\n":
return loc+1, "\n"
else:
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class StringStart(_PositionToken):
"""
Matches if current position is at the beginning of the parse string
"""
def __init__( self ):
super(StringStart,self).__init__()
self.errmsg = "Expected start of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc != 0:
# see if entire string up to here is just whitespace and ignoreables
if loc != self.preParse( instring, 0 ):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class StringEnd(_PositionToken):
"""
Matches if current position is at the end of the parse string
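    Example (a minimal sketch)::
        # require that the whole input is consumed
        number = Word(nums) + StringEnd()
        print(number.parseString("123"))  # -> ['123']
        # number.parseString("123abc") raises ParseException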
"""
def __init__( self ):
super(StringEnd,self).__init__()
self.errmsg = "Expected end of text"
def parseImpl( self, instring, loc, doActions=True ):
if loc < len(instring):
raise ParseException(instring, loc, self.errmsg, self)
elif loc == len(instring):
return loc+1, []
elif loc > len(instring):
return loc, []
else:
raise ParseException(instring, loc, self.errmsg, self)
class WordStart(_PositionToken):
"""
Matches if the current position is at the beginning of a Word, and
is not preceded by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\\b} behavior of regular expressions,
use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of
the string being parsed, or at the beginning of a line.
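    Example (a minimal sketch)::
        # match 'cat' only when it begins a word
        cat_at_word_start = WordStart(alphanums) + Literal("cat")
        print(cat_at_word_start.searchString("cat concat bobcat")) # -> [['cat']]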
"""
def __init__(self, wordChars = printables):
super(WordStart,self).__init__()
self.wordChars = set(wordChars)
self.errmsg = "Not at the start of a word"
def parseImpl(self, instring, loc, doActions=True ):
if loc != 0:
if (instring[loc-1] in self.wordChars or
instring[loc] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class WordEnd(_PositionToken):
"""
Matches if the current position is at the end of a Word, and
is not followed by any character in a given set of C{wordChars}
    (default=C{printables}). To emulate the C{\\b} behavior of regular expressions,
use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of
the string being parsed, or at the end of a line.
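    Example (a minimal sketch)::
        # match 'cat' only when it ends a word
        cat_at_word_end = Literal("cat") + WordEnd(alphanums)
        print(cat_at_word_end.searchString("cat catalog bobcat")) # -> [['cat'], ['cat']]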
"""
def __init__(self, wordChars = printables):
super(WordEnd,self).__init__()
self.wordChars = set(wordChars)
self.skipWhitespace = False
self.errmsg = "Not at the end of a word"
def parseImpl(self, instring, loc, doActions=True ):
instrlen = len(instring)
if instrlen>0 and loc<instrlen:
if (instring[loc] in self.wordChars or
instring[loc-1] not in self.wordChars):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
class ParseExpression(ParserElement):
"""
Abstract subclass of ParserElement, for combining and post-processing parsed tokens.
"""
def __init__( self, exprs, savelist = False ):
super(ParseExpression,self).__init__(savelist)
if isinstance( exprs, _generatorType ):
exprs = list(exprs)
if isinstance( exprs, basestring ):
self.exprs = [ ParserElement._literalStringClass( exprs ) ]
elif isinstance( exprs, collections.Iterable ):
exprs = list(exprs)
# if sequence of strings provided, wrap with Literal
if all(isinstance(expr, basestring) for expr in exprs):
exprs = map(ParserElement._literalStringClass, exprs)
self.exprs = list(exprs)
else:
try:
self.exprs = list( exprs )
except TypeError:
self.exprs = [ exprs ]
self.callPreparse = False
def __getitem__( self, i ):
return self.exprs[i]
def append( self, other ):
self.exprs.append( other )
self.strRepr = None
return self
def leaveWhitespace( self ):
"""Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on
all contained expressions."""
self.skipWhitespace = False
self.exprs = [ e.copy() for e in self.exprs ]
for e in self.exprs:
e.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
else:
super( ParseExpression, self).ignore( other )
for e in self.exprs:
e.ignore( self.ignoreExprs[-1] )
return self
def __str__( self ):
try:
return super(ParseExpression,self).__str__()
except Exception:
pass
if self.strRepr is None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) )
return self.strRepr
def streamline( self ):
super(ParseExpression,self).streamline()
for e in self.exprs:
e.streamline()
# collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d )
# but only if there are no parse actions or resultsNames on the nested And's
# (likewise for Or's and MatchFirst's)
if ( len(self.exprs) == 2 ):
other = self.exprs[0]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = other.exprs[:] + [ self.exprs[1] ]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
other = self.exprs[-1]
if ( isinstance( other, self.__class__ ) and
not(other.parseAction) and
other.resultsName is None and
not other.debug ):
self.exprs = self.exprs[:-1] + other.exprs[:]
self.strRepr = None
self.mayReturnEmpty |= other.mayReturnEmpty
self.mayIndexError |= other.mayIndexError
self.errmsg = "Expected " + _ustr(self)
return self
def setResultsName( self, name, listAllMatches=False ):
ret = super(ParseExpression,self).setResultsName(name,listAllMatches)
return ret
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
for e in self.exprs:
e.validate(tmp)
self.checkRecursion( [] )
def copy(self):
ret = super(ParseExpression,self).copy()
ret.exprs = [e.copy() for e in self.exprs]
return ret
class And(ParseExpression):
"""
Requires all given C{ParseExpression}s to be found in the given order.
Expressions may be separated by whitespace.
May be constructed using the C{'+'} operator.
May also be constructed using the C{'-'} operator, which will suppress backtracking.
Example::
integer = Word(nums)
name_expr = OneOrMore(Word(alphas))
expr = And([integer("id"),name_expr("name"),integer("age")])
# more easily written as:
expr = integer("id") + name_expr("name") + integer("age")
"""
class _ErrorStop(Empty):
def __init__(self, *args, **kwargs):
super(And._ErrorStop,self).__init__(*args, **kwargs)
self.name = '-'
self.leaveWhitespace()
def __init__( self, exprs, savelist = True ):
super(And,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.setWhitespaceChars( self.exprs[0].whiteChars )
self.skipWhitespace = self.exprs[0].skipWhitespace
self.callPreparse = True
def parseImpl( self, instring, loc, doActions=True ):
# pass False as last arg to _parse for first element, since we already
# pre-parsed the string as part of our And pre-parsing
loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False )
errorStop = False
for e in self.exprs[1:]:
if isinstance(e, And._ErrorStop):
errorStop = True
continue
if errorStop:
try:
loc, exprtokens = e._parse( instring, loc, doActions )
except ParseSyntaxException:
raise
except ParseBaseException as pe:
pe.__traceback__ = None
raise ParseSyntaxException._from_exception(pe)
except IndexError:
raise ParseSyntaxException(instring, len(instring), self.errmsg, self)
else:
loc, exprtokens = e._parse( instring, loc, doActions )
if exprtokens or exprtokens.haskeys():
resultlist += exprtokens
return loc, resultlist
def __iadd__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #And( [ self, other ] )
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
if not e.mayReturnEmpty:
break
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
class Or(ParseExpression):
"""
Requires that at least one C{ParseExpression} is found.
If two expressions match, the expression that matches the longest string will be used.
May be constructed using the C{'^'} operator.
Example::
# construct Or using '^' operator
number = Word(nums) ^ Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789"))
prints::
[['123'], ['3.1416'], ['789']]
"""
def __init__( self, exprs, savelist = False ):
super(Or,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
matches = []
for e in self.exprs:
try:
loc2 = e.tryParse( instring, loc )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
else:
# save match among all matches, to retry longest to shortest
matches.append((loc2, e))
if matches:
matches.sort(key=lambda x: -x[0])
for _,e in matches:
try:
return e._parse( instring, loc, doActions )
except ParseException as err:
err.__traceback__ = None
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ixor__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #Or( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class MatchFirst(ParseExpression):
"""
Requires that at least one C{ParseExpression} is found.
If two expressions match, the first one listed is the one that will match.
May be constructed using the C{'|'} operator.
Example::
# construct MatchFirst using '|' operator
# watch the order of expressions to match
number = Word(nums) | Combine(Word(nums) + '.' + Word(nums))
print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']]
# put more selective expression first
number = Combine(Word(nums) + '.' + Word(nums)) | Word(nums)
print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']]
"""
def __init__( self, exprs, savelist = False ):
super(MatchFirst,self).__init__(exprs, savelist)
if self.exprs:
self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs)
else:
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
maxExcLoc = -1
maxException = None
for e in self.exprs:
try:
ret = e._parse( instring, loc, doActions )
return ret
except ParseException as err:
if err.loc > maxExcLoc:
maxException = err
maxExcLoc = err.loc
except IndexError:
if len(instring) > maxExcLoc:
maxException = ParseException(instring,len(instring),e.errmsg,self)
maxExcLoc = len(instring)
# only got here if no expression matched, raise exception for match that made it the furthest
else:
if maxException is not None:
maxException.msg = self.errmsg
raise maxException
else:
raise ParseException(instring, loc, "no defined alternatives to match", self)
def __ior__(self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass( other )
return self.append( other ) #MatchFirst( [ self, other ] )
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class Each(ParseExpression):
"""
Requires all given C{ParseExpression}s to be found, but in any order.
Expressions may be separated by whitespace.
May be constructed using the C{'&'} operator.
Example::
color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN")
shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON")
integer = Word(nums)
shape_attr = "shape:" + shape_type("shape")
posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn")
color_attr = "color:" + color("color")
size_attr = "size:" + integer("size")
# use Each (using operator '&') to accept attributes in any order
# (shape and posn are required, color and size are optional)
shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr)
shape_spec.runTests('''
shape: SQUARE color: BLACK posn: 100, 120
shape: CIRCLE size: 50 color: BLUE posn: 50,80
color:GREEN size:20 shape:TRIANGLE posn:20,40
'''
)
prints::
shape: SQUARE color: BLACK posn: 100, 120
['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']]
- color: BLACK
- posn: ['100', ',', '120']
- x: 100
- y: 120
- shape: SQUARE
shape: CIRCLE size: 50 color: BLUE posn: 50,80
['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']]
- color: BLUE
- posn: ['50', ',', '80']
- x: 50
- y: 80
- shape: CIRCLE
- size: 50
color: GREEN size: 20 shape: TRIANGLE posn: 20,40
['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']]
- color: GREEN
- posn: ['20', ',', '40']
- x: 20
- y: 40
- shape: TRIANGLE
- size: 20
"""
def __init__( self, exprs, savelist = True ):
super(Each,self).__init__(exprs, savelist)
self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs)
self.skipWhitespace = True
self.initExprGroups = True
def parseImpl( self, instring, loc, doActions=True ):
if self.initExprGroups:
self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional))
opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ]
opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)]
self.optionals = opt1 + opt2
self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ]
self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ]
self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ]
self.required += self.multirequired
self.initExprGroups = False
tmpLoc = loc
tmpReqd = self.required[:]
tmpOpt = self.optionals[:]
matchOrder = []
keepMatching = True
while keepMatching:
tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired
failed = []
for e in tmpExprs:
try:
tmpLoc = e.tryParse( instring, tmpLoc )
except ParseException:
failed.append(e)
else:
matchOrder.append(self.opt1map.get(id(e),e))
if e in tmpReqd:
tmpReqd.remove(e)
elif e in tmpOpt:
tmpOpt.remove(e)
if len(failed) == len(tmpExprs):
keepMatching = False
if tmpReqd:
missing = ", ".join(_ustr(e) for e in tmpReqd)
raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing )
# add any unmatched Optionals, in case they have default values defined
matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt]
resultlist = []
for e in matchOrder:
loc,results = e._parse(instring,loc,doActions)
resultlist.append(results)
finalResults = sum(resultlist, ParseResults([]))
return loc, finalResults
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + " & ".join(_ustr(e) for e in self.exprs) + "}"
return self.strRepr
def checkRecursion( self, parseElementList ):
subRecCheckList = parseElementList[:] + [ self ]
for e in self.exprs:
e.checkRecursion( subRecCheckList )
class ParseElementEnhance(ParserElement):
"""
Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens.
"""
def __init__( self, expr, savelist=False ):
super(ParseElementEnhance,self).__init__(savelist)
if isinstance( expr, basestring ):
if issubclass(ParserElement._literalStringClass, Token):
expr = ParserElement._literalStringClass(expr)
else:
expr = ParserElement._literalStringClass(Literal(expr))
self.expr = expr
self.strRepr = None
if expr is not None:
self.mayIndexError = expr.mayIndexError
self.mayReturnEmpty = expr.mayReturnEmpty
self.setWhitespaceChars( expr.whiteChars )
self.skipWhitespace = expr.skipWhitespace
self.saveAsList = expr.saveAsList
self.callPreparse = expr.callPreparse
self.ignoreExprs.extend(expr.ignoreExprs)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr is not None:
return self.expr._parse( instring, loc, doActions, callPreParse=False )
else:
raise ParseException("",loc,self.errmsg,self)
def leaveWhitespace( self ):
self.skipWhitespace = False
self.expr = self.expr.copy()
if self.expr is not None:
self.expr.leaveWhitespace()
return self
def ignore( self, other ):
if isinstance( other, Suppress ):
if other not in self.ignoreExprs:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
else:
super( ParseElementEnhance, self).ignore( other )
if self.expr is not None:
self.expr.ignore( self.ignoreExprs[-1] )
return self
def streamline( self ):
super(ParseElementEnhance,self).streamline()
if self.expr is not None:
self.expr.streamline()
return self
def checkRecursion( self, parseElementList ):
if self in parseElementList:
raise RecursiveGrammarException( parseElementList+[self] )
subRecCheckList = parseElementList[:] + [ self ]
if self.expr is not None:
self.expr.checkRecursion( subRecCheckList )
def validate( self, validateTrace=[] ):
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion( [] )
def __str__( self ):
try:
return super(ParseElementEnhance,self).__str__()
except Exception:
pass
if self.strRepr is None and self.expr is not None:
self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) )
return self.strRepr
class FollowedBy(ParseElementEnhance):
"""
Lookahead matching of the given parse expression. C{FollowedBy}
does I{not} advance the parsing position within the input string, it only
verifies that the specified parse expression matches at the current
position. C{FollowedBy} always returns a null token list.
Example::
# use FollowedBy to match a label only if it is followed by a ':'
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
prints::
[['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
"""
def __init__( self, expr ):
super(FollowedBy,self).__init__(expr)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
self.expr.tryParse( instring, loc )
return loc, []
class NotAny(ParseElementEnhance):
"""
Lookahead to disallow matching with the given parse expression. C{NotAny}
does I{not} advance the parsing position within the input string, it only
verifies that the specified parse expression does I{not} match at the current
position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
always returns a null token list. May be constructed using the '~' operator.
Example::
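        # a minimal sketch: keep the boolean keywords from matching as identifiers
        AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
        ident = ~(AND | OR | NOT) + Word(alphas)
        print(ident.parseString("color")) # -> ['color']
        # ident.parseString("NOT") raises ParseException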
"""
def __init__( self, expr ):
super(NotAny,self).__init__(expr)
#~ self.leaveWhitespace()
self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
self.mayReturnEmpty = True
self.errmsg = "Found unwanted token, "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
if self.expr.canParseNext(instring, loc):
raise ParseException(instring, loc, self.errmsg, self)
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "~{" + _ustr(self.expr) + "}"
return self.strRepr
class _MultipleMatch(ParseElementEnhance):
def __init__( self, expr, stopOn=None):
super(_MultipleMatch, self).__init__(expr)
self.saveAsList = True
ender = stopOn
if isinstance(ender, basestring):
ender = ParserElement._literalStringClass(ender)
self.not_ender = ~ender if ender is not None else None
def parseImpl( self, instring, loc, doActions=True ):
self_expr_parse = self.expr._parse
self_skip_ignorables = self._skipIgnorables
check_ender = self.not_ender is not None
if check_ender:
try_not_ender = self.not_ender.tryParse
# must be at least one (but first see if we are the stopOn sentinel;
# if so, fail)
if check_ender:
try_not_ender(instring, loc)
loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
try:
hasIgnoreExprs = (not not self.ignoreExprs)
while 1:
if check_ender:
try_not_ender(instring, loc)
if hasIgnoreExprs:
preloc = self_skip_ignorables( instring, loc )
else:
preloc = loc
loc, tmptokens = self_expr_parse( instring, preloc, doActions )
if tmptokens or tmptokens.haskeys():
tokens += tmptokens
except (ParseException,IndexError):
pass
return loc, tokens
class OneOrMore(_MultipleMatch):
"""
Repetition of one or more of the given expression.
Parameters:
- expr - expression that must match one or more times
- stopOn - (default=C{None}) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: BLACK"
OneOrMore(attr_expr).parseString(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
# use stopOn attribute for OneOrMore to avoid reading label string as part of the data
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
# could also be written as
(attr_expr * (1,)).parseString(text).pprint()
"""
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "{" + _ustr(self.expr) + "}..."
return self.strRepr
class ZeroOrMore(_MultipleMatch):
"""
Optional repetition of zero or more of the given expression.
Parameters:
- expr - expression that must match zero or more times
- stopOn - (default=C{None}) - expression for a terminating sentinel
(only required if the sentinel would ordinarily match the repetition
expression)
Example: similar to L{OneOrMore}
"""
def __init__( self, expr, stopOn=None):
super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
except (ParseException,IndexError):
return loc, []
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]..."
return self.strRepr
class _NullToken(object):
def __bool__(self):
return False
__nonzero__ = __bool__
def __str__(self):
return ""
_optionalNotMatched = _NullToken()
class Optional(ParseElementEnhance):
"""
Optional matching of the given expression.
Parameters:
     - expr - expression that is matched at most once, and may be absent
- default (optional) - value to be returned if the optional expression is not found.
Example::
# US postal code can be a 5-digit zip, plus optional 4-digit qualifier
zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
zip.runTests('''
# traditional ZIP code
12345
# ZIP+4 form
12101-0001
# invalid ZIP
98765-
''')
prints::
# traditional ZIP code
12345
['12345']
# ZIP+4 form
12101-0001
['12101-0001']
# invalid ZIP
98765-
^
FAIL: Expected end of text (at char 5), (line:1, col:6)
"""
def __init__( self, expr, default=_optionalNotMatched ):
super(Optional,self).__init__( expr, savelist=False )
self.saveAsList = self.expr.saveAsList
self.defaultValue = default
self.mayReturnEmpty = True
def parseImpl( self, instring, loc, doActions=True ):
try:
loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
except (ParseException,IndexError):
if self.defaultValue is not _optionalNotMatched:
if self.expr.resultsName:
tokens = ParseResults([ self.defaultValue ])
tokens[self.expr.resultsName] = self.defaultValue
else:
tokens = [ self.defaultValue ]
else:
tokens = []
return loc, tokens
def __str__( self ):
if hasattr(self,"name"):
return self.name
if self.strRepr is None:
self.strRepr = "[" + _ustr(self.expr) + "]"
return self.strRepr
class SkipTo(ParseElementEnhance):
"""
Token for skipping over all undefined text until the matched expression is found.
Parameters:
- expr - target expression marking the end of the data to be skipped
- include - (default=C{False}) if True, the target expression is also parsed
(the skipped text and target expression are returned as a 2-element list).
- ignore - (default=C{None}) used to define grammars (typically quoted strings and
comments) that might contain false matches to the target expression
- failOn - (default=C{None}) define expressions that are not allowed to be
       included in the skipped text; if found before the target expression is found,
the SkipTo is not a match
Example::
report = '''
Outstanding Issues Report - 1 Jan 2000
# | Severity | Description | Days Open
-----+----------+-------------------------------------------+-----------
101 | Critical | Intermittent system crash | 6
94 | Cosmetic | Spelling error on Login ('log|n') | 14
79 | Minor | System slow when running too many reports | 47
'''
integer = Word(nums)
SEP = Suppress('|')
# use SkipTo to simply match everything up until the next SEP
# - ignore quoted strings, so that a '|' character inside a quoted string does not match
# - parse action will call token.strip() for each matched token, i.e., the description body
string_data = SkipTo(SEP, ignore=quotedString)
string_data.setParseAction(tokenMap(str.strip))
ticket_expr = (integer("issue_num") + SEP
+ string_data("sev") + SEP
+ string_data("desc") + SEP
+ integer("days_open"))
for tkt in ticket_expr.searchString(report):
            print(tkt.dump())
prints::
['101', 'Critical', 'Intermittent system crash', '6']
- days_open: 6
- desc: Intermittent system crash
- issue_num: 101
- sev: Critical
['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
- days_open: 14
- desc: Spelling error on Login ('log|n')
- issue_num: 94
- sev: Cosmetic
['79', 'Minor', 'System slow when running too many reports', '47']
- days_open: 47
- desc: System slow when running too many reports
- issue_num: 79
- sev: Minor
"""
def __init__( self, other, include=False, ignore=None, failOn=None ):
super( SkipTo, self ).__init__( other )
self.ignoreExpr = ignore
self.mayReturnEmpty = True
self.mayIndexError = False
self.includeMatch = include
self.asList = False
if isinstance(failOn, basestring):
self.failOn = ParserElement._literalStringClass(failOn)
else:
self.failOn = failOn
self.errmsg = "No match found for "+_ustr(self.expr)
def parseImpl( self, instring, loc, doActions=True ):
startloc = loc
instrlen = len(instring)
expr = self.expr
expr_parse = self.expr._parse
self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
tmploc = loc
while tmploc <= instrlen:
if self_failOn_canParseNext is not None:
# break if failOn expression matches
if self_failOn_canParseNext(instring, tmploc):
break
if self_ignoreExpr_tryParse is not None:
# advance past ignore expressions
while 1:
try:
tmploc = self_ignoreExpr_tryParse(instring, tmploc)
except ParseBaseException:
break
try:
expr_parse(instring, tmploc, doActions=False, callPreParse=False)
except (ParseException, IndexError):
# no match, advance loc in string
tmploc += 1
else:
# matched skipto expr, done
break
else:
# ran off the end of the input string without matching skipto expr, fail
raise ParseException(instring, loc, self.errmsg, self)
# build up return values
loc = tmploc
skiptext = instring[startloc:loc]
skipresult = ParseResults(skiptext)
if self.includeMatch:
loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
skipresult += mat
return loc, skipresult
class Forward(ParseElementEnhance):
"""
Forward declaration of an expression to be defined later -
used for recursive grammars, such as algebraic infix notation.
When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
Note: take care when assigning to C{Forward} not to overlook precedence of operators.
Specifically, '|' has a lower precedence than '<<', so that::
fwdExpr << a | b | c
will actually be evaluated as::
(fwdExpr << a) | b | c
thereby leaving b and c out as parseable alternatives. It is recommended that you
explicitly group the values inserted into the C{Forward}::
fwdExpr << (a | b | c)
Converting to use the '<<=' operator instead will avoid this problem.
See L{ParseResults.pprint} for an example of a recursive parser created using
C{Forward}.
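    Example (a minimal sketch of a recursive grammar; names are illustrative)::
        LPAR, RPAR = map(Suppress, "()")
        expr = Forward()
        operand = Word(nums) | Group(LPAR + expr + RPAR)
        expr <<= operand + ZeroOrMore(oneOf("+ -") + operand)
        print(expr.parseString("(1+2)-3")) # -> [['1', '+', '2'], '-', '3']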
"""
def __init__( self, other=None ):
super(Forward,self).__init__( other, savelist=False )
def __lshift__( self, other ):
if isinstance( other, basestring ):
other = ParserElement._literalStringClass(other)
self.expr = other
self.strRepr = None
self.mayIndexError = self.expr.mayIndexError
self.mayReturnEmpty = self.expr.mayReturnEmpty
self.setWhitespaceChars( self.expr.whiteChars )
self.skipWhitespace = self.expr.skipWhitespace
self.saveAsList = self.expr.saveAsList
self.ignoreExprs.extend(self.expr.ignoreExprs)
return self
def __ilshift__(self, other):
return self << other
def leaveWhitespace( self ):
self.skipWhitespace = False
return self
def streamline( self ):
if not self.streamlined:
self.streamlined = True
if self.expr is not None:
self.expr.streamline()
return self
def validate( self, validateTrace=[] ):
if self not in validateTrace:
tmp = validateTrace[:]+[self]
if self.expr is not None:
self.expr.validate(tmp)
self.checkRecursion([])
def __str__( self ):
if hasattr(self,"name"):
return self.name
return self.__class__.__name__ + ": ..."
# stubbed out for now - creates awful memory and perf issues
self._revertClass = self.__class__
self.__class__ = _ForwardNoRecurse
try:
if self.expr is not None:
retString = _ustr(self.expr)
else:
retString = "None"
finally:
self.__class__ = self._revertClass
return self.__class__.__name__ + ": " + retString
def copy(self):
if self.expr is not None:
return super(Forward,self).copy()
else:
ret = Forward()
ret <<= self
return ret
class _ForwardNoRecurse(Forward):
def __str__( self ):
return "..."
class TokenConverter(ParseElementEnhance):
"""
    Abstract subclass of C{ParseElementEnhance}, for converting parsed results.
"""
def __init__( self, expr, savelist=False ):
super(TokenConverter,self).__init__( expr )#, savelist )
self.saveAsList = False
class Combine(TokenConverter):
"""
Converter to concatenate all matching tokens to a single string.
By default, the matching patterns must also be contiguous in the input string;
this can be disabled by specifying C{'adjacent=False'} in the constructor.
Example::
real = Word(nums) + '.' + Word(nums)
print(real.parseString('3.1416')) # -> ['3', '.', '1416']
# will also erroneously match the following
print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
real = Combine(Word(nums) + '.' + Word(nums))
print(real.parseString('3.1416')) # -> ['3.1416']
# no match when there are internal spaces
print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
"""
def __init__( self, expr, joinString="", adjacent=True ):
super(Combine,self).__init__( expr )
# suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
if adjacent:
self.leaveWhitespace()
self.adjacent = adjacent
self.skipWhitespace = True
self.joinString = joinString
self.callPreparse = True
def ignore( self, other ):
if self.adjacent:
ParserElement.ignore(self, other)
else:
super( Combine, self).ignore( other )
return self
def postParse( self, instring, loc, tokenlist ):
retToks = tokenlist.copy()
del retToks[:]
retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
if self.resultsName and retToks.haskeys():
return [ retToks ]
else:
return retToks
class Group(TokenConverter):
"""
Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
Example::
ident = Word(alphas)
num = Word(nums)
term = ident | num
func = ident + Optional(delimitedList(term))
print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100']
func = ident + Group(Optional(delimitedList(term)))
print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']]
"""
def __init__( self, expr ):
super(Group,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
return [ tokenlist ]
class Dict(TokenConverter):
"""
Converter to return a repetitive expression as a list, but also as a dictionary.
Each element can also be referenced using the first token in the expression as its key.
    Useful for tabular report scraping when the first column can be used as an item key.
Example::
data_word = Word(alphas)
label = data_word + FollowedBy(':')
attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
# print attributes as plain groups
print(OneOrMore(attr_expr).parseString(text).dump())
# instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
print(result.dump())
# access named fields as dict entries, or output as dict
print(result['shape'])
print(result.asDict())
prints::
['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
{'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
See more examples at L{ParseResults} of accessing fields by results name.
"""
def __init__( self, expr ):
super(Dict,self).__init__( expr )
self.saveAsList = True
def postParse( self, instring, loc, tokenlist ):
for i,tok in enumerate(tokenlist):
if len(tok) == 0:
continue
ikey = tok[0]
if isinstance(ikey,int):
ikey = _ustr(tok[0]).strip()
if len(tok)==1:
tokenlist[ikey] = _ParseResultsWithOffset("",i)
elif len(tok)==2 and not isinstance(tok[1],ParseResults):
tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i)
else:
dictvalue = tok.copy() #ParseResults(i)
del dictvalue[0]
if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()):
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i)
else:
tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i)
if self.resultsName:
return [ tokenlist ]
else:
return tokenlist
class Suppress(TokenConverter):
"""
Converter for ignoring the results of a parsed expression.
Example::
source = "a, b, c,d"
wd = Word(alphas)
wd_list1 = wd + ZeroOrMore(',' + wd)
print(wd_list1.parseString(source))
# often, delimiters that are useful during parsing are just in the
# way afterward - use Suppress to keep them out of the parsed output
wd_list2 = wd + ZeroOrMore(Suppress(',') + wd)
print(wd_list2.parseString(source))
prints::
['a', ',', 'b', ',', 'c', ',', 'd']
['a', 'b', 'c', 'd']
(See also L{delimitedList}.)
"""
def postParse( self, instring, loc, tokenlist ):
return []
def suppress( self ):
return self
class OnlyOnce(object):
"""
Wrapper for parse actions, to ensure they are only called once.
"""
def __init__(self, methodCall):
self.callable = _trim_arity(methodCall)
self.called = False
def __call__(self,s,l,t):
if not self.called:
results = self.callable(s,l,t)
self.called = True
return results
raise ParseException(s,l,"")
def reset(self):
self.called = False
def traceParseAction(f):
"""
Decorator for debugging parse actions.
When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".}
When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised.
Example::
wd = Word(alphas)
@traceParseAction
def remove_duplicate_chars(tokens):
            return ''.join(sorted(set(''.join(tokens))))
wds = OneOrMore(wd).setParseAction(remove_duplicate_chars)
print(wds.parseString("slkdjs sld sldd sdlf sdljf"))
prints::
>>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
<<leaving remove_duplicate_chars (ret: 'dfjkls')
['dfjkls']
"""
f = _trim_arity(f)
def z(*paArgs):
thisFunc = f.__name__
s,l,t = paArgs[-3:]
if len(paArgs)>3:
thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc
sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) )
try:
ret = f(*paArgs)
except Exception as exc:
sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) )
raise
sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) )
return ret
try:
z.__name__ = f.__name__
except AttributeError:
pass
return z
#
# global helpers
#
def delimitedList( expr, delim=",", combine=False ):
"""
Helper to define a delimited list of expressions - the delimiter defaults to ','.
By default, the list elements and delimiters can have intervening whitespace, and
comments, but this can be overridden by passing C{combine=True} in the constructor.
If C{combine} is set to C{True}, the matching tokens are returned as a single token
string, with the delimiters included; otherwise, the matching tokens are returned
as a list of tokens, with the delimiters suppressed.
Example::
delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
"""
dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
if combine:
return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
else:
return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
def countedArray( expr, intExpr=None ):
"""
Helper to define a counted list of expressions.
This helper defines a pattern of the form::
integer expr expr expr...
where the leading integer tells how many expr expressions follow.
    The matched tokens are returned as a list of expr tokens - the leading count token is suppressed.
If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
Example::
countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd']
# in this parser, the leading integer value is given in binary,
# '10' indicating that 2 values are in the array
binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd']
"""
arrayExpr = Forward()
def countFieldParseAction(s,l,t):
n = t[0]
arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
return []
if intExpr is None:
intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
else:
intExpr = intExpr.copy()
intExpr.setName("arrayLen")
intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
def _flatten(L):
ret = []
for i in L:
if isinstance(i,list):
ret.extend(_flatten(i))
else:
ret.append(i)
return ret
def matchPreviousLiteral(expr):
"""
Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousLiteral(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches a
    previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
If this is not desired, use C{matchPreviousExpr}.
Do I{not} use with packrat parsing enabled.
"""
rep = Forward()
def copyTokenToRepeater(s,l,t):
if t:
if len(t) == 1:
rep << t[0]
else:
# flatten t tokens
tflat = _flatten(t.asList())
rep << And(Literal(tt) for tt in tflat)
else:
rep << Empty()
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName('(prev) ' + _ustr(expr))
return rep
def matchPreviousExpr(expr):
"""
Helper to define an expression that is indirectly defined from
the tokens matched in a previous expression, that is, it looks
for a 'repeat' of a previous expression. For example::
first = Word(nums)
second = matchPreviousExpr(first)
matchExpr = first + ":" + second
will match C{"1:1"}, but not C{"1:2"}. Because this matches by
    expressions, it will I{not} match the leading C{"1:1"} in C{"1:10"};
the expressions are evaluated first, and then compared, so
C{"1"} is compared with C{"10"}.
Do I{not} use with packrat parsing enabled.
"""
rep = Forward()
e2 = expr.copy()
rep <<= e2
def copyTokenToRepeater(s,l,t):
matchTokens = _flatten(t.asList())
def mustMatchTheseTokens(s,l,t):
theseTokens = _flatten(t.asList())
if theseTokens != matchTokens:
raise ParseException("",0,"")
rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
rep.setName('(prev) ' + _ustr(expr))
return rep
def _escapeRegexRangeChars(s):
#~ escape these chars: ^-]
for c in r"\^-]":
s = s.replace(c,_bslash+c)
s = s.replace("\n",r"\n")
s = s.replace("\t",r"\t")
return _ustr(s)
def oneOf( strs, caseless=False, useRegex=True ):
"""
Helper to quickly define a set of alternative Literals, and makes sure to do
longest-first testing when there is a conflict, regardless of the input order,
but returns a C{L{MatchFirst}} for best performance.
Parameters:
- strs - a string of space-delimited literals, or a collection of string literals
- caseless - (default=C{False}) - treat all literals as caseless
- useRegex - (default=C{True}) - as an optimization, will generate a Regex
object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
if creating a C{Regex} raises an exception)
Example::
comp_oper = oneOf("< = > <= >= !=")
var = Word(alphas)
number = Word(nums)
term = var | number
comparison_expr = term + comp_oper + term
print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
prints::
[['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
"""
if caseless:
isequal = ( lambda a,b: a.upper() == b.upper() )
masks = ( lambda a,b: b.upper().startswith(a.upper()) )
parseElementClass = CaselessLiteral
else:
isequal = ( lambda a,b: a == b )
masks = ( lambda a,b: b.startswith(a) )
parseElementClass = Literal
symbols = []
if isinstance(strs,basestring):
symbols = strs.split()
elif isinstance(strs, collections.Iterable):
symbols = list(strs)
else:
warnings.warn("Invalid argument to oneOf, expected string or iterable",
SyntaxWarning, stacklevel=2)
if not symbols:
return NoMatch()
i = 0
while i < len(symbols)-1:
cur = symbols[i]
for j,other in enumerate(symbols[i+1:]):
if ( isequal(other, cur) ):
del symbols[i+j+1]
break
elif ( masks(cur, other) ):
del symbols[i+j+1]
symbols.insert(i,other)
cur = other
break
else:
i += 1
if not caseless and useRegex:
#~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
try:
if len(symbols)==len("".join(symbols)):
return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
else:
return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
except Exception:
warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
SyntaxWarning, stacklevel=2)
# last resort, just use MatchFirst
return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
def dictOf( key, value ):
"""
Helper to easily and clearly define a dictionary by specifying the respective patterns
for the key and value. Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
in the proper order. The key pattern can include delimiting markers or punctuation,
as long as they are suppressed, thereby leaving the significant key text. The value
pattern can include named results, so that the C{Dict} results can include named token
fields.
Example::
text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        data_word = Word(alphas)
        label = data_word + FollowedBy(':')
        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
print(OneOrMore(attr_expr).parseString(text).dump())
attr_label = label
attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
# similar to Dict, but simpler call format
result = dictOf(attr_label, attr_value).parseString(text)
print(result.dump())
print(result['shape'])
print(result.shape) # object attribute access works too
print(result.asDict())
prints::
[['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
- color: light blue
- posn: upper left
- shape: SQUARE
- texture: burlap
SQUARE
SQUARE
{'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
"""
return Dict( ZeroOrMore( Group ( key + value ) ) )
def originalTextFor(expr, asString=True):
"""
Helper to return the original, untokenized text for a given expression. Useful to
restore the parsed fields of an HTML start tag into the raw tag text itself, or to
revert separate tokens with intervening whitespace back to the original matching
    input text. By default, returns a string containing the original parsed text.
If the optional C{asString} argument is passed as C{False}, then the return value is a
C{L{ParseResults}} containing any results names that were originally matched, and a
single token containing the original matched text from the input string. So if
the expression passed to C{L{originalTextFor}} contains expressions with defined
results names, you must set C{asString} to C{False} if you want to preserve those
results name values.
Example::
src = "this is test <b> bold <i>text</i> </b> normal text "
for tag in ("b","i"):
opener,closer = makeHTMLTags(tag)
patt = originalTextFor(opener + SkipTo(closer) + closer)
print(patt.searchString(src)[0])
prints::
['<b> bold <i>text</i> </b>']
['<i>text</i>']
"""
locMarker = Empty().setParseAction(lambda s,loc,t: loc)
endlocMarker = locMarker.copy()
endlocMarker.callPreparse = False
matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
if asString:
extractText = lambda s,l,t: s[t._original_start:t._original_end]
else:
def extractText(s,l,t):
t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
matchExpr.setParseAction(extractText)
matchExpr.ignoreExprs = expr.ignoreExprs
return matchExpr
def ungroup(expr):
"""
Helper to undo pyparsing's default grouping of And expressions, even
if all but one are non-empty.
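    Example (a minimal sketch)::
        grouped = Group(Word(alphas) + Word(nums))
        print(grouped.parseString("ab 123"))          # -> [['ab', '123']]
        print(ungroup(grouped).parseString("ab 123")) # -> ['ab', '123']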
"""
return TokenConverter(expr).setParseAction(lambda t:t[0])
def locatedExpr(expr):
"""
Helper to decorate a returned token with its starting and ending locations in the input string.
This helper adds the following results names:
- locn_start = location where matched expression begins
- locn_end = location where matched expression ends
- value = the actual parsed results
    Be careful if the input text contains C{<TAB>} characters; you may want to call
C{L{ParserElement.parseWithTabs}}
Example::
wd = Word(alphas)
for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"):
print(match)
prints::
[[0, 'ljsdf', 5]]
[[8, 'lksdjjf', 15]]
[[18, 'lkkjj', 23]]
"""
locator = Empty().setParseAction(lambda s,l,t: l)
return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end"))
# convenience constants for positional expressions
empty = Empty().setName("empty")
lineStart = LineStart().setName("lineStart")
lineEnd = LineEnd().setName("lineEnd")
stringStart = StringStart().setName("stringStart")
stringEnd = StringEnd().setName("stringEnd")
_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1])
_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16)))
_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8)))
_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE)
_charRange = Group(_singleChar + Suppress("-") + _singleChar)
_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]"
def srange(s):
r"""
Helper to easily define string ranges for use in Word construction. Borrows
syntax from regexp '[]' string range definitions::
srange("[0-9]") -> "0123456789"
srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
The input string must be enclosed in []'s, and the returned string is the expanded
character set joined into a single string.
The values enclosed in the []'s may be:
- a single character
- an escaped character with a leading backslash (such as C{\-} or C{\]})
- an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character)
(C{\0x##} is also supported for backwards compatibility)
- an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character)
- a range of any of the above, separated by a dash (C{'a-z'}, etc.)
- any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.)
"""
_expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1))
try:
return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body)
except Exception:
return ""
def matchOnlyAtCol(n):
"""
Helper method for defining parse actions that require matching at a specific
column in the input text.
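    Example (a minimal sketch; the input layout is illustrative)::
        # accept a number only when it starts in column 5
        num_in_col5 = Word(nums).setParseAction(matchOnlyAtCol(5))
        print((Word(alphas) + num_in_col5).parseString("abc 12")) # -> ['abc', '12']
        # (Word(alphas) + num_in_col5).parseString("abcd 12") raises ParseException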
"""
def verifyCol(strg,locn,toks):
if col(locn,strg) != n:
raise ParseException(strg,locn,"matched token not at column %d" % n)
return verifyCol
def replaceWith(replStr):
"""
Helper method for common parse actions that simply return a literal value. Especially
useful when used with C{L{transformString<ParserElement.transformString>}()}.
Example::
num = Word(nums).setParseAction(lambda toks: int(toks[0]))
na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
term = na | num
OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
"""
return lambda s,l,t: [replStr]
def removeQuotes(s,l,t):
"""
Helper parse action for removing quotation marks from parsed quoted strings.
Example::
# by default, quotation marks are included in parsed results
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
# use removeQuotes to strip quotation marks from parsed results
quotedString.setParseAction(removeQuotes)
quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
"""
return t[0][1:-1]
def tokenMap(func, *args):
"""
    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
args are passed, they are forwarded to the given function as additional arguments after
the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
parsed data to an integer using base 16.
    Example (compare the last example to the one in L{ParserElement.transformString})::
hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
hex_ints.runTests('''
00 11 22 aa FF 0a 0d 1a
''')
upperword = Word(alphas).setParseAction(tokenMap(str.upper))
OneOrMore(upperword).runTests('''
my kingdom for a horse
''')
wd = Word(alphas).setParseAction(tokenMap(str.title))
OneOrMore(wd).setParseAction(' '.join).runTests('''
now is the winter of our discontent made glorious summer by this sun of york
''')
prints::
00 11 22 aa FF 0a 0d 1a
[0, 17, 34, 170, 255, 10, 13, 26]
my kingdom for a horse
['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
now is the winter of our discontent made glorious summer by this sun of york
['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
"""
def pa(s,l,t):
return [func(tokn, *args) for tokn in t]
try:
func_name = getattr(func, '__name__',
getattr(func, '__class__').__name__)
except Exception:
func_name = str(func)
pa.__name__ = func_name
return pa
upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
def _makeTags(tagStr, xml):
"""Internal helper to construct opening and closing tag expressions, given a tag name"""
if isinstance(tagStr,basestring):
resname = tagStr
tagStr = Keyword(tagStr, caseless=not xml)
else:
resname = tagStr.name
tagAttrName = Word(alphas,alphanums+"_-:")
if (xml):
tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
else:
printablesLessRAbrack = "".join(c for c in printables if c not in ">")
tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
openTag = Suppress("<") + tagStr("tag") + \
Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
Optional( Suppress("=") + tagAttrValue ) ))) + \
Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
closeTag = Combine(_L("</") + tagStr + ">")
openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
openTag.tag = resname
closeTag.tag = resname
return openTag, closeTag
def makeHTMLTags(tagStr):
"""
Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
Example::
text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
# makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
a,a_end = makeHTMLTags("A")
link_expr = a + SkipTo(a_end)("link_text") + a_end
for link in link_expr.searchString(text):
# attributes in the <A> tag (like "href" shown here) are also accessible as named results
print(link.link_text, '->', link.href)
prints::
pyparsing -> http://pyparsing.wikispaces.com
"""
return _makeTags( tagStr, False )
def makeXMLTags(tagStr):
"""
Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
tags only in the given upper/lower case.
Example: similar to L{makeHTMLTags}
"""
return _makeTags( tagStr, True )
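# A minimal usage sketch for makeXMLTags, kept in comments so the module body is
# unchanged; the <body> tag, the "lang" attribute, and the sample text are assumptions.
#
#   body, body_end = makeXMLTags("body")
#   text = '<body lang="en">Some XML text</body>'
#   expr = body + SkipTo(body_end)("content") + body_end
#   result = expr.parseString(text)
#   print(result.lang, result.content)   # -> en Some XML text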
def withAttribute(*args,**attrDict):
"""
Helper to create a validating parse action to be used with start tags created
with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
with a required attribute value, to avoid false matches on common tags such as
C{<TD>} or C{<DIV>}.
Call C{withAttribute} with a series of attribute names and values. Specify the list
of filter attribute names and values as:
- keyword arguments, as in C{(align="right")}, or
- as an explicit dict with C{**} operator, when an attribute name is also a Python
reserved word, as in C{**{"class":"Customer", "align":"right"}}
- a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
For attribute names with a namespace prefix, you must use the second form. Attribute
names are matched insensitive to upper/lower case.
If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
To verify that the attribute exists, but without specifying a value, pass
C{withAttribute.ANY_VALUE} as the value.
Example::
html = '''
<div>
Some text
<div type="grid">1 4 0 1 0</div>
<div type="graph">1,3 2,3 1,1</div>
<div>this has no type</div>
</div>
'''
div,div_end = makeHTMLTags("div")
# only match div tag having a type attribute with value "grid"
div_grid = div().setParseAction(withAttribute(type="grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
# construct a match with any div tag having a type attribute, regardless of the value
div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
if args:
attrs = args[:]
else:
attrs = attrDict.items()
attrs = [(k,v) for k,v in attrs]
def pa(s,l,tokens):
for attrName,attrValue in attrs:
if attrName not in tokens:
raise ParseException(s,l,"no matching attribute " + attrName)
if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue:
raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" %
(attrName, tokens[attrName], attrValue))
return pa
withAttribute.ANY_VALUE = object()
def withClass(classname, namespace=''):
"""
Simplified version of C{L{withAttribute}} when matching on a div class - made
difficult because C{class} is a reserved word in Python.
Example::
html = '''
<div>
Some text
<div class="grid">1 4 0 1 0</div>
<div class="graph">1,3 2,3 1,1</div>
<div>this <div> has no class</div>
</div>
'''
div,div_end = makeHTMLTags("div")
div_grid = div().setParseAction(withClass("grid"))
grid_expr = div_grid + SkipTo(div | div_end)("body")
for grid_header in grid_expr.searchString(html):
print(grid_header.body)
div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE))
div_expr = div_any_type + SkipTo(div | div_end)("body")
for div_header in div_expr.searchString(html):
print(div_header.body)
prints::
1 4 0 1 0
1 4 0 1 0
1,3 2,3 1,1
"""
classattr = "%s:class" % namespace if namespace else "class"
return withAttribute(**{classattr : classname})
opAssoc = _Constants()
opAssoc.LEFT = object()
opAssoc.RIGHT = object()
def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ):
"""
Helper method for constructing grammars of expressions made up of
operators working in a precedence hierarchy. Operators may be unary or
binary, left- or right-associative. Parse actions can also be attached
to operator expressions. The generated parser will also recognize the use
of parentheses to override operator precedences (see example below).
Note: if you define a deep operator list, you may see performance issues
when using infixNotation. See L{ParserElement.enablePackrat} for a
mechanism to potentially improve your parser performance.
Parameters:
- baseExpr - expression representing the most basic element of the nested expression grammar
- opList - list of tuples, one for each operator precedence level in the
expression grammar; each tuple is of the form
(opExpr, numTerms, rightLeftAssoc, parseAction), where:
- opExpr is the pyparsing expression for the operator;
may also be a string, which will be converted to a Literal;
if numTerms is 3, opExpr is a tuple of two expressions, for the
two operators separating the 3 terms
- numTerms is the number of terms for this operator (must
be 1, 2, or 3)
- rightLeftAssoc is the indicator whether the operator is
right or left associative, using the pyparsing-defined
constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}.
- parseAction is the parse action to be associated with
expressions matching this operator expression (the
parse action tuple member may be omitted)
- lpar - expression for matching left-parentheses (default=C{Suppress('(')})
- rpar - expression for matching right-parentheses (default=C{Suppress(')')})
Example::
# simple example of four-function arithmetic with ints and variable names
integer = pyparsing_common.signed_integer
varname = pyparsing_common.identifier
arith_expr = infixNotation(integer | varname,
[
('-', 1, opAssoc.RIGHT),
(oneOf('* /'), 2, opAssoc.LEFT),
(oneOf('+ -'), 2, opAssoc.LEFT),
])
arith_expr.runTests('''
5+3*6
(5+3)*6
-2--11
''', fullDump=False)
prints::
5+3*6
[[5, '+', [3, '*', 6]]]
(5+3)*6
[[[5, '+', 3], '*', 6]]
-2--11
[[['-', 2], '-', ['-', 11]]]
"""
ret = Forward()
lastExpr = baseExpr | ( lpar + ret + rpar )
for i,operDef in enumerate(opList):
opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4]
termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr
if arity == 3:
if opExpr is None or len(opExpr) != 2:
raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions")
opExpr1, opExpr2 = opExpr
thisExpr = Forward().setName(termName)
if rightLeftAssoc == opAssoc.LEFT:
if arity == 1:
matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) )
else:
matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \
Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
elif rightLeftAssoc == opAssoc.RIGHT:
if arity == 1:
# try to avoid LR with this extra test
if not isinstance(opExpr, Optional):
opExpr = Optional(opExpr)
matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr )
elif arity == 2:
if opExpr is not None:
matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) )
else:
matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) )
elif arity == 3:
matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \
Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr )
else:
raise ValueError("operator must be unary (1), binary (2), or ternary (3)")
else:
raise ValueError("operator must indicate right or left associativity")
if pa:
matchExpr.setParseAction( pa )
thisExpr <<= ( matchExpr.setName(termName) | lastExpr )
lastExpr = thisExpr
ret <<= lastExpr
return ret
operatorPrecedence = infixNotation
"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release."""
dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes")
sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes")
quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'|
Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes")
unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal")
def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()):
"""
Helper method for defining nested lists enclosed in opening and closing
delimiters ("(" and ")" are the default).
Parameters:
- opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression
- closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression
- content - expression for items within the nested lists (default=C{None})
- ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString})
If an expression is not provided for the content argument, the nested
expression will capture all whitespace-delimited content between delimiters
as a list of separate values.
Use the C{ignoreExpr} argument to define expressions that may contain
opening or closing characters that should not be treated as opening
or closing characters for nesting, such as quotedString or a comment
expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}.
The default is L{quotedString}, but if no expressions are to be ignored,
then pass C{None} for this argument.
Example::
data_type = oneOf("void int short long char float double")
decl_data_type = Combine(data_type + Optional(Word('*')))
ident = Word(alphas+'_', alphanums+'_')
number = pyparsing_common.number
arg = Group(decl_data_type + ident)
LPAR,RPAR = map(Suppress, "()")
code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
c_function = (decl_data_type("type")
+ ident("name")
+ LPAR + Optional(delimitedList(arg), [])("args") + RPAR
+ code_body("body"))
c_function.ignore(cStyleComment)
source_code = '''
int is_odd(int x) {
return (x%2);
}
int dec_to_hex(char hchar) {
if (hchar >= '0' && hchar <= '9') {
return (ord(hchar)-ord('0'));
} else {
return (10+ord(hchar)-ord('A'));
}
}
'''
for func in c_function.searchString(source_code):
print("%(name)s (%(type)s) args: %(args)s" % func)
prints::
is_odd (int) args: [['int', 'x']]
dec_to_hex (int) args: [['char', 'hchar']]
"""
if opener == closer:
raise ValueError("opening and closing strings cannot be the same")
if content is None:
if isinstance(opener,basestring) and isinstance(closer,basestring):
if len(opener) == 1 and len(closer)==1:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
).setParseAction(lambda t:t[0].strip()))
else:
if ignoreExpr is not None:
content = (Combine(OneOrMore(~ignoreExpr +
~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
).setParseAction(lambda t:t[0].strip()))
else:
raise ValueError("opening and closing arguments must be strings if no content expression is given")
ret = Forward()
if ignoreExpr is not None:
ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
else:
ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
ret.setName('nested %s%s expression' % (opener,closer))
return ret
def indentedBlock(blockStatementExpr, indentStack, indent=True):
"""
Helper method for defining space-delimited indentation blocks, such as
those used to define block statements in Python source code.
Parameters:
- blockStatementExpr - expression defining syntax of statement that
is repeated within the indented block
- indentStack - list created by caller to manage indentation stack
(multiple statementWithIndentedBlock expressions within a single grammar
should share a common indentStack)
- indent - boolean indicating whether block must be indented beyond the
current level; set to False for a block of left-most statements
(default=C{True})
A valid block must contain at least one C{blockStatement}.
Example::
data = '''
def A(z):
A1
B = 100
G = A2
A2
A3
B
def BB(a,b,c):
BB1
def BBA():
bba1
bba2
bba3
C
D
def spam(x,y):
def eggs(z):
pass
'''
indentStack = [1]
stmt = Forward()
identifier = Word(alphas, alphanums)
funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":")
func_body = indentedBlock(stmt, indentStack)
funcDef = Group( funcDecl + func_body )
rvalue = Forward()
funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")")
rvalue << (funcCall | identifier | Word(nums))
assignment = Group(identifier + "=" + rvalue)
stmt << ( funcDef | assignment | identifier )
module_body = OneOrMore(stmt)
parseTree = module_body.parseString(data)
parseTree.pprint()
prints::
[['def',
'A',
['(', 'z', ')'],
':',
[['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
'B',
['def',
'BB',
['(', 'a', 'b', 'c', ')'],
':',
[['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
'C',
'D',
['def',
'spam',
['(', 'x', 'y', ')'],
':',
[[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
"""
def checkPeerIndent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if curCol != indentStack[-1]:
if curCol > indentStack[-1]:
raise ParseFatalException(s,l,"illegal nesting")
raise ParseException(s,l,"not a peer entry")
def checkSubIndent(s,l,t):
curCol = col(l,s)
if curCol > indentStack[-1]:
indentStack.append( curCol )
else:
raise ParseException(s,l,"not a subentry")
def checkUnindent(s,l,t):
if l >= len(s): return
curCol = col(l,s)
if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]):
raise ParseException(s,l,"not an unindent")
indentStack.pop()
NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress())
INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT')
PEER = Empty().setParseAction(checkPeerIndent).setName('')
UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT')
if indent:
smExpr = Group( Optional(NL) +
#~ FollowedBy(blockStatementExpr) +
INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT)
else:
smExpr = Group( Optional(NL) +
(OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) )
blockStatementExpr.ignore(_bslash + LineEnd())
return smExpr.setName('indented block')
alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]")
punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]")
anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag'))
_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\''))
commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity")
def replaceHTMLEntity(t):
"""Helper parser action to replace common HTML entities with their special characters"""
return _htmlEntityMap.get(t.entity)
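# A minimal sketch of decoding entities with commonHTMLEntity + replaceHTMLEntity,
# kept in comments; the sample string is an assumption.
#
#   decoder = commonHTMLEntity.setParseAction(replaceHTMLEntity)
#   print(decoder.transformString("x &lt; y &amp;&amp; y &gt; z"))
#   # -> x < y && y > z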
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment")
"Comment of the form C{/* ... */}"
htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment")
"Comment of the form C{<!-- ... -->}"
restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line")
dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment")
"Comment of the form C{// ... (to end of line)}"
cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment")
"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}"
javaStyleComment = cppStyleComment
"Same as C{L{cppStyleComment}}"
pythonStyleComment = Regex(r"#.*").setName("Python style comment")
"Comment of the form C{# ... (to end of line)}"
_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') +
Optional( Word(" \t") +
~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem")
commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList")
"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas.
This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}."""
# some other useful expressions - using lower-case class name since we are really using this as a namespace
class pyparsing_common:
"""
Here are some common low-level expressions that may be useful in jump-starting parser development:
- numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>})
- common L{programming identifiers<identifier>}
- network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>})
- ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>}
- L{UUID<uuid>}
- L{comma-separated list<comma_separated_list>}
Parse actions:
- C{L{convertToInteger}}
- C{L{convertToFloat}}
- C{L{convertToDate}}
- C{L{convertToDatetime}}
- C{L{stripHTMLTags}}
- C{L{upcaseTokens}}
- C{L{downcaseTokens}}
Example::
pyparsing_common.number.runTests('''
# any int or real number, returned as the appropriate type
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.fnumber.runTests('''
# any int or real number, returned as float
100
-100
+100
3.14159
6.02e23
1e-12
''')
pyparsing_common.hex_integer.runTests('''
# hex numbers
100
FF
''')
pyparsing_common.fraction.runTests('''
# fractions
1/2
-3/4
''')
pyparsing_common.mixed_integer.runTests('''
# mixed fractions
1
1/2
-3/4
1-3/4
''')
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests('''
# uuid
12345678-1234-5678-1234-567812345678
''')
prints::
# any int or real number, returned as the appropriate type
100
[100]
-100
[-100]
+100
[100]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# any int or real number, returned as float
100
[100.0]
-100
[-100.0]
+100
[100.0]
3.14159
[3.14159]
6.02e23
[6.02e+23]
1e-12
[1e-12]
# hex numbers
100
[256]
FF
[255]
# fractions
1/2
[0.5]
-3/4
[-0.75]
# mixed fractions
1
[1]
1/2
[0.5]
-3/4
[-0.75]
1-3/4
[1.75]
# uuid
12345678-1234-5678-1234-567812345678
[UUID('12345678-1234-5678-1234-567812345678')]
"""
convertToInteger = tokenMap(int)
"""
Parse action for converting parsed integers to Python int
"""
convertToFloat = tokenMap(float)
"""
Parse action for converting parsed numbers to Python float
"""
integer = Word(nums).setName("integer").setParseAction(convertToInteger)
"""expression that parses an unsigned integer, returns an int"""
hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16))
"""expression that parses a hexadecimal integer, returns an int"""
signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger)
"""expression that parses an integer with optional leading sign, returns an int"""
fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction")
"""fractional expression of an integer divided by an integer, returns a float"""
fraction.addParseAction(lambda t: t[0]/t[-1])
mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction")
"""mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
mixed_integer.addParseAction(sum)
real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat)
"""expression that parses a floating point number and returns a float"""
sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat)
"""expression that parses a floating point number with optional scientific notation and returns a float"""
# streamlining this expression makes the docs nicer-looking
number = (sci_real | real | signed_integer).streamline()
"""any numeric expression, returns the corresponding Python type"""
fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat)
"""any int or real number, returned as float"""
identifier = Word(alphas+'_', alphanums+'_').setName("identifier")
"""typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address")
"IPv4 address (C{0.0.0.0 - 255.255.255.255})"
_ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer")
_full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address")
_short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address")
_short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8)
_mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address")
ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address")
"IPv6 address (long, short, or mixed form)"
mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address")
"MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
@staticmethod
def convertToDate(fmt="%Y-%m-%d"):
"""
Helper to create a parse action for converting parsed date string to Python datetime.date
Params -
- fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"})
Example::
date_expr = pyparsing_common.iso8601_date.copy()
date_expr.setParseAction(pyparsing_common.convertToDate())
print(date_expr.parseString("1999-12-31"))
prints::
[datetime.date(1999, 12, 31)]
"""
def cvt_fn(s,l,t):
try:
return datetime.strptime(t[0], fmt).date()
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
@staticmethod
def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"):
"""
Helper to create a parse action for converting parsed datetime string to Python datetime.datetime
Params -
- fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"})
Example::
dt_expr = pyparsing_common.iso8601_datetime.copy()
dt_expr.setParseAction(pyparsing_common.convertToDatetime())
print(dt_expr.parseString("1999-12-31T23:59:59.999"))
prints::
[datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
"""
def cvt_fn(s,l,t):
try:
return datetime.strptime(t[0], fmt)
except ValueError as ve:
raise ParseException(s, l, str(ve))
return cvt_fn
iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date")
"ISO8601 date (C{yyyy-mm-dd})"
iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime")
"ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}"
uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID")
"UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})"
_html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress()
@staticmethod
def stripHTMLTags(s, l, tokens):
"""
Parse action to remove HTML tags from web page HTML source
Example::
# strip HTML links from normal text
text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
td,td_end = makeHTMLTags("TD")
table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end
print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page'
"""
return pyparsing_common._html_stripper.transformString(tokens[0])
_commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',')
+ Optional( White(" \t") ) ) ).streamline().setName("commaItem")
comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list")
"""Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper()))
"""Parse action to convert tokens to upper case."""
downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower()))
"""Parse action to convert tokens to lower case."""
if __name__ == "__main__":
selectToken = CaselessLiteral("select")
fromToken = CaselessLiteral("from")
ident = Word(alphas, alphanums + "_$")
columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
columnNameList = Group(delimitedList(columnName)).setName("columns")
columnSpec = ('*' | columnNameList)
tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens)
tableNameList = Group(delimitedList(tableName)).setName("tables")
simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables")
# demo runTests method, including embedded comments in test string
simpleSQL.runTests("""
# '*' as column list and dotted table name
select * from SYS.XYZZY
# caseless match on "SELECT", and casts back to "select"
SELECT * from XYZZY, ABC
# list of column names, and mixed case SELECT keyword
Select AA,BB,CC from Sys.dual
# multiple tables
Select A, B, C from Sys.dual, Table2
# invalid SELECT keyword - should fail
Xelect A, B, C from Sys.dual
# incomplete command - should fail
Select
# invalid column name - should fail
Select ^^^ frox Sys.dual
""")
pyparsing_common.number.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
# any int or real number, returned as float
pyparsing_common.fnumber.runTests("""
100
-100
+100
3.14159
6.02e23
1e-12
""")
pyparsing_common.hex_integer.runTests("""
100
FF
""")
import uuid
pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID))
pyparsing_common.uuid.runTests("""
12345678-1234-5678-1234-567812345678
""")
| gpl-2.0 |
ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/tensorflow/contrib/layers/python/layers/embedding_ops.py | 2 | 41512 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Embedding functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
__all__ = [
"safe_embedding_lookup_sparse", "scattered_embedding_lookup",
"scattered_embedding_lookup_sparse", "embedding_lookup_unique",
"embedding_lookup_sparse_with_distributed_aggregation"
]
def safe_embedding_lookup_sparse(embedding_weights,
sparse_ids,
sparse_weights=None,
combiner=None,
default_id=None,
name=None,
partition_strategy="div",
max_norm=None):
"""Lookup embedding results, accounting for invalid IDs and empty features.
The partitioned embedding tensors in `embedding_weights` must all be the same shape
except for the first dimension. The first dimension is allowed to vary as the
vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
may be a `PartitionedVariable` as returned by using `tf.get_variable()` with a
partitioner.
Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
with non-positive weight. For an entry with no features, the embedding vector
for `default_id` is returned, or the 0-vector if `default_id` is not supplied.
The ids and weights may be multi-dimensional. Embeddings are always aggregated
along the last dimension.
Args:
embedding_weights: A list of `P` float tensors or values representing
partitioned embedding tensors. Alternatively, a `PartitionedVariable`,
created by partitioning along dimension 0. The total unpartitioned
shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the
vocab size and `e_1, ..., e_m` are the embedding dimensions.
sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
ids. `d_0` is typically batch size.
sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
float weights corresponding to `sparse_ids`, or `None` if all weights
are assumed to be 1.0.
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
the default.
default_id: The id to use for an entry with no features.
name: A name for this operation (optional).
partition_strategy: A string specifying the partitioning strategy.
Currently `"div"` and `"mod"` are supported. Default is `"div"`.
max_norm: If not None, all embeddings are l2-normalized to max_norm before
combining.
Returns:
Dense tensor of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.
Raises:
ValueError: if `embedding_weights` is empty.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if embedding_weights is None:
raise ValueError("Missing embedding_weights %s." % embedding_weights)
if isinstance(embedding_weights, variables.PartitionedVariable):
embedding_weights = list(embedding_weights) # get underlying Variables.
if not isinstance(embedding_weights, list):
embedding_weights = [embedding_weights]
if len(embedding_weights) < 1:
raise ValueError("Missing embedding_weights %s." % embedding_weights)
dtype = sparse_weights.dtype if sparse_weights is not None else None
if isinstance(embedding_weights, variables.PartitionedVariable):
embedding_weights = list(embedding_weights)
embedding_weights = [
ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights
]
contrib_tensor_util.assert_same_float_dtype(embedding_weights +
[sparse_weights])
with ops.name_scope(name, "embedding_lookup",
embedding_weights + [sparse_ids,
sparse_weights]) as scope:
# Reshape higher-rank sparse ids and weights to linear segment ids.
original_shape = sparse_ids.dense_shape
original_rank_dim = sparse_ids.dense_shape.get_shape()[0]
original_rank = (
array_ops.size(original_shape)
if original_rank_dim.value is None
else original_rank_dim.value)
sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
math_ops.reduce_prod(
array_ops.slice(original_shape, [0], [original_rank - 1])),
array_ops.gather(original_shape, original_rank - 1)])
if sparse_weights is not None:
sparse_weights = sparse_tensor.SparseTensor(
sparse_ids.indices,
sparse_weights.values, sparse_ids.dense_shape)
# Prune invalid ids and weights.
sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)
# Fill in dummy values for empty features, if necessary.
sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(sparse_ids,
default_id or
0)
if sparse_weights is not None:
sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)
result = embedding_ops.embedding_lookup_sparse(
embedding_weights,
sparse_ids,
sparse_weights,
combiner=combiner,
partition_strategy=partition_strategy,
name=None if default_id is None else scope,
max_norm=max_norm)
if default_id is None:
# Broadcast is_row_empty to the same shape as embedding_lookup_result,
# for use in Select.
is_row_empty = array_ops.tile(
array_ops.reshape(is_row_empty, [-1, 1]),
array_ops.stack([1, array_ops.shape(result)[1]]))
result = array_ops.where(is_row_empty,
array_ops.zeros_like(result),
result,
name=scope)
# Reshape back from linear ids back into higher-dimensional dense result.
final_result = array_ops.reshape(
result,
array_ops.concat([
array_ops.slice(
math_ops.cast(original_shape, dtypes.int32), [0],
[original_rank - 1]),
array_ops.slice(array_ops.shape(result), [1], [-1])
], 0))
final_result.set_shape(tensor_shape.unknown_shape(
(original_rank_dim - 1).value).concatenate(result.get_shape()[1:]))
return final_result
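# A minimal usage sketch, kept in comments so the module is unchanged; vocab_size,
# embed_dim and the sample SparseTensor below are assumptions. Row 1 is empty and
# id -1 is invalid, so both cases exercise the pruning/default behavior described above.
#
#   embedding_weights = tf.get_variable("embeddings", [vocab_size, embed_dim])
#   sparse_ids = tf.SparseTensor(indices=[[0, 0], [0, 1], [2, 0]],
#                                values=[3, 7, -1],
#                                dense_shape=[3, 2])
#   embedded = safe_embedding_lookup_sparse(embedding_weights, sparse_ids,
#                                           combiner="mean")   # shape [3, embed_dim]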
def _prune_invalid_ids(sparse_ids, sparse_weights):
"""Prune invalid IDs (< 0) from the input ids and weights."""
is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
if sparse_weights is not None:
is_id_valid = math_ops.logical_and(
is_id_valid, math_ops.greater(sparse_weights.values, 0))
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
if sparse_weights is not None:
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
return sparse_ids, sparse_weights
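# Illustration of the pruning rule above, kept in comments; the sample values are
# assumptions: ids [3, -1, 7] with weights [1.0, 2.0, 0.0] keep only id 3, since -1 is
# negative and the weight paired with 7 is not > 0.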
def scattered_embedding_lookup(params,
values,
dimension,
name=None,
hash_key=None):
"""Looks up embeddings using parameter hashing for each value in `values`.
The i-th embedding component of a value v in `values` is found by retrieving
the weight whose index is a fingerprint of the pair (v,i).
The concept is explored as "feature hashing" for model compression in this
paper: http://arxiv.org/pdf/1504.04788.pdf
Feature hashing has the pleasant effect of allowing us to compute an embedding
without needing a pre-determined vocabulary, relieving some amount of process
complexity. It also allows for us to maintain embeddings for possibly
trillions of features with a fixed amount of memory.
Note that this is superior to out-of-vocabulary shared "hash buckets" in that
the embedding is extremely likely to be unique for each token as opposed to
being shared across probably-colliding tokens. The price is that we must
compute a hash once for each scalar in the token's embedding as opposed to
once per token.
If `params` is a list, it represents a partition of the embedding parameters.
Each tensor in the list should have the same length, except for the first ones
which may have an additional element. For instance 10 parameters can be
partitioned in 4 tensors with length `[3, 3, 2, 2]`.
Args:
params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`.
Each tensor must be of rank 1 with fully-defined shape.
values: `Tensor` of values to be embedded with shape `[d0, ..., dn]`.
dimension: Embedding dimension.
name: An optional name for this op.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
Returns:
A `Tensor` with shape `[d0, ..., dn, dimension]`.
Raises:
ValueError: if dimension is not positive or the partition size is invalid.
"""
if dimension is None:
raise ValueError("You must specify dimension.")
return _sampled_scattered_embedding_lookup(
params, values, dimension=dimension, sampled_candidates=None,
hash_key=hash_key, name=name)
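# A minimal usage sketch, kept in comments; the parameter size and the token values
# are assumptions. Each token's i-th embedding component comes from hashing (token, i)
# into the rank-1 params vector, so no vocabulary is required.
#
#   params = tf.get_variable("hash_params", [1000])        # rank-1, fully defined shape
#   tokens = tf.constant(["cat", "dog", "fish"])
#   embedded = scattered_embedding_lookup(params, tokens, dimension=4)   # shape [3, 4]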
def _sampled_scattered_embedding_lookup(
params, values, dimension=None, sampled_candidates=None, hash_key=None,
name=None):
"""Looks up embeddings using parameter hashing for each value in `values`.
This method looks up selected embedding dimensions if `sampled_candidates` is
given, otherwise looks up all dimensions.
The i-th embedding component of a value v in `values` is found by retrieving
the weight whose index is a fingerprint of the pair (v,i).
The concept is explored as "feature hashing" for model compression in this
paper: http://arxiv.org/pdf/1504.04788.pdf
Feature hashing has the pleasant effect of allowing us to compute an embedding
without needing a pre-determined vocabulary, relieving some amount of process
complexity. It also allows for us to maintain embeddings for possibly
trillions of features with a fixed amount of memory.
Note that this is superior to out-of-vocabulary shared "hash buckets" in that
the embedding is extremely likely to be unique for each token as opposed to
being shared across probably-colliding tokens. The price is that we must
compute a hash once for each scalar in the token's embedding as opposed to
once per token.
If `params` is a list, it represents a partition of the embedding parameters.
Each tensor in the list should have the same length, except for the first ones
which may have an additional element. For instance 10 parameters can be
partitioned in 4 tensors with length `[3, 3, 2, 2]`.
Args:
params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`.
Each tensor must be of rank 1 with fully-defined shape.
values: `Tensor` of values to be embedded with shape `[d0, ..., dn]`.
dimension: Embedding dimension. The user must specify either `dimension` or
`sampled_candidates`.
sampled_candidates: An optional `Tensor` of slice indices to keep along the
final dimension with shape `[d0, ..., dn, N]`. If given, `dimension` is
ignored. If `None`, looks up all candidates.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
name: An optional name for this op.
Returns:
A `Tensor` with shape `[d0, ..., dn, dimension]`.
If `sampled_candidates` is given, the output shape is `[d0, ..., dn, N]`
Raises:
ValueError: if dimension is not positive or the partition size is invalid.
"""
if isinstance(params, variables.PartitionedVariable):
params = list(params)
if not isinstance(params, list):
params = [params]
with ops.name_scope(name, "scattered_embedding_lookup",
params + [dimension, values]):
# Flatten the values
values_shape = array_ops.shape(values)
values = array_ops.reshape(values, [-1, 1])
if sampled_candidates is None:
if dimension is None:
raise ValueError(
"You must specify either dimension or sampled_candidates.")
if dimension <= 0:
raise ValueError("Dimension must be >0. Given is %d" % dimension)
sampled_candidates = array_ops.tile(array_ops.expand_dims(
math_ops.range(0, dimension), 0), array_ops.shape(values))
else:
dimension = array_ops.shape(sampled_candidates)[
math_ops.subtract(array_ops.rank(sampled_candidates), 1)]
sampled_candidates_shape = array_ops.shape(sampled_candidates)
dimension_tensor = array_ops.reshape(dimension, shape=[1,])
expected_shape = array_ops.concat([values_shape, dimension_tensor], 0)
with ops.control_dependencies([control_flow_ops.Assert(
math_ops.reduce_all(math_ops.equal(sampled_candidates_shape,
expected_shape)),
["The shape of sampled_candidates: ", sampled_candidates_shape,
" does not match the shape of values: ", values_shape])]):
# Flatten sampled_candidates, same way as values are flattened.
sampled_candidates = array_ops.reshape(sampled_candidates,
[-1, dimension])
num_partitions = len(params)
partition_sizes = []
for p in range(num_partitions):
shape = params[p].get_shape()
shape.assert_has_rank(1)
shape.assert_is_fully_defined()
partition_sizes.append(shape[0].value)
num_params = sum(partition_sizes) # Total number of parameters.
# Assert the size of each partition.
for p in range(num_partitions):
expected_size = (num_params - p - 1) // num_partitions + 1
if partition_sizes[p] != expected_size:
raise ValueError("Tensor %d in params has size %d, expected %d." %
(p, partition_sizes[p], expected_size))
# With two values v1 and v2 and 3 dimensions, we will cross
# [[0, 1, 2], [0, 1, 2]] with [[v1], [v2]].
tensors_to_cross = [sampled_candidates, values]
ids = sparse_feature_cross_op.sparse_feature_cross(
tensors_to_cross, hashed_output=True, num_buckets=num_params,
hash_key=hash_key)
ids = sparse_ops.sparse_tensor_to_dense(ids)
# No need to validate the indices since we have checked the params
# dimensions and we know the largest id.
result = embedding_ops.embedding_lookup(
params, ids, partition_strategy="div")
return array_ops.reshape(result,
array_ops.concat([values_shape, [dimension]], 0))
def scattered_embedding_lookup_sparse(params,
sparse_values,
dimension,
combiner=None,
default_value=None,
name=None,
hash_key=None):
"""Looks up embeddings of a sparse feature using parameter hashing.
See `tf.contrib.layers.scattered_embedding_lookup` for embedding with hashing.
Args:
params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`.
Each tensor must be of rank 1 with fully-defined shape.
sparse_values: A 2-D `SparseTensor` containing the values to be embedded.
Some rows may be empty.
dimension: Embedding dimension
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
the default.
default_value: The value to use for an entry with no features.
name: An optional name for this op.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
Returns:
Dense tensor with shape [N, dimension] with N the number of rows in
sparse_values.
Raises:
TypeError: If sparse_values is not a SparseTensor.
ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if isinstance(params, variables.PartitionedVariable):
params = list(params)
if not isinstance(params, list):
params = [params]
if not isinstance(sparse_values, sparse_tensor.SparseTensor):
raise TypeError("sparse_values must be SparseTensor")
with ops.name_scope(name, "scattered_embedding_lookup_sparse",
params + [sparse_values]) as scope:
# Fill in the empty rows.
if default_value is None:
# Random default values to reduce the risk of collision.
if sparse_values.dtype == dtypes.string:
default_value = "6ZxWzWOHxZ"
else:
default_value = 1288896567
sparse_values, _ = sparse_ops.sparse_fill_empty_rows(
sparse_values, default_value)
segment_ids = sparse_values.indices[:, 0]
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
values = sparse_values.values
values, idx = array_ops.unique(values)
embeddings = scattered_embedding_lookup(
params, values, dimension, hash_key=hash_key)
if combiner == "sum":
embeddings = math_ops.sparse_segment_sum(embeddings, idx, segment_ids,
name=scope)
elif combiner == "mean":
embeddings = math_ops.sparse_segment_mean(embeddings, idx, segment_ids,
name=scope)
elif combiner == "sqrtn":
embeddings = math_ops.sparse_segment_sqrt_n(embeddings, idx, segment_ids,
name=scope)
else:
raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'.")
return embeddings
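# A minimal usage sketch, kept in comments; the sample SparseTensor of string features
# is an assumption.
#
#   params = tf.get_variable("hash_params", [1000])
#   sparse_tokens = tf.SparseTensor(indices=[[0, 0], [0, 1], [1, 0]],
#                                   values=["red", "green", "blue"],
#                                   dense_shape=[2, 2])
#   combined = scattered_embedding_lookup_sparse(params, sparse_tokens,
#                                                dimension=8, combiner="mean")
#   # combined has shape [2, 8]: one combined embedding per row of sparse_tokens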
def embedding_lookup_unique(params, ids, name=None):
"""Version of embedding_lookup that avoids duplicate lookups.
This can save communication in the case of repeated ids.
Same interface as embedding_lookup. Except it supports multi-dimensional `ids`
which allows to not reshape input/output to fit gather.
Args:
params: A list of tensors with the same shape and type, or a
`PartitionedVariable`. Shape `[index, d1, d2, ...]`.
ids: A one-dimensional `Tensor` with type `int32` or `int64` containing
the ids to be looked up in `params`. Shape `[ids1, ids2, ...]`.
name: A name for this operation (optional).
Returns:
A `Tensor` with the same type as the tensors in `params` and dimension of
`[ids1, ids2, d1, d2, ...]`.
Raises:
ValueError: If `params` is empty.
"""
with ops.name_scope(name, "EmbeddingLookupUnique", [params, ids]):
ids = ops.convert_to_tensor(ids)
shape = array_ops.shape(ids)
ids_flat = array_ops.reshape(
ids, math_ops.reduce_prod(shape, keepdims=True))
unique_ids, idx = array_ops.unique(ids_flat)
unique_embeddings = embedding_ops.embedding_lookup(params, unique_ids)
embeds_flat = array_ops.gather(unique_embeddings, idx)
embed_shape = array_ops.concat(
[shape, array_ops.shape(unique_embeddings)[1:]], 0)
embeds = array_ops.reshape(embeds_flat, embed_shape)
embeds.set_shape(ids.get_shape().concatenate(
unique_embeddings.get_shape()[1:]))
return embeds
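# A minimal usage sketch, kept in comments; the table shape and ids are assumptions.
# Only the unique ids {1, 2, 5} are looked up; the results are then gathered back to
# the original [2, 3] shape.
#
#   params = tf.get_variable("table", [100, 16])
#   ids = tf.constant([[1, 5, 1], [5, 5, 2]])
#   embeds = embedding_lookup_unique(params, ids)    # shape [2, 3, 16]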
def _sampled_scattered_embedding_lookup_sparse(params,
sp_values,
dimension=None,
sampled_candidates=None,
hash_key=None,
with_sign_hash=False,
name=None):
"""Looks up embeddings using parameter hashing for sparse values.
This method looks up selected embedding dimensions if `sampled_candidates` is
given, otherwise looks up all dimensions.
The i-th embedding component of a value v in `values` is found by retrieving
the weight whose index is a fingerprint of the pair (v,i).
The concept is explored as "feature hashing" for model compression in this
paper: http://arxiv.org/pdf/1504.04788.pdf
This is logically equivalent to:
* Transforming `sp_values` (which has shape `[d0, d1]`) into a one-hot
`Tensor` of shape `[d0, N]`.
* Multiplying with a `Tensor` `h` of shape `[N, dimension]`, where
`h(i, j) = params[hash(i, j)]`.
Args:
params: A float `Tensor` with rank 1 and fully-defined shape.
sp_values: A 2D `SparseTensor` to be embedded with shape `[d0, d1]`.
dimension: An int `Tensor` of the final dimension. The user needs to provide
either `dimension` or `sampled_candidates`.
sampled_candidates: An optional `Tensor` of column indices to keep along
the final dimension with shape `[d0, N]`. If given, `dimension` is
ignored. If `None`, looks up all candidates.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
with_sign_hash: A `bool` indicating whether `h(i, j)` should be multiplied
by `+1` or `-1`, where the value selected is determined by hashing
`(i, j)`. This is often necessary to remove bias resulting from hash
collisions.
name: An optional name for this op.
Returns:
A `Tensor` of shape `[d0, dimension]`.
If `sampled_candidates` is given, the output shape is `[d0, N]`.
Raises:
TypeError: If sp_values is not `SparseTensor`.
ValueError: If both `dimension` and `sampled_candidates` are `None`.
"""
if not isinstance(sp_values, sparse_tensor.SparseTensor):
raise TypeError("sp_values must be SparseTensor")
with ops.name_scope(
name=name,
default_name="sampled_scattered_embedding_lookup_sparse",
values=[sp_values, params, dimension, sampled_candidates]) as name_scope:
segment_ids = sp_values.indices[:, 0]
if sampled_candidates is not None:
# Tile sampled_candidates so there is one line corresponding to each
# element in sp_values.values
sampled_candidates = array_ops.gather(sampled_candidates, segment_ids)
embeddings = _sampled_scattered_embedding_lookup(
params, sp_values.values, dimension=dimension,
sampled_candidates=sampled_candidates,
hash_key=hash_key, name="values_lookup")
if with_sign_hash:
signs = _sampled_scattered_embedding_lookup(
array_ops.constant([-1., 1.]), sp_values.values, dimension=dimension,
sampled_candidates=sampled_candidates, hash_key=hash_key,
name="signs_lookup")
embeddings = math_ops.multiply(signs, embeddings, name="signs_hash")
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
num_segments = array_ops.shape(sp_values)[0]
return math_ops.unsorted_segment_sum(embeddings, segment_ids,
num_segments=num_segments,
name=name_scope)
def embedding_lookup_sparse_with_distributed_aggregation(
params,
sp_ids,
sp_weights,
partition_strategy="mod",
name=None,
combiner=None,
max_norm=None):
"""Computes embeddings for the given ids and weights.
Embeddings belonging to the same param are aggregated on that device first. This
op is intended to decrease data transmission and improve parallelism. See
`tf.nn.embedding_lookup_sparse` for the functionality and example of this op.
Args:
params: A single tensor representing the complete embedding tensor,
or a list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors. Alternatively, a
`PartitionedVariable`, created by partitioning along dimension 0. Each
element must be appropriately sized for the given `partition_strategy`.
sp_ids: N x M SparseTensor of int64 ids (typically from FeatureValueToId),
where N is typically batch size and M is arbitrary.
sp_weights: either a SparseTensor of float / double weights, or None to
indicate all weights should be taken to be 1. If specified, sp_weights
must have exactly the same shape and indices as sp_ids.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: Optional name for the op.
combiner: A string specifying the reduction op. Currently "mean", "sqrtn"
and "sum" are supported.
"sum" computes the weighted sum of the embedding results for each row.
"mean" is the weighted sum divided by the total weight.
"sqrtn" is the weighted sum divided by the square root of the sum of the
squares of the weights.
max_norm: If not None, each embedding is normalized to have l2 norm equal
to max_norm before combining.
Returns:
A dense tensor representing the combined embeddings for the
sparse ids. For each row in the dense tensor represented by sp_ids, the op
looks up the embeddings for all ids in that row, multiplies them by the
corresponding weight, and combines these embeddings as specified.
Raises:
TypeError: If sp_ids is not a SparseTensor, or if sp_weights is neither
None nor SparseTensor.
ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if combiner not in ("mean", "sqrtn", "sum"):
raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'")
if isinstance(params, variables.PartitionedVariable):
params = list(params) # Iterate to get the underlying Variables.
if not isinstance(params, list):
params = [params]
if not isinstance(sp_ids, sparse_tensor.SparseTensor):
raise TypeError("sp_ids must be SparseTensor")
ignore_weights = sp_weights is None
if not ignore_weights:
if not isinstance(sp_weights, sparse_tensor.SparseTensor):
raise TypeError("sp_weights must be either None or SparseTensor")
sp_ids.values.get_shape().assert_is_compatible_with(
sp_weights.values.get_shape())
sp_ids.indices.get_shape().assert_is_compatible_with(
sp_weights.indices.get_shape())
sp_ids.dense_shape.get_shape().assert_is_compatible_with(
sp_weights.dense_shape.get_shape())
# TODO(yleon): Add enhanced node assertions to verify that sp_ids and
# sp_weights have equal indices and shapes.
with ops.name_scope(name, "embedding_lookup_sparse",
params + [sp_ids]) as name:
segment_ids = sp_ids.indices[:, 0]
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
ids = sp_ids.values
if ignore_weights:
ids, idx = array_ops.unique(ids)
else:
idx = None
weights = None if ignore_weights else sp_weights.values
embeddings = _embedding_lookup_with_distributed_aggregation(
params,
ids,
partition_strategy=partition_strategy,
max_norm=max_norm,
weights=weights,
idx=idx,
segment_ids=segment_ids)
# Set weights to all one if ignore weights.
if ignore_weights:
weights = array_ops.fill([array_ops.shape(segment_ids)[0]], 1)
if weights.dtype != embeddings.dtype:
weights = math_ops.cast(weights, embeddings.dtype)
# Reshape weights.
ones = array_ops.fill(
array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1)
bcast_weights_shape = array_ops.concat([array_ops.shape(weights), ones], 0)
orig_weights_shape = weights.get_shape()
weights = array_ops.reshape(weights, bcast_weights_shape)
if embeddings.get_shape().ndims is not None:
weights.set_shape(
orig_weights_shape.concatenate(
[1 for _ in range(embeddings.get_shape().ndims - 1)]))
if combiner == "mean":
weight_sum = math_ops.segment_sum(weights, segment_ids)
embeddings = math_ops.div(embeddings, weight_sum)
elif combiner == "sqrtn":
weights_squared = math_ops.pow(weights, 2)
weight_sum = math_ops.segment_sum(weights_squared, segment_ids)
weight_sum_sqrt = math_ops.sqrt(weight_sum)
embeddings = math_ops.div(embeddings, weight_sum_sqrt)
elif combiner != "sum":
assert False, "Unrecognized combiner"
return embeddings
def _do_gather(params, ids, name=None):
"""Deals with doing gather differently for resource variables."""
if isinstance(params, resource_variable_ops.ResourceVariable):
return params.sparse_read(ids, name=name)
return array_ops.gather(params, ids, name=name)
def _embedding_lookup_with_distributed_aggregation(params,
ids,
partition_strategy="mod",
name=None,
max_norm=None,
weights=None,
idx=None,
segment_ids=None):
"""Lookup helper for embedding_lookup_sparse_with_distributed_aggregation."""
if params is None or params == []: # pylint: disable=g-explicit-bool-comparison
raise ValueError("Need at least one param")
if isinstance(params, variables.PartitionedVariable):
params = list(params) # Iterate to get the underlying Variables.
if not isinstance(params, list):
params = [params]
def maybe_normalize(x):
if max_norm is not None:
if x.get_shape().ndims is not None:
ndims = x.get_shape().ndims
else:
ndims = array_ops.size(array_ops.shape(x))
return clip_ops.clip_by_norm(x, max_norm, axes=list(range(1, ndims)))
return x
with ops.name_scope(name, "embedding_lookup_with_distributed_aggregation",
params + [ids]) as name:
np = len(params) # Number of partitions
# Preserve the resource variable status to avoid accidental dense reads.
if not any(
isinstance(p, resource_variable_ops.ResourceVariable) for p in params):
params = ops.convert_n_to_tensor_or_indexed_slices(params, name="params")
if np == 1:
with ops.colocate_with(params[0]):
ret = maybe_normalize(_do_gather(params[0], ids))
ignore_weights = weights is None
if not ignore_weights:
if weights.dtype != ret.dtype:
weights = math_ops.cast(weights, ret.dtype)
# Reshape to allow broadcast
ones = array_ops.fill(
array_ops.expand_dims(array_ops.rank(ret) - 1, 0), 1)
bcast_weights_shape = array_ops.concat(
[array_ops.shape(weights), ones], 0)
orig_weights_shape = weights.get_shape()
weights = array_ops.reshape(weights, bcast_weights_shape)
# Set weights shape after reshape
if ret.get_shape().ndims is not None:
weights.set_shape(
orig_weights_shape.concatenate(
[1 for _ in range(ret.get_shape().ndims - 1)]))
ret *= weights
return math_ops.segment_sum(ret, segment_ids, name=name)
else:
return math_ops.sparse_segment_sum(ret, idx, segment_ids, name=name)
else:
ids = ops.convert_to_tensor(ids, name="ids")
flat_ids = array_ops.reshape(ids, [-1])
original_indices = math_ops.range(array_ops.size(flat_ids))
# Create p_assignments and set new_ids depending on the strategy.
if partition_strategy == "mod":
p_assignments = flat_ids % np
new_ids = flat_ids // np
elif partition_strategy == "div":
# Compute num_total_ids as the sum of dim-0 of params, then assign to
# partitions based on a constant number of ids per partition. Optimize
# if we already know the full shape statically.
dim_0_size = params[0].get_shape()[0]
for p in xrange(1, np):
dim_0_size += params[p].get_shape()[0]
if dim_0_size.value:
num_total_ids = constant_op.constant(dim_0_size.value, flat_ids.dtype)
else:
dim_0_sizes = []
for p in xrange(np):
if params[p].get_shape()[0].value is not None:
dim_0_sizes.append(params[p].get_shape()[0].value)
else:
with ops.colocate_with(params[p]):
dim_0_sizes.append(array_ops.shape(params[p])[0])
num_total_ids = math_ops.reduce_sum(
math_ops.cast(array_ops.stack(dim_0_sizes), flat_ids.dtype))
ids_per_partition = num_total_ids // np
extras = num_total_ids % np
p_assignments = math_ops.maximum(flat_ids // (ids_per_partition + 1), (
flat_ids - extras) // ids_per_partition)
# Emulate a conditional using a boolean indicator tensor
is_in_first_extras_partitions = math_ops.cast(p_assignments < extras,
flat_ids.dtype)
new_ids = (is_in_first_extras_partitions * (flat_ids %
(ids_per_partition + 1)) +
(1 - is_in_first_extras_partitions) * (
(flat_ids - extras) % ids_per_partition))
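# Worked example (illustrative values, not from the original code): with
# num_total_ids = 10 and np = 3, ids_per_partition = 3 and extras = 1, so
# partition 0 holds ids 0-3 and partitions 1 and 2 hold ids 4-6 and 7-9.
# For flat_id = 4: max(4 // 4, (4 - 1) // 3) = 1, so it goes to partition 1
# with new_id = (4 - 1) % 3 = 0.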
else:
raise ValueError("Unrecognized partition strategy: " +
partition_strategy)
# Cast partition assignments to int32 for use in dynamic_partition.
# There really should not be more than 2^32 partitions.
p_assignments = math_ops.cast(p_assignments, dtypes.int32)
# Partition list of ids based on assignments into np separate lists
gather_ids = data_flow_ops.dynamic_partition(new_ids, p_assignments, np)
# Similarly, partition the original indices.
pindices = data_flow_ops.dynamic_partition(original_indices,
p_assignments, np)
# Do np separate lookups, finding embeddings for plist[p] in params[p]
partitioned_result = []
for p in xrange(np):
with ops.colocate_with(params[p]):
partitioned_result.append(_do_gather(params[p], gather_ids[p]))
ignore_weights = weights is None
if not ignore_weights:
# Partition weights according to pindices.
partitioned_weight = []
for p in xrange(np):
partitioned_weight.append(array_ops.gather(weights, pindices[p]))
# Reshape each partition result.
element_shape = params[0].get_shape()[1:]
for p in params[1:]:
element_shape = element_shape.merge_with(p.get_shape()[1:])
if element_shape.is_fully_defined():
for p in xrange(np):
with ops.colocate_with(params[p]):
partitioned_result[p] = array_ops.reshape(
partitioned_result[p],
array_ops.concat([array_ops.shape(pindices[p]), element_shape],
0))
else:
with ops.colocate_with(params[0]):
params_shape = array_ops.shape(params[0])
for p in xrange(np):
with ops.colocate_with(params[p]):
partitioned_result[p] = array_ops.reshape(
partitioned_result[p],
array_ops.concat([
array_ops.shape(pindices[p]), array_ops.slice(
params_shape, [1], [-1])
], 0))
# Normalize each partition result.
for p in xrange(np):
with ops.colocate_with(params[p]):
partitioned_result[p] = maybe_normalize(partitioned_result[p])
if not ignore_weights:
# Multiply each partition result with partition weights.
for p in xrange(np):
with ops.colocate_with(params[p]):
if partitioned_weight[p].dtype != partitioned_result[p].dtype:
partitioned_weight[p] = math_ops.cast(partitioned_weight[p],
partitioned_result[p].dtype)
# Reshape partition weights.
ones = array_ops.fill(
array_ops.expand_dims(
array_ops.rank(partitioned_result[p]) - 1, 0), 1)
bcast_weights_shape = array_ops.concat(
[array_ops.shape(partitioned_weight[p]), ones], 0)
orig_weights_shape = partitioned_weight[p].get_shape()
partitioned_weight[p] = array_ops.reshape(partitioned_weight[p],
bcast_weights_shape)
if partitioned_result[p].get_shape().ndims is not None:
partitioned_weight[p].set_shape(
orig_weights_shape.concatenate([
1
for _ in range(partitioned_result[p].get_shape().ndims -
1)
]))
partitioned_result[p] *= partitioned_weight[p]
partitioned_segment_ids = []
for p in xrange(np):
if not ignore_weights:
# Partition segment_ids according to pindices.
p_segment_ids = array_ops.gather(segment_ids, pindices[p])
# Number the p_segment_ids to meet segment_sum's requirements. Note
# that unique_p_segment_ids contains unique segment ids of this
# partition and these ids' order is unchanged.
unique_p_segment_ids, unique_p_segment_idx = array_ops.unique(
p_segment_ids)
partitioned_segment_ids.append(unique_p_segment_ids)
# segment_sum this partition's result.
with ops.colocate_with(params[p]):
partitioned_result[p] = math_ops.segment_sum(
partitioned_result[p], unique_p_segment_idx)
else:
# When ignoring weights, we need to get the indices of the elements in idx
# and segment_ids that belong to this partition.
_, exclude_idx = array_ops.setdiff1d(idx, pindices[p])
all_idx = math_ops.range(array_ops.shape(idx)[0])
_, include_idx = array_ops.setdiff1d(all_idx, exclude_idx)
# Gather segment_ids and idx according to those indices.
p_segment_ids = array_ops.gather(segment_ids, include_idx)
p_idx = array_ops.gather(idx, include_idx)
# Number the p_segment_ids, same as in the weighted case above.
unique_p_segment_ids, unique_p_segment_idx = array_ops.unique(
p_segment_ids)
_, unique_p_idx_idx = array_ops.unique(p_idx)
partitioned_segment_ids.append(unique_p_segment_ids)
with ops.colocate_with(params[p]):
partitioned_result[p] = math_ops.sparse_segment_sum(
partitioned_result[p], unique_p_idx_idx, unique_p_segment_idx)
# Concat each partition's segment_ids and result for final segment_sum.
concat_segment_ids = array_ops.concat(partitioned_segment_ids, 0)
concat_partitioned_result = array_ops.concat(partitioned_result, 0)
return math_ops.unsorted_segment_sum(
concat_partitioned_result,
concat_segment_ids,
math_ops.reduce_max(concat_segment_ids) + 1,
name=name)
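# Summary of the flow above: per-partition embeddings are gathered, optionally
# clipped to max_norm and weighted, segment-summed within each partition, and
# the partial sums are combined across partitions with unsorted_segment_sum.
# (idx and segment_ids are presumably derived from the SparseTensor of ids in
# the public wrapper, which is not shown here.)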
| mit |
nlproc/splunkml | bin/multiclassify.py | 1 | 2142 | import sys, os, itertools
try:
import cStringIO as StringIO
except:
import StringIO
import numpy as np
import scipy.sparse as sp
from gensim.corpora import TextCorpus
from gensim.models import LsiModel, TfidfModel, LdaModel
from gensim.matutils import corpus2csc
from sklearn.feature_extraction import FeatureHasher
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import PCA
def is_number(value):
try:
float(value)
return True
except (ValueError, TypeError):
return False
def process_records(records, fields, target, textmodel=None):
tokenize = CountVectorizer().build_analyzer()
input = None
X = None
y_labels = []
for i, record in enumerate(records):
nums = []
strs = []
y_labels.append(record.get(target))
for field in fields:
if is_number(record.get(field)):
nums.append(record[field])
else:
strs.append(str(record.get(field) or "").lower())
if strs:
if input is None:
input = StringIO.StringIO()
print >> input, " ".join(tokenize(" ".join(strs)))
if nums:
if X is None:
X = sp.lil_matrix((len(records),len(nums)))
X[i] = np.array(nums, dtype=np.float64)
if input is not None:
if X is not None:
X_2 = X.tocsr()
else:
X_2 = None
if isinstance(textmodel,basestring):
if textmodel == 'lsi':
corpus = TextCorpus(input)
textmodel = LsiModel(corpus, chunksize=1000)
elif textmodel == 'tfidf':
corpus = TextCorpus(input)
textmodel = TfidfModel(corpus)
elif textmodel == 'hashing':
textmodel = None
hasher = FeatureHasher(n_features=2 ** 18, input_type="string")
input.seek(0)
X = hasher.transform(tokenize(line.strip()) for line in input)
if textmodel:
num_terms = len(textmodel.id2word or getattr(textmodel, 'dfs',[]))
X = corpus2csc(textmodel[corpus], num_terms).transpose()
if X_2 is not None:
# print >> sys.stderr, "X SHAPE:", X.shape
# print >> sys.stderr, "X_2 SHAPE:", X_2.shape
X = sp.hstack([X, X_2], format='csr')
elif X is not None:
textmodel = None
X = X.tocsr()
print >> sys.stderr, "X SHAPE:", X.shape
return X, y_labels, textmodel
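# Illustrative usage (record and field names below are hypothetical, not from
# the original script):
#   records = [{'bytes': '512', 'uri': '/index.html', 'status': '200'},
#              {'bytes': '30', 'uri': '/missing', 'status': '404'}]
#   X, y, model = process_records(records, fields=['bytes', 'uri'],
#                                 target='status', textmodel='hashing')
# X is a sparse matrix of hashed text features hstacked with the numeric
# columns, y holds the target labels, and model is the fitted gensim model
# (None on the hashing path).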
| apache-2.0 |
stefanw/django-cms | cms/tests/admin.py | 8 | 76720 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import json
import datetime
from cms import api
from cms.utils.urlutils import admin_reverse
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from djangocms_text_ckeditor.models import Text
from django.contrib import admin
from django.contrib.admin.models import LogEntry
from django.contrib.admin.sites import site
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission, AnonymousUser
from django.contrib.sites.models import Site
from django.core.urlresolvers import reverse
from django.http import (Http404, HttpResponseBadRequest, HttpResponseForbidden, HttpResponse,
QueryDict, HttpResponseNotFound)
from django.utils.datastructures import MultiValueDictKeyError
from django.utils.encoding import force_text, smart_str
from django.utils import timezone
from django.utils.six.moves.urllib.parse import urlparse
from cms.admin.change_list import CMSChangeList
from cms.admin.forms import PageForm, AdvancedSettingsForm
from cms.admin.pageadmin import PageAdmin
from cms.admin.permissionadmin import PagePermissionInlineAdmin
from cms.api import create_page, create_title, add_plugin, assign_user_to_page, publish_page
from cms.constants import PLUGIN_MOVE_ACTION
from cms.models import UserSettings, StaticPlaceholder
from cms.models.pagemodel import Page
from cms.models.permissionmodels import GlobalPagePermission, PagePermission
from cms.models.placeholdermodel import Placeholder
from cms.models.pluginmodel import CMSPlugin
from cms.models.titlemodels import Title
from cms.test_utils import testcases as base
from cms.test_utils.testcases import CMSTestCase, URL_CMS_PAGE_DELETE, URL_CMS_PAGE, URL_CMS_TRANSLATION_DELETE
from cms.test_utils.util.fuzzy_int import FuzzyInt
from cms.utils import get_cms_setting
from cms.utils.compat import DJANGO_1_6
class AdminTestsBase(CMSTestCase):
@property
def admin_class(self):
return site._registry[Page]
def _get_guys(self, admin_only=False, use_global_permissions=True):
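# Returns a superuser and, unless admin_only is True, also a staff user with
# page/title add, change and delete permissions, optionally backed by a
# GlobalPagePermission covering all sites.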
admin_user = self.get_superuser()
if admin_only:
return admin_user
USERNAME = 'test'
if get_user_model().USERNAME_FIELD == 'email':
normal_guy = get_user_model().objects.create_user(USERNAME, '[email protected]', '[email protected]')
else:
normal_guy = get_user_model().objects.create_user(USERNAME, '[email protected]', USERNAME)
normal_guy.is_staff = True
normal_guy.is_active = True
normal_guy.save()
normal_guy.user_permissions = Permission.objects.filter(
codename__in=['change_page', 'change_title', 'add_page', 'add_title', 'delete_page', 'delete_title']
)
if use_global_permissions:
gpp = GlobalPagePermission.objects.create(
user=normal_guy,
can_change=True,
can_delete=True,
can_change_advanced_settings=False,
can_publish=True,
can_change_permissions=False,
can_move_page=True,
)
gpp.sites = Site.objects.all()
return admin_user, normal_guy
class AdminTestCase(AdminTestsBase):
def test_extension_not_in_admin(self):
admin_user, staff = self._get_guys()
with self.login_user_context(admin_user):
request = self.get_request('/admin/cms/page/1/', 'en',)
response = site.index(request)
self.assertNotContains(response, '/mytitleextension/')
self.assertNotContains(response, '/mypageextension/')
def test_permissioned_page_list(self):
"""
Makes sure that a user with restricted page permissions can view
the page list.
"""
admin_user, normal_guy = self._get_guys(use_global_permissions=False)
current_site = Site.objects.get(pk=1)
page = create_page("Test page", "nav_playground.html", "en",
site=current_site, created_by=admin_user)
PagePermission.objects.create(page=page, user=normal_guy)
with self.login_user_context(normal_guy):
resp = self.client.get(URL_CMS_PAGE)
self.assertEqual(resp.status_code, 200)
def test_edit_does_not_reset_page_adv_fields(self):
"""
Makes sure that if a non-superuser with no rights to edit advanced page
fields edits a page, those advanced fields are not touched.
"""
OLD_PAGE_NAME = 'Test Page'
NEW_PAGE_NAME = 'Test page 2'
REVERSE_ID = 'Test'
OVERRIDE_URL = 'my/override/url'
admin_user, normal_guy = self._get_guys()
current_site = Site.objects.get(pk=1)
# The admin creates the page
page = create_page(OLD_PAGE_NAME, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page.reverse_id = REVERSE_ID
page.save()
title = page.get_title_obj()
title.has_url_overwrite = True
title.path = OVERRIDE_URL
title.save()
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(title.overwrite_url, OVERRIDE_URL)
# The user edits the page (change the page name for ex.)
page_data = {
'title': NEW_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'pagepermission_set-TOTAL_FORMS': 0,
'pagepermission_set-INITIAL_FORMS': 0,
'pagepermission_set-MAX_NUM_FORMS': 0,
'pagepermission_set-2-TOTAL_FORMS': 0,
'pagepermission_set-2-INITIAL_FORMS': 0,
'pagepermission_set-2-MAX_NUM_FORMS': 0
}
# required only if the user has can_change_permission
with self.login_user_context(normal_guy):
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), NEW_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
title = page.get_title_obj()
self.assertEqual(title.overwrite_url, OVERRIDE_URL)
# The admin edits the page (change the page name for ex.)
page_data = {
'title': OLD_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'reverse_id': page.reverse_id,
'pagepermission_set-TOTAL_FORMS': 0, # required only if the user has can_change_permission
'pagepermission_set-INITIAL_FORMS': 0,
'pagepermission_set-MAX_NUM_FORMS': 0,
'pagepermission_set-2-TOTAL_FORMS': 0,
'pagepermission_set-2-INITIAL_FORMS': 0,
'pagepermission_set-2-MAX_NUM_FORMS': 0
}
with self.login_user_context(admin_user):
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
title = page.get_title_obj()
self.assertEqual(title.overwrite_url, OVERRIDE_URL)
def test_edit_does_not_reset_apphook(self):
"""
Makes sure that if a non-superuser with no rights to edit advanced page
fields edits a page, those advanced fields are not touched.
"""
OLD_PAGE_NAME = 'Test Page'
NEW_PAGE_NAME = 'Test page 2'
REVERSE_ID = 'Test'
APPLICATION_URLS = 'project.sampleapp.urls'
admin_user, normal_guy = self._get_guys()
current_site = Site.objects.get(pk=1)
# The admin creates the page
page = create_page(OLD_PAGE_NAME, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page.reverse_id = REVERSE_ID
page.save()
title = page.get_title_obj()
title.has_url_overwrite = True
title.save()
page.application_urls = APPLICATION_URLS
page.save()
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(page.application_urls, APPLICATION_URLS)
# The user edits the page (change the page name for ex.)
page_data = {
'title': NEW_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'pagepermission_set-TOTAL_FORMS': 0,
'pagepermission_set-INITIAL_FORMS': 0,
'pagepermission_set-MAX_NUM_FORMS': 0,
'pagepermission_set-2-TOTAL_FORMS': 0,
'pagepermission_set-2-INITIAL_FORMS': 0,
'pagepermission_set-2-MAX_NUM_FORMS': 0,
}
with self.login_user_context(normal_guy):
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), NEW_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(page.application_urls, APPLICATION_URLS)
title = page.get_title_obj()
# The admin edits the page (change the page name for ex.)
page_data = {
'title': OLD_PAGE_NAME,
'slug': page.get_slug(),
'language': title.language,
'site': page.site.pk,
'template': page.template,
'reverse_id': page.reverse_id,
}
with self.login_user_context(admin_user):
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
resp = self.client.post(base.URL_CMS_PAGE_CHANGE % page.pk, page_data,
follow=True)
self.assertEqual(resp.status_code, 200)
self.assertTemplateNotUsed(resp, 'admin/login.html')
page = Page.objects.get(pk=page.pk)
self.assertEqual(page.get_title(), OLD_PAGE_NAME)
self.assertEqual(page.reverse_id, REVERSE_ID)
self.assertEqual(page.application_urls, '')
def test_2apphooks_with_same_namespace(self):
PAGE1 = 'Test Page'
PAGE2 = 'Test page 2'
APPLICATION_URLS = 'project.sampleapp.urls'
admin_user, normal_guy = self._get_guys()
current_site = Site.objects.get(pk=1)
# The admin creates the page
page = create_page(PAGE1, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page2 = create_page(PAGE2, "nav_playground.html", "en",
site=current_site, created_by=admin_user)
page.application_urls = APPLICATION_URLS
page.application_namespace = "space1"
page.save()
page2.application_urls = APPLICATION_URLS
page2.save()
# The admin edits the page (change the page name for ex.)
page_data = {
'title': PAGE2,
'slug': page2.get_slug(),
'language': 'en',
'site': page.site.pk,
'template': page2.template,
'application_urls': 'SampleApp',
'application_namespace': 'space1',
}
with self.login_user_context(admin_user):
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page.pk, page_data)
self.assertEqual(resp.status_code, 302)
self.assertEqual(Page.objects.filter(application_namespace="space1").count(), 1)
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page_data)
self.assertEqual(resp.status_code, 200)
page_data['application_namespace'] = 'space2'
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page_data)
self.assertEqual(resp.status_code, 302)
def test_delete(self):
admin_user = self.get_superuser()
create_page("home", "nav_playground.html", "en",
created_by=admin_user, published=True)
page = create_page("delete-page", "nav_playground.html", "en",
created_by=admin_user, published=True)
create_page('child-page', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=page)
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
page.publish('en')
with self.login_user_context(admin_user):
data = {'post': 'yes'}
with self.assertNumQueries(FuzzyInt(300, 407)):
response = self.client.post(URL_CMS_PAGE_DELETE % page.pk, data)
self.assertRedirects(response, URL_CMS_PAGE)
def test_delete_diff_language(self):
admin_user = self.get_superuser()
create_page("home", "nav_playground.html", "en",
created_by=admin_user, published=True)
page = create_page("delete-page", "nav_playground.html", "en",
created_by=admin_user, published=True)
create_page('child-page', "nav_playground.html", "de",
created_by=admin_user, published=True, parent=page)
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
page.publish('en')
with self.login_user_context(admin_user):
data = {'post': 'yes'}
with self.assertNumQueries(FuzzyInt(300, 394)):
response = self.client.post(URL_CMS_PAGE_DELETE % page.pk, data)
self.assertRedirects(response, URL_CMS_PAGE)
def test_search_fields(self):
superuser = self.get_superuser()
from django.contrib.admin import site
with self.login_user_context(superuser):
for model, admin_instance in site._registry.items():
if model._meta.app_label != 'cms':
continue
if not admin_instance.search_fields:
continue
url = admin_reverse('cms_%s_changelist' % model._meta.model_name)
response = self.client.get('%s?q=1' % url)
errmsg = response.content
self.assertEqual(response.status_code, 200, errmsg)
def test_delete_translation(self):
admin_user = self.get_superuser()
page = create_page("delete-page-translation", "nav_playground.html", "en",
created_by=admin_user, published=True)
create_title("de", "delete-page-translation-2", page, slug="delete-page-translation-2")
create_title("es-mx", "delete-page-translation-es", page, slug="delete-page-translation-es")
with self.login_user_context(admin_user):
response = self.client.get(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'de'})
self.assertEqual(response.status_code, 200)
response = self.client.post(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'de'})
self.assertRedirects(response, URL_CMS_PAGE)
response = self.client.get(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'es-mx'})
self.assertEqual(response.status_code, 200)
response = self.client.post(URL_CMS_TRANSLATION_DELETE % page.pk, {'language': 'es-mx'})
self.assertRedirects(response, URL_CMS_PAGE)
def test_change_dates(self):
admin_user, staff = self._get_guys()
page = create_page('test-page', 'nav_playground.html', 'en')
page.publish('en')
draft = page.get_draft_object()
with self.settings(USE_TZ=False):
original_date = draft.publication_date
original_end_date = draft.publication_end_date
new_date = timezone.now() - datetime.timedelta(days=1)
new_end_date = timezone.now() + datetime.timedelta(days=1)
url = admin_reverse('cms_page_dates', args=(draft.pk,))
with self.login_user_context(admin_user):
response = self.client.post(url, {
'language': 'en',
'site': draft.site.pk,
'publication_date_0': new_date.date(),
'publication_date_1': new_date.strftime("%H:%M:%S"),
'publication_end_date_0': new_end_date.date(),
'publication_end_date_1': new_end_date.strftime("%H:%M:%S"),
})
self.assertEqual(response.status_code, 302)
draft = Page.objects.get(pk=draft.pk)
self.assertNotEqual(draft.publication_date.timetuple(), original_date.timetuple())
self.assertEqual(draft.publication_date.timetuple(), new_date.timetuple())
self.assertEqual(draft.publication_end_date.timetuple(), new_end_date.timetuple())
if original_end_date:
self.assertNotEqual(draft.publication_end_date.timetuple(), original_end_date.timetuple())
with self.settings(USE_TZ=True):
original_date = draft.publication_date
original_end_date = draft.publication_end_date
new_date = timezone.localtime(timezone.now()) - datetime.timedelta(days=1)
new_end_date = timezone.localtime(timezone.now()) + datetime.timedelta(days=1)
url = admin_reverse('cms_page_dates', args=(draft.pk,))
with self.login_user_context(admin_user):
response = self.client.post(url, {
'language': 'en',
'site': draft.site.pk,
'publication_date_0': new_date.date(),
'publication_date_1': new_date.strftime("%H:%M:%S"),
'publication_end_date_0': new_end_date.date(),
'publication_end_date_1': new_end_date.strftime("%H:%M:%S"),
})
self.assertEqual(response.status_code, 302)
draft = Page.objects.get(pk=draft.pk)
self.assertNotEqual(draft.publication_date.timetuple(), original_date.timetuple())
self.assertEqual(timezone.localtime(draft.publication_date).timetuple(), new_date.timetuple())
self.assertEqual(timezone.localtime(draft.publication_end_date).timetuple(), new_end_date.timetuple())
if original_end_date:
self.assertNotEqual(draft.publication_end_date.timetuple(), original_end_date.timetuple())
def test_change_template(self):
admin_user, staff = self._get_guys()
request = self.get_request('/admin/cms/page/1/', 'en')
request.method = "POST"
pageadmin = site._registry[Page]
with self.login_user_context(staff):
self.assertRaises(Http404, pageadmin.change_template, request, 1)
page = create_page('test-page', 'nav_playground.html', 'en')
response = pageadmin.change_template(request, page.pk)
self.assertEqual(response.status_code, 403)
url = admin_reverse('cms_page_change_template', args=(page.pk,))
with self.login_user_context(admin_user):
response = self.client.post(url, {'template': 'doesntexist'})
self.assertEqual(response.status_code, 400)
response = self.client.post(url, {'template': get_cms_setting('TEMPLATES')[0][0]})
self.assertEqual(response.status_code, 200)
def test_get_permissions(self):
page = create_page('test-page', 'nav_playground.html', 'en')
url = admin_reverse('cms_page_get_permissions', args=(page.pk,))
response = self.client.get(url)
if DJANGO_1_6:
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'admin/login.html')
else:
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/en/admin/login/?next=/en/admin/cms/page/%s/permissions/' % page.pk)
admin_user = self.get_superuser()
with self.login_user_context(admin_user):
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertTemplateNotUsed(response, 'admin/login.html')
def test_changelist_items(self):
admin_user = self.get_superuser()
first_level_page = create_page('level1', 'nav_playground.html', 'en')
second_level_page_top = create_page('level21', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=first_level_page)
second_level_page_bottom = create_page('level22', "nav_playground.html", "en",
created_by=admin_user, published=True,
parent=self.reload(first_level_page))
third_level_page = create_page('level3', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=second_level_page_top)
self.assertEqual(Page.objects.all().count(), 4)
url = admin_reverse('cms_%s_changelist' % Page._meta.model_name)
request = self.get_request(url)
request.session = {}
request.user = admin_user
page_admin = site._registry[Page]
cl_params = [request, page_admin.model, page_admin.list_display,
page_admin.list_display_links, page_admin.list_filter,
page_admin.date_hierarchy, page_admin.search_fields,
page_admin.list_select_related, page_admin.list_per_page]
if hasattr(page_admin, 'list_max_show_all'): # django 1.4
cl_params.append(page_admin.list_max_show_all)
cl_params.extend([page_admin.list_editable, page_admin])
cl = CMSChangeList(*tuple(cl_params))
cl.set_items(request)
root_page = cl.get_items()[0]
self.assertEqual(root_page, first_level_page)
self.assertEqual(root_page.get_children()[0], second_level_page_top)
self.assertEqual(root_page.get_children()[1], second_level_page_bottom)
self.assertEqual(root_page.get_children()[0].get_children()[0], third_level_page)
def test_changelist_tree(self):
""" This test checks for proper jstree cookie unquoting.
It should be converted to a selenium test to actually test the jstree behaviour.
Cookie set below is just a forged example (from live session)
"""
admin_user = self.get_superuser()
first_level_page = create_page('level1', 'nav_playground.html', 'en')
second_level_page_top = create_page('level21', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=first_level_page)
second_level_page_bottom = create_page('level22', "nav_playground.html", "en",
created_by=admin_user, published=True,
parent=self.reload(first_level_page))
third_level_page = create_page('level3', "nav_playground.html", "en",
created_by=admin_user, published=True, parent=second_level_page_top)
url = admin_reverse('cms_%s_changelist' % Page._meta.model_name)
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='admin', password='admin')
self.client.cookies['djangocms_nodes_open'] = 'page_1%2Cpage_2'
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["open_menu_trees"], [1, 2])
# tests descendants method for the lazy load ajax call
url = "%s%d/en/descendants/" % (url, first_level_page.pk)
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
# should include both direct descendant pages
self.assertContains(response, 'id="page_%s"' % second_level_page_top.pk)
self.assertContains(response, 'id="page_%s"' % second_level_page_bottom.pk)
# but not any further down the tree
self.assertNotContains(response, 'id="page_%s"' % third_level_page.pk)
self.assertNotContains(response, 'None')
def test_unihandecode_doesnt_break_404_in_admin(self):
self.get_superuser()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='admin', password='admin')
response = self.client.get('/en/admin/cms/page/1/?language=en')
self.assertEqual(response.status_code, 404)
def test_tree_displays_in_correct_language(self):
'''
Test to prove and protect that the page titles in the tree are
displayed in the currently set language.
'''
admin_guy, normal_guy = self._get_guys(use_global_permissions=False)
site = Site.objects.get(pk=1)
en_title = "EN Page"
es_title = "ES Pagina"
# Create a page in en
page = create_page(en_title, "nav_playground.html", "en", site=site, created_by=admin_guy)
# Add a es-mx translation for this page
create_title("es-mx", es_title, page, slug="es_pagina")
url = admin_reverse('cms_%s_changelist' % Page._meta.model_name)
url_pat = '<a href="{0}/{1}/preview/"[^>]*>{2}</a>'
with self.login_user_context(admin_guy):
# Check the EN version of the tree...
response = self.client.get(url, {'language': 'en'})
self.assertRegexpMatches(str(response.content), url_pat.format(page.pk, 'en', en_title, ))
# Check the ES version of the tree...
response = self.client.get(url, {'language': 'es-mx'})
self.assertRegexpMatches(str(response.content), url_pat.format(page.pk, 'es-mx', es_title, ))
def test_empty_placeholder_in_correct_language(self):
"""
Test that cleaning a placeholder only affects the current language's contents
"""
# create some objects
page_en = create_page("EmptyPlaceholderTestPage (EN)", "nav_playground.html", "en")
ph = page_en.placeholders.get(slot="body")
# add the text plugin to the en version of the page
add_plugin(ph, "TextPlugin", "en", body="Hello World EN 1")
add_plugin(ph, "TextPlugin", "en", body="Hello World EN 2")
# creating a de title of the page and adding plugins to it
create_title("de", page_en.get_title(), page_en, slug=page_en.get_slug())
add_plugin(ph, "TextPlugin", "de", body="Hello World DE")
add_plugin(ph, "TextPlugin", "de", body="Hello World DE 2")
add_plugin(ph, "TextPlugin", "de", body="Hello World DE 3")
# before cleaning the de placeholder
self.assertEqual(ph.get_plugins('en').count(), 2)
self.assertEqual(ph.get_plugins('de').count(), 3)
admin_user, staff = self._get_guys()
with self.login_user_context(admin_user):
url = '%s?language=de' % admin_reverse('cms_page_clear_placeholder', args=[ph.pk])
response = self.client.post(url, {'test': 0})
self.assertEqual(response.status_code, 302)
# After cleaning the de placeholder, en placeholder must still have all the plugins
self.assertEqual(ph.get_plugins('en').count(), 2)
self.assertEqual(ph.get_plugins('de').count(), 0)
class AdminTests(AdminTestsBase):
# TODO: needs tests for actual permissions, not only superuser/normaluser
def setUp(self):
self.page = create_page("testpage", "nav_playground.html", "en")
def get_admin(self):
User = get_user_model()
fields = dict(email="[email protected]", is_staff=True, is_superuser=True)
if (User.USERNAME_FIELD != 'email'):
fields[User.USERNAME_FIELD] = "admin"
usr = User(**fields)
usr.set_password(getattr(usr, User.USERNAME_FIELD))
usr.save()
return usr
def get_permless(self):
User = get_user_model()
fields = dict(email="[email protected]", is_staff=True)
if (User.USERNAME_FIELD != 'email'):
fields[User.USERNAME_FIELD] = "permless"
usr = User(**fields)
usr.set_password(getattr(usr, User.USERNAME_FIELD))
usr.save()
return usr
def get_page(self):
return self.page
def test_change_publish_unpublish(self):
page = self.get_page()
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 403)
page = self.reload(page)
self.assertFalse(page.is_published('en'))
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.publish_page(request, page.pk, "en")
# Forbidden
self.assertEqual(response.status_code, 403)
self.assertFalse(page.is_published('en'))
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
page = self.reload(page)
self.assertTrue(page.is_published('en'))
response = self.admin_class.unpublish(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
page = self.reload(page)
self.assertFalse(page.is_published('en'))
def test_change_status_adds_log_entry(self):
page = self.get_page()
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
self.assertFalse(LogEntry.objects.count())
response = self.admin_class.publish_page(request, page.pk, "en")
self.assertEqual(response.status_code, 302)
self.assertEqual(1, LogEntry.objects.count())
self.assertEqual(page.pk, int(LogEntry.objects.all()[0].object_id))
def test_change_innavigation(self):
page = self.get_page()
permless = self.get_permless()
admin_user = self.get_admin()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 403)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
self.assertRaises(Http404, self.admin_class.change_innavigation,
request, page.pk + 100)
with self.login_user_context(permless):
request = self.get_request(post_data={'no': 'data'})
response = self.admin_class.change_innavigation(request, page.pk)
self.assertEqual(response.status_code, 403)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'no': 'data'})
old = page.in_navigation
response = self.admin_class.change_innavigation(request, page.pk)
# These asserts are for #3589
self.assertContains(response, 'lang="en"')
self.assertContains(response, './%s/en/preview/' % page.pk)
self.assertEqual(response.status_code, 200)
page = self.reload(page)
self.assertEqual(old, not page.in_navigation)
def test_publish_page_requires_perms(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
request.method = "POST"
response = self.admin_class.publish_page(request, Page.objects.all()[0].pk, "en")
self.assertEqual(response.status_code, 403)
def test_revert_page(self):
self.page.publish('en')
title = self.page.title_set.get(language='en')
title.title = 'new'
title.save()
self.assertEqual(Title.objects.all().count(), 2)
self.assertEqual(Page.objects.all().count(), 2)
with self.login_user_context(self.get_superuser()):
request = self.get_request()
request.method = "POST"
response = self.admin_class.revert_page(request, Page.objects.all()[0].pk, "en")
self.assertEqual(response.status_code, 302)
self.assertEqual(Title.objects.all().count(), 2)
self.assertEqual(Page.objects.all().count(), 2)
new_title = Title.objects.get(pk=title.pk)
self.assertNotEqual(title.title, new_title.title)
self.assertTrue(title.publisher_is_draft)
self.assertTrue(new_title.publisher_is_draft)
def test_revert_page_requires_perms(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
request.method = "POST"
response = self.admin_class.revert_page(request, Page.objects.all()[0].pk, 'en')
self.assertEqual(response.status_code, 403)
def test_revert_page_redirects(self):
admin_user = self.get_admin()
self.page.publish("en") # Ensure public copy exists before reverting
with self.login_user_context(admin_user):
response = self.client.get(admin_reverse('cms_page_revert_page', args=(self.page.pk, 'en')))
self.assertEqual(response.status_code, 302)
url = response['Location']
self.assertTrue(url.endswith('?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_OFF')))
def test_remove_plugin_requires_post(self):
ph = Placeholder.objects.create(slot='test')
plugin = add_plugin(ph, 'TextPlugin', 'en', body='test')
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request()
response = self.admin_class.delete_plugin(request, plugin.pk)
self.assertEqual(response.status_code, 200)
def test_move_plugin(self):
ph = Placeholder.objects.create(slot='test')
plugin = add_plugin(ph, 'TextPlugin', 'en', body='test')
page = self.get_page()
source, target = list(page.placeholders.all())[:2]
pageplugin = add_plugin(source, 'TextPlugin', 'en', body='test')
plugin_class = pageplugin.get_plugin_class_instance()
expected = {'reload': plugin_class.requires_reload(PLUGIN_MOVE_ACTION)}
placeholder = Placeholder.objects.all()[0]
permless = self.get_permless()
admin_user = self.get_admin()
with self.login_user_context(permless):
request = self.get_request()
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 405)
request = self.get_request(post_data={'not_usable': '1'})
self.assertRaises(MultiValueDictKeyError, self.admin_class.move_plugin, request)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'ids': plugin.pk})
self.assertRaises(MultiValueDictKeyError, self.admin_class.move_plugin, request)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': 'invalid-placeholder', 'plugin_language': 'en'})
self.assertRaises(ValueError, self.admin_class.move_plugin, request)
with self.login_user_context(permless):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.pk, 'plugin_parent': '', 'plugin_language': 'en'})
self.assertEqual(self.admin_class.move_plugin(request).status_code, HttpResponseForbidden.status_code)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.pk, 'plugin_parent': '', 'plugin_language': 'en'})
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf8')), expected)
with self.login_user_context(permless):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.id, 'plugin_parent': '', 'plugin_language': 'en'})
self.assertEqual(self.admin_class.move_plugin(request).status_code, HttpResponseForbidden.status_code)
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': pageplugin.pk,
'placeholder_id': placeholder.id, 'plugin_parent': '', 'plugin_language': 'en'})
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 200)
self.assertEqual(json.loads(response.content.decode('utf8')), expected)
def test_move_language(self):
page = self.get_page()
source, target = list(page.placeholders.all())[:2]
col = add_plugin(source, 'MultiColumnPlugin', 'en')
sub_col = add_plugin(source, 'ColumnPlugin', 'en', target=col)
col2 = add_plugin(source, 'MultiColumnPlugin', 'de')
admin_user = self.get_admin()
with self.login_user_context(admin_user):
request = self.get_request(post_data={'plugin_id': sub_col.pk,
'placeholder_id': source.id, 'plugin_parent': col2.pk, 'plugin_language': 'de'})
response = self.admin_class.move_plugin(request)
self.assertEqual(response.status_code, 200)
sub_col = CMSPlugin.objects.get(pk=sub_col.pk)
self.assertEqual(sub_col.language, "de")
self.assertEqual(sub_col.parent_id, col2.pk)
def test_preview_page(self):
permless = self.get_permless()
with self.login_user_context(permless):
request = self.get_request()
self.assertRaises(Http404, self.admin_class.preview_page, request, 404, "en")
page = self.get_page()
page.publish("en")
base_url = page.get_absolute_url()
with self.login_user_context(permless):
request = self.get_request('/?public=true')
response = self.admin_class.preview_page(request, page.pk, 'en')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
request = self.get_request()
response = self.admin_class.preview_page(request, page.pk, 'en')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'], '%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
current_site = Site.objects.create(domain='django-cms.org', name='django-cms')
page.site = current_site
page.save()
page.publish("en")
self.assertTrue(page.is_home)
response = self.admin_class.preview_page(request, page.pk, 'en')
self.assertEqual(response.status_code, 302)
self.assertEqual(response['Location'],
'http://django-cms.org%s?%s&language=en' % (base_url, get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')))
def test_too_many_plugins_global(self):
conf = {
'body': {
'limits': {
'global': 1,
},
},
}
admin_user = self.get_admin()
url = admin_reverse('cms_page_add_plugin')
with self.settings(CMS_PERMISSION=False, CMS_PLACEHOLDER_CONF=conf):
page = create_page('somepage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
with self.login_user_context(admin_user):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': body.pk,
'plugin_language': 'en',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)
def test_too_many_plugins_type(self):
conf = {
'body': {
'limits': {
'TextPlugin': 1,
},
},
}
admin_user = self.get_admin()
url = admin_reverse('cms_page_add_plugin')
with self.settings(CMS_PERMISSION=False, CMS_PLACEHOLDER_CONF=conf):
page = create_page('somepage', 'nav_playground.html', 'en')
body = page.placeholders.get(slot='body')
add_plugin(body, 'TextPlugin', 'en', body='text')
with self.login_user_context(admin_user):
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': body.pk,
'plugin_language': 'en',
'plugin_parent': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseBadRequest.status_code)
def test_edit_title_dirty_bit(self):
language = "en"
admin_user = self.get_admin()
page = create_page('A', 'nav_playground.html', language)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
page.publish("en")
draft_page = page.get_draft_object()
admin_url = reverse("admin:cms_page_edit_title_fields", args=(
draft_page.pk, language
))
post_data = {
'title': "A Title"
}
with self.login_user_context(admin_user):
self.client.post(admin_url, post_data)
draft_page = Page.objects.get(pk=page.pk).get_draft_object()
self.assertTrue(draft_page.is_dirty('en'))
def test_edit_title_languages(self):
language = "en"
admin_user = self.get_admin()
page = create_page('A', 'nav_playground.html', language)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
page.publish("en")
draft_page = page.get_draft_object()
admin_url = reverse("admin:cms_page_edit_title_fields", args=(
draft_page.pk, language
))
post_data = {
'title': "A Title"
}
with self.login_user_context(admin_user):
self.client.post(admin_url, post_data)
draft_page = Page.objects.get(pk=page.pk).get_draft_object()
self.assertTrue(draft_page.is_dirty('en'))
def test_page_form_leak(self):
language = "en"
admin_user = self.get_admin()
request = self.get_request('/', 'en')
request.user = admin_user
page = create_page('A', 'nav_playground.html', language, menu_title='menu title')
page_admin = PageAdmin(Page, site)
page_admin._current_page = page
edit_form = page_admin.get_form(request, page)
add_form = page_admin.get_form(request, None)
self.assertEqual(edit_form.base_fields['menu_title'].initial, 'menu title')
self.assertEqual(add_form.base_fields['menu_title'].initial, None)
class NoDBAdminTests(CMSTestCase):
@property
def admin_class(self):
return site._registry[Page]
def test_lookup_allowed_site__exact(self):
self.assertTrue(self.admin_class.lookup_allowed('site__exact', '1'))
def test_lookup_allowed_published(self):
self.assertTrue(self.admin_class.lookup_allowed('published', value='1'))
class PluginPermissionTests(AdminTestsBase):
def setUp(self):
self._page = create_page('test page', 'nav_playground.html', 'en')
self._placeholder = self._page.placeholders.all()[0]
def _get_admin(self):
User = get_user_model()
fields = dict(email="[email protected]", is_staff=True, is_active=True)
if (User.USERNAME_FIELD != 'email'):
fields[User.USERNAME_FIELD] = "admin"
admin_user = User(**fields)
admin_user.set_password('admin')
admin_user.save()
return admin_user
def _get_page_admin(self):
return admin.site._registry[Page]
def _give_permission(self, user, model, permission_type, save=True):
codename = '%s_%s' % (permission_type, model._meta.object_name.lower())
user.user_permissions.add(Permission.objects.get(codename=codename))
def _give_page_permission_rights(self, user):
self._give_permission(user, PagePermission, 'add')
self._give_permission(user, PagePermission, 'change')
self._give_permission(user, PagePermission, 'delete')
def _get_change_page_request(self, user, page):
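# Builds a minimal stand-in for a request object; only the attributes the
# code under test needs (user and path) are provided.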
return type('Request', (object,), {
'user': user,
'path': base.URL_CMS_PAGE_CHANGE % page.pk
})
def _give_cms_permissions(self, user, save=True):
for perm_type in ['add', 'change', 'delete']:
for model in [Page, Title]:
self._give_permission(user, model, perm_type, False)
gpp = GlobalPagePermission.objects.create(
user=user,
can_change=True,
can_delete=True,
can_change_advanced_settings=False,
can_publish=True,
can_change_permissions=False,
can_move_page=True,
)
gpp.sites = Site.objects.all()
if save:
user.save()
def _create_plugin(self):
plugin = add_plugin(self._placeholder, 'TextPlugin', 'en')
return plugin
def test_plugin_add_requires_permissions(self):
"""User tries to add a plugin but has no permissions. He can add the plugin after he got the permissions"""
admin = self._get_admin()
self._give_cms_permissions(admin)
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='admin')
else:
self.client.login(username='admin', password='admin')
url = admin_reverse('cms_page_add_plugin')
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': self._placeholder.pk,
'plugin_language': 'en',
'plugin_parent': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
self._give_permission(admin, Text, 'add')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugin_edit_requires_permissions(self):
"""User tries to edit a plugin but has no permissions. He can edit the plugin after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_edit_plugin', args=[plugin.id])
response = self.client.post(url, dict())
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# After he got the permissions, he can edit the plugin
self._give_permission(normal_guy, Text, 'change')
response = self.client.post(url, dict())
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugin_edit_wrong_url(self):
"""User tries to edit a plugin using a random url. 404 response returned"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
self._give_permission(normal_guy, Text, 'change')
url = '%s/edit-plugin/%s/' % (admin_reverse('cms_page_edit_plugin', args=[plugin.id]), plugin.id)
response = self.client.post(url, dict())
self.assertEqual(response.status_code, HttpResponseNotFound.status_code)
self.assertTrue("Plugin not found" in force_text(response.content))
def test_plugin_remove_requires_permissions(self):
"""User tries to remove a plugin but has no permissions. He can remove the plugin after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_delete_plugin', args=[plugin.pk])
data = dict(plugin_id=plugin.id)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# After he got the permissions, he can remove the plugin
self._give_permission(normal_guy, Text, 'delete')
response = self.client.post(url, data)
self.assertEqual(response.status_code, 302)
def test_plugin_move_requires_permissions(self):
"""User tries to move a plugin but has no permissions. He can move the plugin after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_move_plugin')
data = dict(plugin_id=plugin.id,
placeholder_id=self._placeholder.pk,
plugin_parent='',
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# After he got the permissions, he can move the plugin
self._give_permission(normal_guy, Text, 'change')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugins_copy_requires_permissions(self):
"""User tries to copy plugin but has no permissions. He can copy plugins after he got the permissions"""
plugin = self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='test', password='test')
url = admin_reverse('cms_page_copy_plugins')
data = dict(source_plugin_id=plugin.id,
source_placeholder_id=self._placeholder.pk,
source_language='en',
target_language='fr',
target_placeholder_id=self._placeholder.pk,
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# After he got the permissions, he can copy the plugins
self._give_permission(normal_guy, Text, 'add')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugins_copy_placeholder_ref(self):
"""User copies a placeholder into a clipboard. A PlaceholderReferencePlugin is created. Afterwards he copies this
into a placeholder and the PlaceholderReferencePlugin unpacks its content. After that he clear the clipboard"""
self.assertEqual(Placeholder.objects.count(), 2)
self._create_plugin()
self._create_plugin()
admin_user = self.get_superuser()
clipboard = Placeholder()
clipboard.save()
self.assertEqual(CMSPlugin.objects.count(), 2)
settings = UserSettings(language="fr", clipboard=clipboard, user=admin_user)
settings.save()
self.assertEqual(Placeholder.objects.count(), 3)
if get_user_model().USERNAME_FIELD == 'email':
self.client.login(username='[email protected]', password='[email protected]')
else:
self.client.login(username='admin', password='admin')
url = admin_reverse('cms_page_copy_plugins')
data = dict(source_plugin_id='',
source_placeholder_id=self._placeholder.pk,
source_language='en',
target_language='en',
target_placeholder_id=clipboard.pk,
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
clipboard_plugins = clipboard.get_plugins()
self.assertEqual(CMSPlugin.objects.count(), 5)
self.assertEqual(clipboard_plugins.count(), 1)
self.assertEqual(clipboard_plugins[0].plugin_type, "PlaceholderPlugin")
placeholder_plugin, _ = clipboard_plugins[0].get_plugin_instance()
ref_placeholder = placeholder_plugin.placeholder_ref
copied_plugins = ref_placeholder.get_plugins()
self.assertEqual(copied_plugins.count(), 2)
data = dict(source_plugin_id=placeholder_plugin.pk,
source_placeholder_id=clipboard.pk,
source_language='en',
target_language='fr',
target_placeholder_id=self._placeholder.pk,
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
plugins = self._placeholder.get_plugins()
self.assertEqual(plugins.count(), 4)
self.assertEqual(CMSPlugin.objects.count(), 7)
self.assertEqual(Placeholder.objects.count(), 4)
url = admin_reverse('cms_page_clear_placeholder', args=[clipboard.pk])
with self.assertNumQueries(FuzzyInt(70, 80)):
response = self.client.post(url, {'test': 0})
self.assertEqual(response.status_code, 302)
self.assertEqual(CMSPlugin.objects.count(), 4)
self.assertEqual(Placeholder.objects.count(), 3)
def test_plugins_copy_language(self):
"""User tries to copy plugin but has no permissions. He can copy plugins after he got the permissions"""
self._create_plugin()
_, normal_guy = self._get_guys()
if get_user_model().USERNAME_FIELD != 'email':
self.client.login(username='test', password='test')
else:
self.client.login(username='[email protected]', password='[email protected]')
self.assertEqual(1, CMSPlugin.objects.all().count())
url = admin_reverse('cms_page_copy_language', args=[self._page.pk])
data = dict(
source_language='en',
target_language='fr',
)
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
# After he got the permissions, he can copy the plugins to the other language
self._give_permission(normal_guy, Text, 'add')
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
self.assertEqual(2, CMSPlugin.objects.all().count())
def test_page_permission_inline_visibility(self):
User = get_user_model()
fields = dict(email='[email protected]', password='user', is_staff=True)
if get_user_model().USERNAME_FIELD != 'email':
fields[get_user_model().USERNAME_FIELD] = 'user'
user = User(**fields)
user.save()
self._give_page_permission_rights(user)
page = create_page('A', 'nav_playground.html', 'en')
page_permission = PagePermission.objects.create(
can_change_permissions=True, user=user, page=page)
request = self._get_change_page_request(user, page)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
# user has can_change_permission
# => must see the PagePermissionInline
self.assertTrue(
any(type(inline) is PagePermissionInlineAdmin
for inline in page_admin.get_inline_instances(request, page)))
page = Page.objects.get(pk=page.pk)
# remove can_change_permission
page_permission.can_change_permissions = False
page_permission.save()
request = self._get_change_page_request(user, page)
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
# => PagePermissionInline is no longer visible
self.assertFalse(
any(type(inline) is PagePermissionInlineAdmin
for inline in page_admin.get_inline_instances(request, page)))
def test_edit_title_is_allowed_for_staff_user(self):
"""
We check here both the permission on a single page, and the global permissions
"""
user = self._create_user('user', is_staff=True)
another_user = self._create_user('another_user', is_staff=True)
page = create_page('A', 'nav_playground.html', 'en')
admin_url = reverse("admin:cms_page_edit_title_fields", args=(
page.pk, 'en'
))
page_admin = PageAdmin(Page, None)
page_admin._current_page = page
username = getattr(user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password=username)
response = self.client.get(admin_url)
self.assertEqual(response.status_code, HttpResponseForbidden.status_code)
assign_user_to_page(page, user, grant_all=True)
username = getattr(user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password=username)
response = self.client.get(admin_url)
self.assertEqual(response.status_code, HttpResponse.status_code)
self._give_cms_permissions(another_user)
username = getattr(another_user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password=username)
response = self.client.get(admin_url)
self.assertEqual(response.status_code, HttpResponse.status_code)
def test_plugin_add_returns_valid_pk_for_plugin(self):
admin_user = self._get_admin()
self._give_cms_permissions(admin_user)
self._give_permission(admin_user, Text, 'add')
username = getattr(admin_user, get_user_model().USERNAME_FIELD)
self.client.login(username=username, password='admin')
url = admin_reverse('cms_page_add_plugin')
data = {
'plugin_type': 'TextPlugin',
'placeholder_id': self._placeholder.pk,
'plugin_language': 'en',
'plugin_parent': '',
}
response = self.client.post(url, data)
self.assertEqual(response.status_code, HttpResponse.status_code)
self.assertEqual(response['content-type'], 'application/json')
pk = response.content.decode('utf8').split("edit-plugin/")[1].split("/")[0]
self.assertTrue(CMSPlugin.objects.filter(pk=int(pk)).exists())
class AdminFormsTests(AdminTestsBase):
def test_clean_overwrite_url(self):
user = AnonymousUser()
user.is_superuser = True
user.pk = 1
request = type('Request', (object,), {'user': user})
with self.settings():
data = {
'title': 'TestPage',
'slug': 'test-page',
'language': 'en',
'overwrite_url': '/overwrite/url/',
'site': Site.objects.get_current().pk,
'template': get_cms_setting('TEMPLATES')[0][0],
'published': True
}
form = PageForm(data)
self.assertTrue(form.is_valid(), form.errors.as_text())
instance = form.save()
instance.permission_user_cache = user
instance.permission_advanced_settings_cache = True
Title.objects.set_or_create(request, instance, form, 'en')
form = PageForm(data, instance=instance)
self.assertTrue(form.is_valid(), form.errors.as_text())
def test_missmatching_site_parent_dotsite(self):
site0 = Site.objects.create(domain='foo.com', name='foo.com')
site1 = Site.objects.create(domain='foo.com', name='foo.com')
parent_page = Page.objects.create(
template='nav_playground.html',
site=site0)
new_page_data = {
'title': 'Title',
'slug': 'slug',
'language': 'en',
'site': site1.pk,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent': parent_page.pk,
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
self.assertIn(u"Site doesn't match the parent's page site",
form.errors['__all__'])
def test_form_errors(self):
new_page_data = {
'title': 'Title',
'slug': 'home',
'language': 'en',
'site': 10,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
site0 = Site.objects.create(domain='foo.com', name='foo.com')
page1 = api.create_page("test", get_cms_setting('TEMPLATES')[0][0], "fr", site=site0)
new_page_data = {
'title': 'Title',
'slug': 'home',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent': page1.pk,
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
new_page_data = {
'title': 'Title',
'slug': '#',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
new_page_data = {
'title': 'Title',
'slug': 'home',
'language': 'pp',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent':'',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
page2 = api.create_page("test", get_cms_setting('TEMPLATES')[0][0], "en")
new_page_data = {
'title': 'Title',
'slug': 'test',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent':'',
}
form = PageForm(data=new_page_data, files=None)
self.assertFalse(form.is_valid())
page3 = api.create_page("test", get_cms_setting('TEMPLATES')[0][0], "en", parent=page2)
page3.title_set.update(path="hello/")
page3 = page3.reload()
new_page_data = {
'title': 'Title',
'slug': 'test',
'language': 'en',
'site': 1,
'template': get_cms_setting('TEMPLATES')[0][0],
'reverse_id': '',
'parent':'',
}
form = PageForm(data=new_page_data, files=None, instance=page3)
self.assertFalse(form.is_valid())
def test_reverse_id_error_location(self):
''' Test moving the reverse_id validation error to a field-specific one '''
# this is the Reverse ID we'll re-use to break things.
dupe_id = 'p1'
current_site = Site.objects.get_current()
create_page('Page 1', 'nav_playground.html', 'en', reverse_id=dupe_id)
page2 = create_page('Page 2', 'nav_playground.html', 'en')
# Assemble a bunch of data to test the page form
page2_data = {
'language': 'en',
'site': current_site.pk,
'reverse_id': dupe_id,
'template': 'col_two.html',
}
form = AdvancedSettingsForm(data=page2_data, files=None)
self.assertFalse(form.is_valid())
# reverse_id is the only item that is in __all__ as every other field
# has its own clean method. Moving it to be a field error means
# __all__ is now not available.
self.assertNotIn('__all__', form.errors)
# In moving it to its own field, it should be in form.errors, and
# the values contained therein should match these.
self.assertIn('reverse_id', form.errors)
self.assertEqual(1, len(form.errors['reverse_id']))
self.assertEqual([u'A page with this reverse URL id exists already.'],
form.errors['reverse_id'])
page2_data['reverse_id'] = ""
form = AdvancedSettingsForm(data=page2_data, files=None)
self.assertTrue(form.is_valid())
admin_user = self._get_guys(admin_only=True)
# reset some of page2_data so we can use cms.api.create_page
page2 = page2.reload()
page2.site = current_site
page2.save()
with self.login_user_context(admin_user):
# re-reset the page2_data for the admin form instance.
page2_data['reverse_id'] = dupe_id
page2_data['site'] = current_site.pk
# post to the admin change form for page 2, and test that the
# reverse_id form row has an errors class. Django's admin avoids
# collapsing these, so that the error is visible.
resp = self.client.post(base.URL_CMS_PAGE_ADVANCED_CHANGE % page2.pk, page2_data)
self.assertContains(resp, '<div class="form-row errors reverse_id">')
def test_create_page_type(self):
page = create_page('Test', 'static.html', 'en', published=True, reverse_id="home")
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
page.publish('en')
self.assertEqual(Page.objects.count(), 2)
self.assertEqual(CMSPlugin.objects.count(), 4)
superuser = self.get_superuser()
with self.login_user_context(superuser):
response = self.client.get(
"%s?copy_target=%s&language=%s" % (admin_reverse("cms_page_add_page_type"), page.pk, 'en'))
self.assertEqual(response.status_code, 302)
self.assertEqual(Page.objects.count(), 3)
self.assertEqual(Page.objects.filter(reverse_id="page_types").count(), 1)
page_types = Page.objects.get(reverse_id='page_types')
url = response.url if hasattr(response, 'url') else response['Location']
expected_url_params = QueryDict(
'target=%s&position=first-child&add_page_type=1©_target=%s&language=en' % (page_types.pk, page.pk))
response_url_params = QueryDict(urlparse(url).query)
self.assertDictEqual(expected_url_params, response_url_params)
response = self.client.get("%s?copy_target=%s&language=%s" % (
admin_reverse("cms_page_add_page_type"), page.pk, 'en'), follow=True)
self.assertEqual(response.status_code, 200)
# test no page types if no page types there
response = self.client.get(admin_reverse('cms_page_add'))
self.assertNotContains(response, "page_type")
# create our first page type
page_data = {
'title': 'type1', 'slug': 'type1', '_save': 1, 'template': 'static.html', 'site': 1,
'language': 'en'
}
response = self.client.post(
"/en/admin/cms/page/add/?target=%s&position=first-child&add_page_type=1©_target=%s&language=en" % (
page_types.pk, page.pk), data=page_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(Page.objects.count(), 4)
self.assertEqual(CMSPlugin.objects.count(), 6)
response = self.client.get(admin_reverse('cms_page_add'))
self.assertContains(response, "page_type")
# no page types available if you use the copy_target
response = self.client.get("%s?copy_target=%s&language=en" % (admin_reverse('cms_page_add'), page.pk))
self.assertNotContains(response, "page_type")
def test_render_edit_mode(self):
from django.core.cache import cache
cache.clear()
create_page('Test', 'static.html', 'en', published=True)
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
user = self.get_superuser()
self.assertEqual(Placeholder.objects.all().count(), 4)
with self.login_user_context(user):
with self.assertNumQueries(FuzzyInt(40, 66)):
output = force_text(self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')).content)
self.assertIn('<b>Test</b>', output)
self.assertEqual(Placeholder.objects.all().count(), 9)
self.assertEqual(StaticPlaceholder.objects.count(), 2)
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
with self.assertNumQueries(FuzzyInt(40, 72)):
output = force_text(self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')).content)
self.assertIn('<b>Test</b>', output)
with self.assertNumQueries(FuzzyInt(18, 45)):
force_text(self.client.get('/en/?%s' % get_cms_setting('CMS_TOOLBAR_URL__EDIT_ON')).content)
with self.assertNumQueries(FuzzyInt(11, 29)):
force_text(self.client.get('/en/').content)
def test_tree_view_queries(self):
from django.core.cache import cache
cache.clear()
for i in range(10):
create_page('Test%s' % i, 'col_two.html', 'en', published=True)
for placeholder in Placeholder.objects.all():
add_plugin(placeholder, TextPlugin, 'en', body='<b>Test</b>')
user = self.get_superuser()
with self.login_user_context(user):
with self.assertNumQueries(FuzzyInt(18, 33)):
force_text(self.client.get('/en/admin/cms/page/'))
def test_smart_link_published_pages(self):
admin, staff_guy = self._get_guys()
page_url = '/en/admin/cms/page/published-pages/' # Not sure how to achieve this with reverse...
with self.login_user_context(staff_guy):
multi_title_page = create_page('main_title', 'col_two.html', 'en', published=True,
overwrite_url='overwritten_url',
menu_title='menu_title')
title = multi_title_page.get_title_obj()
title.page_title = 'page_title'
title.save()
multi_title_page.save()
publish_page(multi_title_page, admin, 'en')
# A non-AJAX call should return a 403, as this page should only be accessed via AJAX queries
self.assertEqual(403, self.client.get(page_url).status_code)
self.assertEqual(200,
self.client.get(page_url, HTTP_X_REQUESTED_WITH='XMLHttpRequest').status_code
)
# Test that the query param is working as expected.
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'main_title'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'menu_title'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'overwritten_url'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
self.assertEqual(1, len(json.loads(self.client.get(page_url, {'q':'page_title'},
HTTP_X_REQUESTED_WITH='XMLHttpRequest').content.decode("utf-8"))))
class AdminPageEditContentSizeTests(AdminTestsBase):
"""
System user count influences the size of the page edit page,
but the users are only 2 times present on the page
The test relates to extra=0
at PagePermissionInlineAdminForm and ViewRestrictionInlineAdmin
"""
def test_editpage_contentsize(self):
"""
Expected a username only 2 times in the content, but a relationship
between usercount and pagesize
"""
with self.settings(CMS_PERMISSION=True):
admin_user = self.get_superuser()
PAGE_NAME = 'TestPage'
USER_NAME = 'test_size_user_0'
current_site = Site.objects.get(pk=1)
page = create_page(PAGE_NAME, "nav_playground.html", "en", site=current_site, created_by=admin_user)
page.save()
self._page = page
with self.login_user_context(admin_user):
url = base.URL_CMS_PAGE_PERMISSION_CHANGE % self._page.pk
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
old_response_size = len(response.content)
old_user_count = get_user_model().objects.count()
# create an additional user and reload the page
get_user_model().objects.create_user(username=USER_NAME, email=USER_NAME + '@django-cms.org',
password=USER_NAME)
user_count = get_user_model().objects.count()
more_users_in_db = old_user_count < user_count
# we have more users
self.assertTrue(more_users_in_db, "New user was NOT created")
response = self.client.get(url)
new_response_size = len(response.content)
page_size_grown = old_response_size < new_response_size
# expect the page size to grow with the number of users in the system
self.assertTrue(page_size_grown, "Page size has not grown after user creation")
# the username appears only twice in the content
text = smart_str(response.content, response._charset)
foundcount = text.count(USER_NAME)
# 2 forms contain usernames as options
self.assertEqual(foundcount, 2,
"Username %s appeared %s times in response.content, expected 2 times" % (
USER_NAME, foundcount))
| bsd-3-clause |
delighted/phantomjs | src/qt/qtwebkit/Tools/QueueStatusServer/handlers/svnrevision.py | 143 | 1923 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import webapp
import model
class SVNRevision(webapp.RequestHandler):
def get(self, svn_revision_number):
svn_revisions = model.SVNRevision.all().filter('number =', int(svn_revision_number)).order('-date').fetch(1)
if not svn_revisions:
self.error(404)
return
self.response.out.write(svn_revisions[0].to_xml())
| bsd-3-clause |
Hybrid-Cloud/badam | patches_tool/aws_patch/aws_deps/libcloud/test/dns/test_linode.py | 1 | 14072 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import unittest
from libcloud.utils.py3 import httplib
from libcloud.common.linode import LinodeException
from libcloud.dns.types import RecordType, ZoneDoesNotExistError
from libcloud.dns.types import RecordDoesNotExistError
from libcloud.dns.drivers.linode import LinodeDNSDriver
from libcloud.test import MockHttp
from libcloud.test.file_fixtures import DNSFileFixtures
from libcloud.test.secrets import DNS_PARAMS_LINODE
class LinodeTests(unittest.TestCase):
def setUp(self):
LinodeDNSDriver.connectionCls.conn_classes = (
None, LinodeMockHttp)
LinodeMockHttp.use_param = 'api_action'
LinodeMockHttp.type = None
self.driver = LinodeDNSDriver(*DNS_PARAMS_LINODE)
def assertHasKeys(self, dictionary, keys):
for key in keys:
self.assertTrue(key in dictionary, 'key "%s" not in dictionary' %
(key))
def test_list_record_types(self):
record_types = self.driver.list_record_types()
self.assertEqual(len(record_types), 7)
self.assertTrue(RecordType.A in record_types)
def test_list_zones_success(self):
zones = self.driver.list_zones()
self.assertEqual(len(zones), 2)
zone = zones[0]
self.assertEqual(zone.id, '5093')
self.assertEqual(zone.type, 'master')
self.assertEqual(zone.domain, 'linode.com')
self.assertEqual(zone.ttl, None)
self.assertHasKeys(zone.extra, ['description', 'SOA_Email', 'status'])
def test_list_records_success(self):
zone = self.driver.list_zones()[0]
records = self.driver.list_records(zone=zone)
self.assertEqual(len(records), 2)
arecord = records[0]
self.assertEqual(arecord.id, '3585100')
self.assertEqual(arecord.name, 'mc')
self.assertEqual(arecord.type, RecordType.A)
self.assertEqual(arecord.data, '127.0.0.1')
self.assertHasKeys(arecord.extra, ['protocol', 'ttl_sec', 'port',
'weight'])
def test_list_records_zone_does_not_exist(self):
zone = self.driver.list_zones()[0]
LinodeMockHttp.type = 'ZONE_DOES_NOT_EXIST'
try:
self.driver.list_records(zone=zone)
except ZoneDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.zone_id, zone.id)
else:
self.fail('Exception was not thrown')
def test_get_zone_success(self):
LinodeMockHttp.type = 'GET_ZONE'
zone = self.driver.get_zone(zone_id='5093')
self.assertEqual(zone.id, '5093')
self.assertEqual(zone.type, 'master')
self.assertEqual(zone.domain, 'linode.com')
self.assertEqual(zone.ttl, None)
self.assertHasKeys(zone.extra, ['description', 'SOA_Email', 'status'])
def test_get_zone_does_not_exist(self):
LinodeMockHttp.type = 'GET_ZONE_DOES_NOT_EXIST'
try:
self.driver.get_zone(zone_id='4444')
except ZoneDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.zone_id, '4444')
else:
self.fail('Exception was not thrown')
def test_get_record_success(self):
LinodeMockHttp.type = 'GET_RECORD'
record = self.driver.get_record(zone_id='1234', record_id='3585100')
self.assertEqual(record.id, '3585100')
self.assertEqual(record.name, 'www')
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '127.0.0.1')
self.assertHasKeys(record.extra, ['protocol', 'ttl_sec', 'port',
'weight'])
def test_get_record_zone_does_not_exist(self):
LinodeMockHttp.type = 'GET_RECORD_ZONE_DOES_NOT_EXIST'
try:
self.driver.get_record(zone_id='444', record_id='3585100')
except ZoneDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_get_record_record_does_not_exist(self):
LinodeMockHttp.type = 'GET_RECORD_RECORD_DOES_NOT_EXIST'
try:
self.driver.get_record(zone_id='4441', record_id='3585100')
except RecordDoesNotExistError:
pass
else:
self.fail('Exception was not thrown')
def test_create_zone_success(self):
zone = self.driver.create_zone(domain='foo.bar.com', type='master',
ttl=None, extra=None)
self.assertEqual(zone.id, '5094')
self.assertEqual(zone.domain, 'foo.bar.com')
def test_create_zone_validation_error(self):
LinodeMockHttp.type = 'VALIDATION_ERROR'
try:
self.driver.create_zone(domain='foo.bar.com', type='master',
ttl=None, extra=None)
except LinodeException:
pass
else:
self.fail('Exception was not thrown')
def test_update_zone_success(self):
zone = self.driver.list_zones()[0]
updated_zone = self.driver.update_zone(zone=zone,
domain='libcloud.org',
ttl=10,
extra={'SOA_Email':
'[email protected]'})
self.assertEqual(zone.extra['SOA_Email'], '[email protected]')
self.assertEqual(updated_zone.id, zone.id)
self.assertEqual(updated_zone.domain, 'libcloud.org')
self.assertEqual(updated_zone.type, zone.type)
self.assertEqual(updated_zone.ttl, 10)
self.assertEqual(updated_zone.extra['SOA_Email'], '[email protected]')
self.assertEqual(updated_zone.extra['status'], zone.extra['status'])
self.assertEqual(updated_zone.extra['description'],
zone.extra['description'])
def test_create_record_success(self):
zone = self.driver.list_zones()[0]
record = self.driver.create_record(name='www', zone=zone,
type=RecordType.A, data='127.0.0.1')
self.assertEqual(record.id, '3585100')
self.assertEqual(record.name, 'www')
self.assertEqual(record.zone, zone)
self.assertEqual(record.type, RecordType.A)
self.assertEqual(record.data, '127.0.0.1')
def test_update_record_success(self):
zone = self.driver.list_zones()[0]
record = self.driver.list_records(zone=zone)[0]
updated_record = self.driver.update_record(record=record, name='www',
type=RecordType.AAAA,
data='::1')
self.assertEqual(record.data, '127.0.0.1')
self.assertEqual(updated_record.id, record.id)
self.assertEqual(updated_record.name, 'www')
self.assertEqual(updated_record.zone, record.zone)
self.assertEqual(updated_record.type, RecordType.AAAA)
self.assertEqual(updated_record.data, '::1')
def test_delete_zone_success(self):
zone = self.driver.list_zones()[0]
status = self.driver.delete_zone(zone=zone)
self.assertTrue(status)
def test_delete_zone_does_not_exist(self):
zone = self.driver.list_zones()[0]
LinodeMockHttp.type = 'ZONE_DOES_NOT_EXIST'
try:
self.driver.delete_zone(zone=zone)
except ZoneDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.zone_id, zone.id)
else:
self.fail('Exception was not thrown')
def test_delete_record_success(self):
zone = self.driver.list_zones()[0]
record = self.driver.list_records(zone=zone)[0]
status = self.driver.delete_record(record=record)
self.assertTrue(status)
def test_delete_record_does_not_exist(self):
zone = self.driver.list_zones()[0]
record = self.driver.list_records(zone=zone)[0]
LinodeMockHttp.type = 'RECORD_DOES_NOT_EXIST'
try:
self.driver.delete_record(record=record)
except RecordDoesNotExistError:
e = sys.exc_info()[1]
self.assertEqual(e.record_id, record.id)
else:
self.fail('Exception was not thrown')
class LinodeMockHttp(MockHttp):
fixtures = DNSFileFixtures('linode')
def _domain_list(self, method, url, body, headers):
body = self.fixtures.load('domain_list.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _domain_resource_list(self, method, url, body, headers):
body = self.fixtures.load('resource_list.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _ZONE_DOES_NOT_EXIST_domain_resource_list(self, method, url, body,
headers):
body = self.fixtures.load('resource_list_does_not_exist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _GET_ZONE_domain_list(self, method, url, body, headers):
body = self.fixtures.load('get_zone.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _GET_ZONE_DOES_NOT_EXIST_domain_list(self, method, url, body,
headers):
body = self.fixtures.load('get_zone_does_not_exist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _GET_RECORD_domain_list(self, method, url, body, headers):
body = self.fixtures.load('get_zone.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _GET_RECORD_domain_resource_list(self, method, url, body, headers):
body = self.fixtures.load('get_record.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _GET_RECORD_ZONE_DOES_NOT_EXIST_domain_list(self, method, url, body,
headers):
body = self.fixtures.load('get_zone_does_not_exist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _GET_RECORD_ZONE_DOES_NOT_EXIST_domain_resource_list(self, method, url,
body, headers):
body = self.fixtures.load('get_record_does_not_exist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _GET_RECORD_RECORD_DOES_NOT_EXIST_domain_list(self, method, url, body,
headers):
body = self.fixtures.load('get_zone.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _GET_RECORD_RECORD_DOES_NOT_EXIST_domain_resource_list(self, method,
url, body,
headers):
body = self.fixtures.load('get_record_does_not_exist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _domain_create(self, method, url, body, headers):
body = self.fixtures.load('create_domain.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _VALIDATION_ERROR_domain_create(self, method, url, body, headers):
body = self.fixtures.load('create_domain_validation_error.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _domain_update(self, method, url, body, headers):
body = self.fixtures.load('update_domain.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _domain_resource_create(self, method, url, body, headers):
body = self.fixtures.load('create_resource.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _domain_resource_update(self, method, url, body, headers):
body = self.fixtures.load('update_resource.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _domain_delete(self, method, url, body, headers):
body = self.fixtures.load('delete_domain.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _ZONE_DOES_NOT_EXIST_domain_delete(self, method, url, body, headers):
body = self.fixtures.load('delete_domain_does_not_exist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _domain_resource_delete(self, method, url, body, headers):
body = self.fixtures.load('delete_resource.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
def _RECORD_DOES_NOT_EXIST_domain_resource_delete(self, method, url, body,
headers):
body = self.fixtures.load('delete_resource_does_not_exist.json')
return (httplib.OK, body, {}, httplib.responses[httplib.OK])
if __name__ == '__main__':
sys.exit(unittest.main())
| apache-2.0 |
mlperf/training_results_v0.7 | Google/benchmarks/maskrcnn/implementations/maskrcnn-research-TF-tpu-v4-128/object_detection/balanced_positive_negative_sampler.py | 2 | 11991 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to subsample minibatches by balancing positives and negatives.
Subsamples minibatches based on a pre-specified positive fraction in range
[0,1]. The class presumes there are many more negatives than positive examples:
if the desired batch_size cannot be achieved with the pre-specified positive
fraction, it fills the rest with negative examples. If this is not sufficient
for obtaining the desired batch_size, it returns fewer examples.
The main function to call is Subsample(self, indicator, labels). For convenience
one can also call SubsampleWeights(self, weights, labels) which is defined in
the minibatch_sampler base class.
When is_static is True, it implements a method that guarantees static shapes.
It also ensures the length of output of the subsample is always batch_size, even
when number of examples set to True in indicator is less than batch_size.
This is originally implemented in TensorFlow Object Detection API.
"""
import tensorflow.compat.v1 as tf
from REDACTED.tensorflow_models.mlperf.models.rough.mask_rcnn.object_detection import minibatch_sampler
from REDACTED.tensorflow_models.mlperf.models.rough.mask_rcnn.object_detection import ops
class BalancedPositiveNegativeSampler(minibatch_sampler.MinibatchSampler):
"""Subsamples minibatches to a desired balance of positives and negatives."""
def __init__(self, positive_fraction=0.5, is_static=False):
"""Constructs a minibatch sampler.
Args:
positive_fraction: desired fraction of positive examples (scalar in [0,1])
in the batch.
is_static: If True, uses an implementation with static shape guarantees.
Raises:
ValueError: if positive_fraction < 0, or positive_fraction > 1
"""
if positive_fraction < 0 or positive_fraction > 1:
raise ValueError('positive_fraction should be in range [0,1]. '
'Received: %s.' % positive_fraction)
self._positive_fraction = positive_fraction
self._is_static = is_static
def _get_num_pos_neg_samples(self, sorted_indices_tensor, sample_size):
"""Counts the number of positives and negatives numbers to be sampled.
Args:
sorted_indices_tensor: A sorted int32 tensor of shape [N] which contains
the signed indices of the examples where the sign is based on the label
value. The examples that cannot be sampled are set to 0. It samples
at most sample_size*positive_fraction positive examples and the remainder
from negative examples.
sample_size: Size of subsamples.
Returns:
A tuple containing the number of positive and negative labels in the
subsample.
"""
input_length = tf.shape(sorted_indices_tensor)[0]
valid_positive_index = tf.greater(sorted_indices_tensor,
tf.zeros(input_length, tf.int32))
num_sampled_pos = tf.reduce_sum(tf.cast(valid_positive_index, tf.int32))
max_num_positive_samples = tf.constant(
int(sample_size * self._positive_fraction), tf.int32)
num_positive_samples = tf.minimum(max_num_positive_samples, num_sampled_pos)
num_negative_samples = tf.constant(sample_size,
tf.int32) - num_positive_samples
return num_positive_samples, num_negative_samples
def _get_values_from_start_and_end(self, input_tensor, num_start_samples,
num_end_samples, total_num_samples):
"""slices num_start_samples and last num_end_samples from input_tensor.
Args:
input_tensor: An int32 tensor of shape [N] to be sliced.
num_start_samples: Number of examples to be sliced from the beginning
of the input tensor.
num_end_samples: Number of examples to be sliced from the end of the
input tensor.
total_num_samples: Sum of num_start_samples and num_end_samples. This
should be a scalar.
Returns:
A tensor containing the first num_start_samples and last num_end_samples
from input_tensor.
"""
input_length = tf.shape(input_tensor)[0]
start_positions = tf.less(tf.range(input_length), num_start_samples)
end_positions = tf.greater_equal(
tf.range(input_length), input_length - num_end_samples)
selected_positions = tf.logical_or(start_positions, end_positions)
selected_positions = tf.cast(selected_positions, tf.float32)
indexed_positions = tf.multiply(tf.cumsum(selected_positions),
selected_positions)
one_hot_selector = tf.one_hot(tf.cast(indexed_positions, tf.int32) - 1,
total_num_samples,
dtype=tf.float32)
return tf.cast(tf.tensordot(tf.cast(input_tensor, tf.float32),
one_hot_selector, axes=[0, 0]), tf.int32)
def _static_subsample(self, indicator, batch_size, labels):
"""Returns subsampled minibatch.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
N should be a compile-time constant.
batch_size: desired batch size. This scalar cannot be None.
labels: boolean tensor of shape [N] denoting positive(=True) and negative
(=False) examples. N should be a compile-time constant.
Returns:
sampled_idx_indicator: boolean tensor of shape [N], True for entries which
are sampled. It ensures the length of the subsample output is always
batch_size, even when the number of examples set to True in indicator is
less than batch_size.
Raises:
ValueError: if labels and indicator are not 1D boolean tensors.
"""
# Check if indicator and labels have a static size.
if not indicator.shape.is_fully_defined():
raise ValueError('indicator must be static in shape when is_static is '
'True')
if not labels.shape.is_fully_defined():
raise ValueError('labels must be static in shape when is_static is '
'True')
if not isinstance(batch_size, int):
raise ValueError('batch_size has to be an integer when is_static is '
'True.')
input_length = tf.shape(indicator)[0]
# Set the number of examples set True in indicator to be at least
# batch_size.
num_true_sampled = tf.reduce_sum(tf.cast(indicator, tf.float32))
additional_false_sample = tf.less_equal(
tf.cumsum(tf.cast(tf.logical_not(indicator), tf.float32)),
batch_size - num_true_sampled)
indicator = tf.logical_or(indicator, additional_false_sample)
# Shuffle indicator and label. Need to store the permutation to restore the
# order post sampling.
permutation = tf.random_shuffle(tf.range(input_length))
indicator = ops.matmul_gather_on_zeroth_axis(
tf.cast(indicator, tf.float32), permutation)
labels = ops.matmul_gather_on_zeroth_axis(
tf.cast(labels, tf.float32), permutation)
# index (starting from 1) when indicator is True, 0 when False
indicator_idx = tf.where(
tf.cast(indicator, tf.bool), tf.range(1, input_length + 1),
tf.zeros(input_length, tf.int32))
# Replace -1 for negative, +1 for positive labels
signed_label = tf.where(
tf.cast(labels, tf.bool), tf.ones(input_length, tf.int32),
tf.scalar_mul(-1, tf.ones(input_length, tf.int32)))
# negative of index for negative label, positive index for positive label,
# 0 when indicator is False.
signed_indicator_idx = tf.multiply(indicator_idx, signed_label)
sorted_signed_indicator_idx = tf.nn.top_k(
signed_indicator_idx, input_length, sorted=True).values
[num_positive_samples,
num_negative_samples] = self._get_num_pos_neg_samples(
sorted_signed_indicator_idx, batch_size)
sampled_idx = self._get_values_from_start_and_end(
sorted_signed_indicator_idx, num_positive_samples,
num_negative_samples, batch_size)
# Shift the indices to start from 0 and remove any samples that are set as
# False.
sampled_idx = tf.abs(sampled_idx) - tf.ones(batch_size, tf.int32)
sampled_idx = tf.multiply(
tf.cast(tf.greater_equal(sampled_idx, tf.constant(0)), tf.int32),
sampled_idx)
sampled_idx_indicator = tf.cast(tf.reduce_sum(
tf.one_hot(sampled_idx, depth=input_length),
axis=0), tf.bool)
# project back the order based on stored permutations
reprojections = tf.one_hot(permutation, depth=input_length,
dtype=tf.float32)
return tf.cast(tf.tensordot(
tf.cast(sampled_idx_indicator, tf.float32),
reprojections, axes=[0, 0]), tf.bool)
def subsample(self, indicator, batch_size, labels, scope=None):
"""Returns subsampled minibatch.
Args:
indicator: boolean tensor of shape [N] whose True entries can be sampled.
batch_size: desired batch size. If None, keeps all positive samples and
randomly selects negative samples so that the positive sample fraction
matches self._positive_fraction. It cannot be None if is_static is True.
labels: boolean tensor of shape [N] denoting positive(=True) and negative
(=False) examples.
scope: name scope.
Returns:
sampled_idx_indicator: boolean tensor of shape [N], True for entries which
are sampled.
Raises:
ValueError: if labels and indicator are not 1D boolean tensors.
"""
if len(indicator.get_shape().as_list()) != 1:
raise ValueError('indicator must be 1 dimensional, got a tensor of '
'shape %s' % indicator.get_shape())
if len(labels.get_shape().as_list()) != 1:
raise ValueError('labels must be 1 dimensional, got a tensor of '
'shape %s' % labels.get_shape())
if labels.dtype != tf.bool:
raise ValueError('labels should be of type bool. Received: %s' %
labels.dtype)
if indicator.dtype != tf.bool:
raise ValueError('indicator should be of type bool. Received: %s' %
indicator.dtype)
with tf.name_scope(scope, 'BalancedPositiveNegativeSampler'):
if self._is_static:
return self._static_subsample(indicator, batch_size, labels)
else:
# Only sample from indicated samples
negative_idx = tf.logical_not(labels)
positive_idx = tf.logical_and(labels, indicator)
negative_idx = tf.logical_and(negative_idx, indicator)
# Sample positive and negative samples separately
if batch_size is None:
max_num_pos = tf.reduce_sum(tf.to_int32(positive_idx))
else:
max_num_pos = int(self._positive_fraction * batch_size)
sampled_pos_idx = self.subsample_indicator(positive_idx, max_num_pos)
num_sampled_pos = tf.reduce_sum(tf.cast(sampled_pos_idx, tf.int32))
if batch_size is None:
negative_positive_ratio = (
1 - self._positive_fraction) / self._positive_fraction
max_num_neg = tf.to_int32(
negative_positive_ratio * tf.to_float(num_sampled_pos))
else:
max_num_neg = batch_size - num_sampled_pos
sampled_neg_idx = self.subsample_indicator(negative_idx, max_num_neg)
return tf.logical_or(sampled_pos_idx, sampled_neg_idx)
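# Illustrative usage sketch (not part of the original module; tensor names are
# hypothetical). Given boolean `indicator` and `labels` tensors of shape [N]:
#
# sampler = BalancedPositiveNegativeSampler(positive_fraction=0.25)
# sampled = sampler.subsample(indicator, 256, labels)
#
# `sampled` is then a boolean [N] tensor marking the chosen examples, with at
# most int(256 * 0.25) = 64 positives and the rest drawn from negatives.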
| apache-2.0 |
radiasoft/radtrack | radtrack/rt_params.py | 1 | 9448 | # -*- coding: utf-8 -*-
"""Parameter declaration parser
:copyright: Copyright (c) 2015 Bivio Software, Inc. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import UserDict
import __builtin__
import copy
import enum
import importlib
import re
import sys
import yaml
from pykern.pkdebug import pkdc, pkdp
from pykern import pkcompat
from pykern import pkio
from pykern import pkcollections
from pykern import pkyaml
#: Valid attributes for a declaration
DECLARATION_ATTRS = ('name', 'required', 'children', 'label', 'units', 'py_type')
class DeclarationException(ValueError):
pass
class Declaration(UserDict.DictMixin):
"""Describe a parameter and its children (if any)
Attributes:
children (ordered): OrderedDict of subparameters
label (str): displayed to the user (default: generated from name)
name (str): programmatic name
py_type (type): how to render value (None implies has children)
required (list or dict): components need this parameter (may be inherited)
units (str): expected units (default: None)
"""
def __init__(self, decl, qualifier=None):
try:
#TODO(robnagler) more type checking: especially required and children
self.name = decl['name']
self.qualified_name = qualifier + '.' + self.name if qualifier else self.name
unknown = [x for x in decl if x not in DECLARATION_ATTRS]
if unknown:
raise ValueError('{}: unknown attribute(s)'.format(unknown))
self.label = self._label(decl)
self.py_type = self._py_type(decl)
self.units = self._units(decl)
self.required = self._required(decl)
self.children = self._children(decl)
assert self.children or self.py_type, \
'{}: declaration must be one type or the other'
except DeclarationException:
raise
except Exception as e:
n = None
for a in ('qualified_name', 'name'):
if hasattr(self, a):
n = getattr(self, a)
break
if n is None:
n = decl
raise DeclarationException('{}: declaration error for {}'.format(e, n)), None, sys.exc_info()[2]
def __repr__(self):
return 'Declaration({})'.format(self.name or self.qualified_name)
def __getitem__(self, key):
if not (self.children and key in self.children):
raise KeyError(key)
return self.children[key]
def keys(self):
if not self.children:
return []
return pkcollections.map_keys(self.children)
def _children(self, decl):
if 'children' not in decl:
return None
res = pkcollections.OrderedMapping()
for c in decl['children']:
if pkcompat.isinstance_str(c):
d = c
n = c
else:
d = Declaration(c, self.qualified_name)
n = d.name
assert n not in res, \
'{}: duplicate key in {}'.format(n, self.name)
res[n] = d
return res
def _label(self, decl):
if 'label' in decl:
return decl['label']
res = self.name
res = re.sub(r'(^|_)([a-z])', lambda x: x.group(1) + x.group(2).upper(), res)
res = re.sub(r'_', ' ', res)
res = re.sub(r'\bLen\b', 'Length', res)
res = re.sub(r'\bNum\b', 'Number of', res)
res = re.sub(r'\bCoord\b', 'Coordinate', res)
res = re.sub(r'\bAvg\b', 'Average', res)
return res
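# Illustrative example (not from the original source): a declaration named
# 'avg_beam_len' with no explicit label is rendered as 'Average Beam Length'
# by the substitutions above.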
def _py_type(self, decl):
"""Parse py_type to Python type instance"""
if 'py_type' not in decl:
return None
t = decl['py_type']
try:
t = getattr(__builtin__, t)
if isinstance(t, type):
return t
except AttributeError:
pass
s = re.search(r'^(\w+)\.(\w+)$', decl['py_type'])
assert s, \
'{py_type}: py_type for {name} not found'.format(**decl)
m = importlib.import_module('radtrack.' + s.group(1))
t = getattr(m, s.group(2))
assert isinstance(t, type), \
'{py_type}: py_type for {name} not a type'.format(**decl)
return t
def _required(self, decl):
return decl['required']
def _units(self, decl):
return decl['units'] if 'units' in decl else None
class Default(UserDict.DictMixin):
def __init__(self, value, component, decl, parent_type=None, qualifier=None):
self.decl = decl
self.qualified_name = qualifier + '.' + decl.qualified_name if qualifier else decl.qualified_name
self.children = self._children(value, decl, component)
if decl.py_type:
if decl.children:
self.value = self.children[next(iter(self.children))].value
else:
self.value = _parse_value(value, decl.py_type)
elif parent_type:
self.value = _parse_value(decl.name, parent_type)
def iter_leaves(self):
if not self.children:
yield self
else:
for c in pkcollections.map_values(self.children):
for l in c.iter_leaves():
yield l
def iter_nodes(self):
yield self
if not self.children:
return
for c in pkcollections.map_values(self.children):
for l in c.iter_nodes():
yield l
def __repr__(self):
return 'Default({})'.format(self.decl.qualified_name)
def __getitem__(self, key):
if not (self.children and key in self.children):
raise KeyError('{}: no key in {}'.format(key, self))
return self.children[key]
def keys(self):
if not self.children:
return []
return pkcollections.map_keys(self.children)
def _children(self, values, decl, component):
if not decl.children:
return None
res = pkcollections.OrderedMapping()
for child_decl in decl.values():
d = Default(
values[child_decl.name],
component,
child_decl,
decl.py_type,
self.qualified_name,
)
res[child_decl.name] = d
return res
def declarations(file_prefix):
"""Parsed parameter declarations from ``<file_prefix>_declarations.yml``
Args:
file_prefix (str): which file to parse
Returns:
OrderedMapping: mapping of declarations
"""
return _get(file_prefix, 'declarations', _parse_declarations)
def defaults(file_prefix, decl):
"""Parsed parameter defaults from ``<file_prefix>_defaults.yml``
Args:
file_prefix (str): which file to parse
decl (OrderedMapping): how to parse data
Returns:
OrderedMapping: mapping of default values
"""
return _get(
file_prefix,
'defaults',
lambda v, fp: defaults_from_dict(v, fp, decl),
)
def defaults_from_dict(values, component, decl):
"""Parsed parameter values from already parsed YAML.
Args:
values (dict): read from YAML
component (str): which component in decl, e.g. 'srw_multi'
decl (OrderedMapping): how to parse the data
Returns:
OrderedMapping: mapping of values
"""
return Default(values, component, decl)
def init_params(defaults):
"""Create a tree of default params excluding headings and computed params
Args:
defaults (dict): used for initializations
Returns:
OrderedMapping: nested dictionary of params
"""
res = pkcollections.OrderedMapping()
for k in defaults:
v = defaults[k]
res[k] = init_params(v.children) if v.children else v.value
return res
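# Illustrative flow (the 'srw' file prefix is hypothetical, not part of the
# original module):
#
# decl = declarations('srw') # parses srw_declarations.yml
# dflt = defaults('srw', decl) # parses srw_defaults.yml against decl
# params = init_params(dflt) # nested OrderedMapping of plain default values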
def _get(file_name_or_prefix, which, how):
"""Parse and validate YAML file.
Args:
file_name_or_prefix (str): which file to parse
which (str): "declarations" or "defaults" or None
how (callable): parser
Returns:
OrderedMapping: parsed YAML file
"""
fn = '{}_{}'.format(file_name_or_prefix, which) if which else file_name_or_prefix
values = pkyaml.load_resource(fn)
return how(values, file_name_or_prefix)
def _parse_declarations(values, file_prefix):
"""Recurse the parsed YAML declarations; convert values and types
Order preserving so can use for layout.
Args:
values (list): raw YAML as a list
file_prefix (str): which file to parse
"""
root = Declaration({
'name': '',
'children': values,
'required': None,
})
_parse_declarations_link(root, root)
return root
def _parse_declarations_link(decl, root):
for k, v in decl.items():
if not isinstance(v, Declaration):
decl.children[k] = root[k]
else:
_parse_declarations_link(v, root)
def _parse_value(v, t):
if hasattr(t, 'from_anything'):
return t.from_anything(v)
return t(v)
| apache-2.0 |
cgstudiomap/cgstudiomap | main/local_modules/res_partner_url_validation_missing_details/__openerp__.py | 1 | 1456 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) cgstudiomap <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Res Partner url validation: missing details',
'version': '0.2',
'author': 'cgstudiomap',
'maintainer': 'cgstudiomap',
'license': 'AGPL-3',
'category': 'Sales',
'summary': 'Set up for urls for missing details bot',
'depends': [
'res_partner_missing_details',
'res_partner_url_validation',
],
'external_dependencies': {},
'data': [
'missing_details.xml',
],
'installable': True,
}
| agpl-3.0 |
FEniCS/fiat | FIAT/discontinuous_taylor.py | 1 | 2114 | # Copyright (C) 2008 Robert C. Kirby (Texas Tech University)
# Modified by Colin Cotter (Imperial College London)
# David Ham (Imperial College London)
#
# This file is part of FIAT (https://www.fenicsproject.org)
#
# SPDX-License-Identifier: LGPL-3.0-or-later
from FIAT import finite_element, polynomial_set, dual_set, functional, P0, quadrature
from FIAT.polynomial_set import mis
import numpy
class DiscontinuousTaylorDualSet(dual_set.DualSet):
"""The dual basis for Taylor elements. This class works for
intervals. Nodes are function and derivative evaluation
at the midpoint."""
def __init__(self, ref_el, degree):
nodes = []
dim = ref_el.get_spatial_dimension()
Q = quadrature.make_quadrature(ref_el, 2 * (degree + 1))
f_at_qpts = numpy.ones(len(Q.wts))
nodes.append(functional.IntegralMoment(ref_el, Q, f_at_qpts))
vertices = ref_el.get_vertices()
midpoint = tuple(sum(numpy.array(vertices)) / len(vertices))
for k in range(1, degree + 1):
# Loop over all multi-indices of degree k.
for alpha in mis(dim, k):
nodes.append(functional.PointDerivative(ref_el, midpoint, alpha))
entity_ids = {d: {e: [] for e in ref_el.sub_entities[d]}
for d in range(dim + 1)}
entity_ids[dim][0] = list(range(len(nodes)))
super(DiscontinuousTaylorDualSet, self).__init__(nodes, ref_el, entity_ids)
class HigherOrderDiscontinuousTaylor(finite_element.CiarletElement):
"""The discontinuous Taylor finite element. Use a Taylor basis for DG."""
def __init__(self, ref_el, degree):
poly_set = polynomial_set.ONPolynomialSet(ref_el, degree)
dual = DiscontinuousTaylorDualSet(ref_el, degree)
formdegree = ref_el.get_spatial_dimension() # n-form
super(HigherOrderDiscontinuousTaylor, self).__init__(poly_set, dual, degree, formdegree)
def DiscontinuousTaylor(ref_el, degree):
if degree == 0:
return P0.P0(ref_el)
else:
return HigherOrderDiscontinuousTaylor(ref_el, degree)
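# Illustrative usage sketch (not part of the original module; assumes FIAT's
# ufc_simplex helper for building a reference interval):
#
# from FIAT.reference_element import ufc_simplex
# element = DiscontinuousTaylor(ufc_simplex(1), 2) # degree-2 Taylor basis on an interval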
| lgpl-3.0 |
lsp84ch83/PyText | UItestframework/testcase/test_baidu.py | 1 | 1073 | #coding=utf-8
from time import sleep
from public.common import mytest
from public.pages import baiduIndexPage
from public.common import datainfo
class TestBaiduIndex(mytest.MyTest):
"""百度搜索测试"""
def _search(self,searchKey):
"""封装百度搜索的函数"""
baidupage = baiduIndexPage.BaiduIndexPage(self.dr)
baidupage.into_baidu_page()
baidupage.input_search_key(searchKey)
baidupage.click_search_button()
sleep(2)
self.assertIn(searchKey, baidupage.return_title())
def test_search(self):
"""直接搜索"""
baidupage = baiduIndexPage.BaiduIndexPage(self.dr)
baidupage.into_baidu_page()
baidupage.input_search_key('小石头tester')
baidupage.click_search_button()
sleep(2)
self.assertIn('小石头',baidupage.return_title())
def test_search_excel(self):
"""使用数据驱动,进行测试"""
datas = datainfo.get_xls_to_list('searKey.xlsx','Sheet1')
for data in datas:
self._search(data)
| gpl-3.0 |
n3ocort3x/Kernel_one_x_sense | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
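# Worked example (illustrative only): nsecs(2, 500000000) == 2500000000, and
# nsecs_secs(2500000000) == 2 while nsecs_nsecs(2500000000) == 500000000.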
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
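# Illustrative example (not from the original source): after
# add_stats(d, "read", 5) followed by add_stats(d, "read", 3),
# d["read"] == (3, 5, 4, 2), i.e. (min, max, running avg, count).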
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
Yukarumya/Yukarum-Redfoxes | dom/canvas/test/webgl-conf/checkout/deqp/functional/gles3/fborender/fborender_test_generator.py | 51 | 3682 | #!/usr/bin/env python
# Copyright (c) 2016 The Khronos Group Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and/or associated documentation files (the
# "Materials"), to deal in the Materials without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Materials, and to
# permit persons to whom the Materials are furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Materials.
#
# THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS.
"""
Generator for fborender* tests.
This file needs to be run in its folder.
"""
import sys
_DO_NOT_EDIT_WARNING = """<!--
This file is auto-generated from fborender_test_generator.py
DO NOT EDIT!
-->
"""
_HTML_TEMPLATE = """<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>WebGL Framebuffer Render Tests</title>
<link rel="stylesheet" href="../../../../resources/js-test-style.css"/>
<script src="../../../../js/js-test-pre.js"></script>
<script src="../../../../js/webgl-test-utils.js"></script>
<script src="../../../../closure-library/closure/goog/base.js"></script>
<script src="../../../deqp-deps.js"></script>
<script>goog.require('functional.gles3.es3fFboRenderTest');</script>
</head>
<body>
<div id="description"></div>
<div id="console"></div>
<canvas id="canvas" width="200" height="128"> </canvas>
<script>
var wtu = WebGLTestUtils;
var gl = wtu.create3DContext('canvas', null, 2);
functional.gles3.es3fFboRenderTest.run(gl, [%(start)s, %(end)s]);
</script>
</body>
</html>
"""
_GROUPS = [
'stencil_clear',
'shared_colorbuffer_clear',
'shared_colorbuffer',
'shared_depth_stencil',
'resize',
'recreate_color',
'recreate_depth_stencil'
]
_GROUP_TEST_COUNTS = [
1,
1,
3,
1,
4,
7,
1
]
def GenerateFilename(group, count, index):
"""Generate test filename."""
filename = group
assert index >= 0 and index < count
if count > 1:
index_str = str(index)
if index < 10:
index_str = "0" + index_str
filename += "_" + index_str
filename += ".html"
return filename
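# Worked examples (illustrative only): GenerateFilename('shared_colorbuffer', 3, 1)
# returns 'shared_colorbuffer_01.html', while a single-test group keeps the bare
# name, e.g. GenerateFilename('stencil_clear', 1, 0) -> 'stencil_clear.html'.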
def WriteTest(filename, start, end):
"""Write one test."""
file = open(filename, "wb")
file.write(_DO_NOT_EDIT_WARNING)
file.write(_HTML_TEMPLATE % {
'start': start,
'end': end
})
file.close()
def GenerateTests():
"""Generate all tests."""
assert len(_GROUPS) == len(_GROUP_TEST_COUNTS)
test_index = 0
filelist = []
for ii in range(len(_GROUPS)):
group = _GROUPS[ii]
count = _GROUP_TEST_COUNTS[ii]
for index in range(count):
filename = GenerateFilename(group, count, index)
filelist.append(filename)
WriteTest(filename, test_index, test_index + 1)
test_index += 1
return filelist
def GenerateTestList(filelist):
file = open("00_test_list.txt", "wb")
file.write('\n'.join(filelist))
file.close
def main(argv):
"""This is the main function."""
filelist = GenerateTests()
GenerateTestList(filelist)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mpl-2.0 |
tiagoantao/igrat | igrat/bio/tasks/load_genomeposition.py | 1 | 1502 | """
Copyright 2013 Tiago Antao
This file is part of igrat.
igrat is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
igrat is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with igrat. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import vcf
from igrat.cunit import FileImportCompute
class GPImportCompute(FileImportCompute):
def __init__(self, fname, outputs, key):
FileImportCompute.__init__(self, fname, outputs)
self.out_db = self.outputs[0]
self.key = key
def compute(self, start, end):
cstart, pstart = start
cend, pend = end
#cstart and cend are expected to be the same
v = vcf.Reader(filename=self.fname)
recs = v.fetch(cstart, pstart, pend)
for rec in recs:
# NOTE: the loop body is missing in the original file; `pass` is a hypothetical
# placeholder so the module at least parses.
pass
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("fname", help="Indexed VCF to load")
parser.add_argument("db", help="DB")
parser.add_argument("key", help="key")
parser.parse_args()
#TODO: include filter
| agpl-3.0 |
angr/angr | angr/analyses/girlscout.py | 3 | 32197 | raise ImportError("Don't import me! I don't work!")
import logging
import math
import os
import pickle
import re
import string
from collections import defaultdict
from datetime import datetime
import cle
import networkx
import progressbar
import pyvex
from . import Analysis
from angr.analyses.cfg.cfg_fast import SegmentList
from .. import options as o
from ..annocfg import AnnotatedCFG
from ..errors import SimMemoryError, SimEngineError, AngrError, SimValueError, SimIRSBError, SimSolverModeError, \
SimError
from ..state_plugins.sim_action import SimActionData
from ..surveyors import Explorer, Slicecutor
l = logging.getLogger(name=__name__)
class GirlScout(Analysis):
"""
We find functions inside the given binary, try to decide the base address if needed, and build a control-flow
graph on top of that to see if there is an entry or not. Obviously if the binary is not loaded as a blob (not
using Blob as its backend), GirlScout will not try to determine the base address.
It's also optional to perform a full code scan of the binary to show where all code is. By default we don't scan
the entire binary since it's time-consuming.
You probably need a BoyScout to determine the possible architecture and endianness of your binary blob.
"""
def __init__(self, binary=None, start=None, end=None, pickle_intermediate_results=False, perform_full_code_scan=False):
self._binary = binary if binary is not None else self.project.loader.main_object
self._start = start if start is not None else self._binary.min_addr
self._end = end if end is not None else self._binary.max_addr
self._pickle_intermediate_results = pickle_intermediate_results
self._perform_full_code_scan = perform_full_code_scan
l.debug("Starts at 0x%08x and ends at 0x%08x.", self._start, self._end)
# Valid memory regions
self._valid_memory_regions = sorted(
(start, start+len(backer)) for start, backer in self.project.loader.memory.backers())
self._valid_memory_region_size = sum([ (end - start) for start, end in self._valid_memory_regions ])
# Size of each basic block
self._block_size = { }
self._next_addr = self._start - 1
# Starting point of functions
self.functions = None
# Calls between functions
self.call_map = networkx.DiGraph()
# A CFG - this is not what you get from project.analyses.CFG() !
self.cfg = networkx.DiGraph()
# Create the segment list
self._seg_list = SegmentList()
self._read_addr_to_run = defaultdict(list)
self._write_addr_to_run = defaultdict(list)
# All IRSBs with an indirect exit target
self._indirect_jumps = set()
self._unassured_functions = set()
self.base_address = None
# Start working!
self._reconnoiter()
    # NOTE: the property below is broken as written: it returns self.call_map,
    # which resolves to the property itself and recurses infinitely, and it also
    # prevents __init__ from assigning the plain call_map attribute. It is kept
    # here commented out rather than removed.
    # @property
    # def call_map(self):
    #     return self.call_map
def _get_next_addr_to_search(self, alignment=None):
# TODO: Take care of those functions that are already generated
curr_addr = self._next_addr
if self._seg_list.has_blocks:
curr_addr = self._seg_list.next_free_pos(curr_addr)
if alignment is not None:
if curr_addr % alignment > 0:
curr_addr = curr_addr - curr_addr % alignment + alignment
# Make sure curr_addr exists in binary
accepted = False
for start, end in self._valid_memory_regions:
if curr_addr >= start and curr_addr < end:
# accept
accepted = True
break
if curr_addr < start:
# accept, but we are skipping the gap
accepted = True
curr_addr = start
if not accepted:
# No memory available!
return None
self._next_addr = curr_addr
if self._end is None or curr_addr < self._end:
l.debug("Returning new recon address: 0x%08x", curr_addr)
return curr_addr
else:
l.debug("0x%08x is beyond the ending point.", curr_addr)
return None
def _get_next_code_addr(self, initial_state):
"""
        Besides calling _get_next_addr, we check whether the data located at that address looks like code or not. If
        not, we move on and request the next valid address.
"""
next_addr = self._get_next_addr_to_search()
if next_addr is None:
return None
start_addr = next_addr
sz = ""
is_sz = True
while is_sz:
# Get data until we meet a 0
while next_addr in initial_state.memory:
try:
l.debug("Searching address %x", next_addr)
val = initial_state.mem_concrete(next_addr, 1)
if val == 0:
if len(sz) < 4:
is_sz = False
else:
reach_end = True
break
if chr(val) not in string.printable:
is_sz = False
break
sz += chr(val)
next_addr += 1
except SimValueError:
# Not concretizable
l.debug("Address 0x%08x is not concretizable!", next_addr)
break
if len(sz) > 0 and is_sz:
l.debug("Got a string of %d chars: [%s]", len(sz), sz)
# l.debug("Occpuy %x - %x", start_addr, start_addr + len(sz) + 1)
self._seg_list.occupy(start_addr, len(sz) + 1)
sz = ""
next_addr = self._get_next_addr_to_search()
if next_addr is None:
return None
# l.debug("next addr = %x", next_addr)
start_addr = next_addr
if is_sz:
next_addr += 1
instr_alignment = initial_state.arch.instruction_alignment
if start_addr % instr_alignment > 0:
start_addr = start_addr - start_addr % instr_alignment + \
instr_alignment
l.debug('_get_next_code_addr() returns 0x%x', start_addr)
return start_addr
def _symbolic_reconnoiter(self, addr, target_addr, max_depth=10):
"""
When an IRSB has more than two exits (for example, a jumptable), we
cannot concretize their exits in concrete mode. Hence we statically
execute the function from beginning in this method, and then switch to
symbolic mode for the final IRSB to get all possible exits of that
IRSB.
"""
state = self.project.factory.blank_state(addr=addr,
mode="symbolic",
add_options={o.CALLLESS}
)
initial_exit = self.project.factory.path(state)
explorer = Explorer(self.project,
start=initial_exit,
max_depth=max_depth,
find=(target_addr), num_find=1).run()
if len(explorer.found) > 0:
path = explorer.found[0]
last_run = path.last_run
return last_run.flat_exits()
else:
return []
def _static_memory_slice(self, run):
if isinstance(run, SimIRSB):
for stmt in run.statements:
refs = stmt.actions
if len(refs) > 0:
real_ref = refs[-1]
if type(real_ref) == SimActionData:
if real_ref.action == 'write':
addr = real_ref.addr
if not run.initial_state.solver.symbolic(addr):
concrete_addr = run.initial_state.solver.eval(addr)
                                self._write_addr_to_run[concrete_addr].append(run.addr)
elif real_ref.action == 'read':
addr = real_ref.addr
if not run.initial_state.solver.symbolic(addr):
concrete_addr = run.initial_state.solver.eval(addr)
                                self._read_addr_to_run[concrete_addr].append(run.addr)
def _scan_code(self, traced_addresses, function_exits, initial_state, starting_address):
# Saving tuples like (current_function_addr, next_exit_addr)
# Current_function_addr == -1 for exits not inside any function
remaining_exits = set()
next_addr = starting_address
# Initialize the remaining_exits set
remaining_exits.add((next_addr,
next_addr,
next_addr,
initial_state.copy()))
while len(remaining_exits):
current_function_addr, previous_addr, parent_addr, state = \
remaining_exits.pop()
if previous_addr in traced_addresses:
continue
# Add this node to the CFG first, in case this is a dangling node
self.cfg.add_node(previous_addr)
if current_function_addr != -1:
l.debug("Tracing new exit 0x%08x in function 0x%08x",
previous_addr, current_function_addr)
else:
l.debug("Tracing new exit 0x%08x", previous_addr)
traced_addresses.add(previous_addr)
self._scan_block(previous_addr, state, current_function_addr, function_exits, remaining_exits, traced_addresses)
def _scan_block(self, addr, state, current_function_addr, function_exits, remaining_exits, traced_addresses):
# Let's try to create the pyvex IRSB directly, since it's much faster
try:
irsb = self.project.factory.block(addr).vex
# Log the size of this basic block
self._block_size[addr] = irsb.size
# Occupy the block
self._seg_list.occupy(addr, irsb.size)
except (SimEngineError, SimMemoryError):
return
# Get all possible successors
next, jumpkind = irsb.next, irsb.jumpkind
successors = [ (i.dst, i.jumpkind) for i in irsb.statements if type(i) is pyvex.IRStmt.Exit]
successors.append((next, jumpkind))
# Process each successor
for suc in successors:
target, jumpkind = suc
if type(target) is pyvex.IRExpr.Const:
next_addr = target.con.value
else:
next_addr = None
if jumpkind == 'Ijk_Boring' and next_addr is not None:
remaining_exits.add((current_function_addr, next_addr,
addr, None))
elif jumpkind == 'Ijk_Call' and next_addr is not None:
# Log it before we cut the tracing :)
if jumpkind == "Ijk_Call":
if current_function_addr != -1:
self.functions.add(current_function_addr)
self.functions.add(next_addr)
self.call_map.add_edge(current_function_addr, next_addr)
else:
self.functions.add(next_addr)
self.call_map.add_node(next_addr)
elif jumpkind == "Ijk_Boring" or \
jumpkind == "Ijk_Ret":
if current_function_addr != -1:
function_exits[current_function_addr].add(next_addr)
# If we have traced it before, don't trace it anymore
if next_addr in traced_addresses:
return
remaining_exits.add((next_addr, next_addr, addr, None))
l.debug("Function calls: %d", len(self.call_map.nodes()))
def _scan_block_(self, addr, state, current_function_addr, function_exits, remaining_exits, traced_addresses):
# Get a basic block
state.ip = addr
s_path = self.project.factory.path(state)
try:
s_run = s_path.next_run
except SimIRSBError as ex:
l.debug(ex)
return
except AngrError as ex:
# "No memory at xxx"
l.debug(ex)
return
except (SimValueError, SimSolverModeError) as ex:
# Cannot concretize something when executing the SimRun
l.debug(ex)
return
except SimError as ex:
# Catch all simuvex errors
l.debug(ex)
return
if type(s_run) is SimIRSB:
# Calculate its entropy to avoid jumping into uninitialized/all-zero space
bytes = s_run.irsb._state[1]['bytes']
size = s_run.irsb.size
ent = self._calc_entropy(bytes, size=size)
if ent < 1.0 and size > 40:
# Skipping basic blocks that have a very low entropy
return
# self._static_memory_slice(s_run)
# Mark that part as occupied
if isinstance(s_run, SimIRSB):
self._seg_list.occupy(addr, s_run.irsb.size)
successors = s_run.flat_successors + s_run.unsat_successors
has_call_exit = False
tmp_exit_set = set()
for suc in successors:
if suc.history.jumpkind == "Ijk_Call":
has_call_exit = True
for suc in successors:
jumpkind = suc.history.jumpkind
if has_call_exit and jumpkind == "Ijk_Ret":
jumpkind = "Ijk_FakeRet"
if jumpkind == "Ijk_Ret":
continue
try:
# Try to concretize the target. If we can't, just move on
# to the next target
next_addr = suc.solver.eval_one(suc.ip)
except (SimValueError, SimSolverModeError) as ex:
# Undecidable jumps (might be a function return, or a conditional branch, etc.)
# We log it
self._indirect_jumps.add((suc.history.jumpkind, addr))
l.info("IRSB 0x%x has an indirect exit %s.", addr, suc.history.jumpkind)
continue
self.cfg.add_edge(addr, next_addr, jumpkind=jumpkind)
# Log it before we cut the tracing :)
if jumpkind == "Ijk_Call":
if current_function_addr != -1:
self.call_map.add_edge(current_function_addr, next_addr)
else:
self.call_map.add_node(next_addr)
elif jumpkind == "Ijk_Boring" or \
jumpkind == "Ijk_Ret":
if current_function_addr != -1:
function_exits[current_function_addr].add(next_addr)
# If we have traced it before, don't trace it anymore
if next_addr in traced_addresses:
continue
            # If we have traced it in the current loop, don't trace it either
if next_addr in tmp_exit_set:
continue
tmp_exit_set.add(next_addr)
if jumpkind == "Ijk_Call":
# This is a call. Let's record it
new_state = suc.copy()
# Unconstrain those parameters
# TODO: Support other archs as well
# if 12 + 16 in new_state.registers.mem:
# del new_state.registers.mem[12 + 16]
#if 16 + 16 in new_state.registers.mem:
# del new_state.registers.mem[16 + 16]
#if 20 + 16 in new_state.registers.mem:
# del new_state.registers.mem[20 + 16]
# 0x8000000: call 0x8000045
remaining_exits.add((next_addr, next_addr, addr, new_state))
l.debug("Function calls: %d", len(self.call_map.nodes()))
elif jumpkind == "Ijk_Boring" or \
jumpkind == "Ijk_Ret" or \
jumpkind == "Ijk_FakeRet":
new_state = suc.copy()
l.debug("New exit with jumpkind %s", jumpkind)
# FIXME: should not use current_function_addr if jumpkind is "Ijk_Ret"
remaining_exits.add((current_function_addr, next_addr,
addr, new_state))
elif jumpkind == "Ijk_NoDecode":
# That's something VEX cannot decode!
# We assume we ran into a deadend
pass
elif jumpkind.startswith("Ijk_Sig"):
# Should not go into that exit
pass
elif jumpkind == "Ijk_TInval":
# ppc32: isync
# FIXME: It is the same as Ijk_Boring! Process it later
pass
elif jumpkind == 'Ijk_Sys_syscall':
# Let's not jump into syscalls
pass
elif jumpkind == 'Ijk_InvalICache':
pass
elif jumpkind == 'Ijk_MapFail':
pass
elif jumpkind == 'Ijk_EmWarn':
pass
else:
raise Exception("NotImplemented")
def _scan_function_prologues(self, traced_address, function_exits, initial_state):
"""
Scan the entire program space for prologues, and start code scanning at those positions
:param traced_address:
:param function_exits:
:param initial_state:
:param next_addr:
:returns:
"""
# Precompile all regexes
regexes = set()
for ins_regex in self.project.arch.function_prologs:
r = re.compile(ins_regex)
regexes.add(r)
# TODO: Make sure self._start is aligned
# Construct the binary blob first
for start_, bytes_ in self.project.loader.main_object.memory.backers():
for regex in regexes:
# Match them!
                for mo in regex.finditer(bytes_):
position = mo.start() + start_
if position % self.project.arch.instruction_alignment == 0:
if position not in traced_address:
percentage = self._seg_list.occupied_size * 100.0 / (self._valid_memory_region_size)
l.info("Scanning %xh, progress %0.04f%%", position, percentage)
self._unassured_functions.add(position)
self._scan_code(traced_address, function_exits, initial_state, position)
else:
l.info("Skipping %xh", position)
def _process_indirect_jumps(self):
"""
        Execute each basic block with an indeterminable exit target
:returns:
"""
function_starts = set()
l.info("We have %d indirect jumps", len(self._indirect_jumps))
for jumpkind, irsb_addr in self._indirect_jumps:
# First execute the current IRSB in concrete mode
if len(function_starts) > 20:
break
if jumpkind == "Ijk_Call":
state = self.project.factory.blank_state(addr=irsb_addr, mode="concrete",
add_options={o.SYMBOLIC_INITIAL_VALUES}
)
path = self.project.factory.path(state)
l.debug(hex(irsb_addr))
try:
r = (path.next_run.successors + path.next_run.unsat_successors)[0]
ip = r.solver.eval_one(r.ip)
function_starts.add(ip)
continue
except SimSolverModeError as ex:
pass
# Not resolved
# Do a backward slicing from the call
irsb = self.project.factory.block(irsb_addr).vex
stmts = irsb.statements
# Start slicing from the "next"
b = Blade(self.cfg, irsb.addr, -1, project=self.project)
# Debugging output
for addr, stmt_idx in sorted(list(b.slice.nodes())):
irsb = self.project.factory.block(addr).vex
stmts = irsb.statements
l.debug("%x: %d | %s %d", (addr, stmt_idx), stmts[stmt_idx], b.slice.in_degree((addr, stmt_idx)))
# Get all sources
sources = [n for n in b.slice.nodes() if b.slice.in_degree(n) == 0]
# Create the annotated CFG
annotatedcfg = AnnotatedCFG(self.project, None, target_irsb_addr=irsb_addr, detect_loops=False)
annotatedcfg.from_digraph(b.slice)
for src_irsb, src_stmt_idx in sources:
# Use slicecutor to execute each one, and get the address
# We simply give up if any exception occurs on the way
start_state = self.project.factory.blank_state(addr=src_irsb,
add_options=
{o.DO_RET_EMULATION,
o.TRUE_RET_EMULATION_GUARD}
)
start_path = self.project.factory.path(start_state)
# Create the slicecutor
slicecutor = Slicecutor(self.project, annotatedcfg, start=start_path, targets=(irsb_addr,))
# Run it!
try:
slicecutor.run()
except KeyError as ex:
# This is because the program slice is incomplete.
# Blade will support more IRExprs and IRStmts
l.debug("KeyError occurred due to incomplete program slice.", exc_info=ex)
continue
# Get the jumping targets
for r in slicecutor.reached_targets:
if r.next_run.successors:
target_ip = r.next_run.successors[0].ip
se = r.next_run.successors[0].se
if not se.symbolic(target_ip):
concrete_ip = se.eval_one(target_ip)
function_starts.add(concrete_ip)
l.info("Found a function address %x", concrete_ip)
return function_starts
def _solve_forbase_address(self, function_starts, functions):
"""
Voting for the most possible base address.
:param function_starts:
:param functions:
:returns:
"""
pseudo_base_addr = self.project.loader.main_object.min_addr
base_addr_ctr = { }
for s in function_starts:
for f in functions:
base_addr = s - f + pseudo_base_addr
ctr = 1
for k in function_starts:
if k - base_addr + pseudo_base_addr in functions:
ctr += 1
if ctr > 5:
base_addr_ctr[base_addr] = ctr
if len(base_addr_ctr):
base_addr, hits = sorted([(k, v) for k, v in base_addr_ctr.items()], key=lambda x: x[1], reverse=True)[0]
return base_addr
else:
return None
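    # Worked example of the voting above (numbers are illustrative only): if a
    # resolved call target 0x10400 lines up with a candidate function at file
    # offset 0x400, the candidate base is 0x10400 - 0x400 + pseudo_base_addr.
    # Every candidate base that explains more than 5 of the resolved targets
    # gets a vote count, and the candidate with the most hits wins.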
def _reconnoiter(self):
if type(self._binary) is cle.blob.Blob:
self._determinebase_address()
if self._perform_full_code_scan:
self._full_code_scan()
def _determinebase_address(self):
"""
The basic idea is simple: start from a specific point, try to construct
functions as much as we can, and maintain a function distribution graph
        and a call graph simultaneously. Repeat searching until no new
        function can be found.
A function should start with:
# some addresses that a call exit leads to, or
        # certain instructions. They are recorded in SimArch.
For a better performance, instead of blindly scanning the entire process
space, we first try to search for instruction patterns that a function
may start with, and start scanning at those positions. Then we try to
decode anything that is left.
"""
traced_address = set()
self.functions = set()
self.call_map = networkx.DiGraph()
self.cfg = networkx.DiGraph()
initial_state = self.project.factory.blank_state(mode="fastpath")
initial_options = initial_state.options - { o.TRACK_CONSTRAINTS } - o.refs
initial_options |= { o.SUPER_FASTPATH }
# initial_options.remove(o.COW_STATES)
initial_state.options = initial_options
# Sadly, not all calls to functions are explicitly made by call
# instruction - they could be a jmp or b, or something else. So we
# should record all exits from a single function, and then add
# necessary calling edges in our call map during the post-processing
# phase.
function_exits = defaultdict(set)
dump_file_prefix = self.project.filename
if self._pickle_intermediate_results and \
os.path.exists(dump_file_prefix + "_indirect_jumps.angr"):
l.debug("Loading existing intermediate results.")
self._indirect_jumps = pickle.load(open(dump_file_prefix + "_indirect_jumps.angr", "rb"))
self.cfg = pickle.load(open(dump_file_prefix + "_coercecfg.angr", "rb"))
self._unassured_functions = pickle.load(open(dump_file_prefix + "_unassured_functions.angr", "rb"))
else:
# Performance boost :-)
# Scan for existing function prologues
self._scan_function_prologues(traced_address, function_exits, initial_state)
if self._pickle_intermediate_results:
l.debug("Dumping intermediate results.")
pickle.dump(self._indirect_jumps, open(dump_file_prefix + "_indirect_jumps.angr", "wb"), -1)
pickle.dump(self.cfg, open(dump_file_prefix + "_coercecfg.angr", "wb"), -1)
pickle.dump(self._unassured_functions, open(dump_file_prefix + "_unassured_functions.angr", "wb"), -1)
if len(self._indirect_jumps):
# We got some indirect jumps!
# Gotta execute each basic block and see where it wants to jump to
function_starts = self._process_indirect_jumps()
self.base_address = self._solve_forbase_address(function_starts, self._unassured_functions)
l.info("Base address should be 0x%x", self.base_address)
else:
l.debug("No indirect jumps are found. We switch to the slowpath mode.")
# TODO: Slowpath mode...
while True:
next_addr = self._get_next_code_addr(initial_state)
                if next_addr is None:
                    break
                percentage = self._seg_list.occupied_size * 100.0 / self._valid_memory_region_size
                l.info("Analyzing %xh, progress %0.04f%%", next_addr, percentage)
self.call_map.add_node(next_addr)
self._scan_code(traced_address, function_exits, initial_state, next_addr)
# Post-processing: Map those calls that are not made by call/blr
# instructions to their targets in our map
for src, s in function_exits.items():
if src in self.call_map:
for target in s:
if target in self.call_map:
self.call_map.add_edge(src, target)
nodes = sorted(self.call_map.nodes())
for i in range(len(nodes) - 1):
if nodes[i] >= nodes[i + 1] - 4:
for dst in self.call_map.successors(nodes[i + 1]):
self.call_map.add_edge(nodes[i], dst)
for src in self.call_map.predecessors(nodes[i + 1]):
self.call_map.add_edge(src, nodes[i])
self.call_map.remove_node(nodes[i + 1])
l.debug("Construction finished.")
def _full_code_scan(self):
"""
Perform a full code scan on the target binary.
"""
# We gotta time this function
start_time = datetime.now()
traced_address = set()
self.functions = set()
self.call_map = networkx.DiGraph()
self.cfg = networkx.DiGraph()
initial_state = self.project.factory.blank_state(mode="fastpath")
initial_options = initial_state.options - {o.TRACK_CONSTRAINTS} - o.refs
initial_options |= {o.SUPER_FASTPATH}
# initial_options.remove(o.COW_STATES)
initial_state.options = initial_options
# Sadly, not all calls to functions are explicitly made by call
# instruction - they could be a jmp or b, or something else. So we
# should record all exits from a single function, and then add
# necessary calling edges in our call map during the post-processing
# phase.
function_exits = defaultdict(set)
widgets = [progressbar.Percentage(),
' ',
progressbar.Bar(marker=progressbar.RotatingMarker()),
' ',
progressbar.Timer(),
' ',
progressbar.ETA()
]
pb = progressbar.ProgressBar(widgets=widgets, maxval=10000 * 100).start()
while True:
next_addr = self._get_next_code_addr(initial_state)
percentage = self._seg_list.occupied_size * 100.0 / (self._valid_memory_region_size)
if percentage > 100.0: percentage = 100.0
pb.update(percentage * 10000)
if next_addr is not None:
l.info("Analyzing %xh, progress %0.04f%%", next_addr, percentage)
else:
l.info('No more addr to analyze. Progress %0.04f%%', percentage)
break
self.call_map.add_node(next_addr)
self._scan_code(traced_address, function_exits, initial_state, next_addr)
pb.finish()
end_time = datetime.now()
l.info("A full code scan takes %d seconds.", (end_time - start_time).seconds)
def _calc_entropy(self, data, size=None):
if not data:
return 0
entropy = 0
if size is None: size = len(data)
data = str(pyvex.ffi.buffer(data, size))
for x in range(0, 256):
p_x = float(data.count(chr(x)))/size
if p_x > 0:
entropy += - p_x * math.log(p_x, 2)
return entropy
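    # Sanity check for the entropy heuristic above: a buffer of identical bytes
    # has entropy 0.0 and uniformly random bytes approach 8.0 bits/byte, so the
    # "ent < 1.0 and size > 40" test in _scan_block_ skips mostly-constant
    # regions such as zero-filled space.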
def _dbg_output(self):
ret = ""
ret += "Functions:\n"
function_list = list(self.functions)
# Sort it
function_list = sorted(function_list)
for f in function_list:
ret += "0x%08x" % f
return ret
def genenare_callmap_sif(self, filepath):
"""
Generate a sif file from the call map
"""
graph = self.call_map
if graph is None:
raise AngrGirlScoutError('Please generate the call graph first.')
f = open(filepath, "wb")
for src, dst in graph.edges():
f.write("0x%x\tDirectEdge\t0x%x\n" % (src, dst))
f.close()
def generate_code_cover(self):
"""
Generate a list of all recovered basic blocks.
"""
lst = [ ]
for irsb_addr in self.cfg.nodes():
if irsb_addr not in self._block_size:
continue
irsb_size = self._block_size[irsb_addr]
lst.append((irsb_addr, irsb_size))
lst = sorted(lst, key=lambda x: x[0])
return lst
from angr.analyses import AnalysesHub
AnalysesHub.register_default('GirlScout', GirlScout)
from ..blade import Blade
from ..errors import AngrGirlScoutError
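# A minimal usage sketch (hypothetical, and unrunnable as long as the
# ImportError guard at the top of this file is in place): the analysis is
# registered with AnalysesHub above, so it would normally be reached through a
# project's analyses hub, e.g.
#
#   proj = angr.Project("firmware.bin", main_opts={"backend": "blob"})
#   gs = proj.analyses.GirlScout(perform_full_code_scan=True)
#   print(gs.base_address, gs.generate_code_cover())
#
# The firmware path and loader options are made up for illustration.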
| bsd-2-clause |
nitin-cherian/LifeLongLearning | Python/PythonProgrammingLanguage/Encapsulation/encap_env/lib/python3.5/site-packages/pickleshare.py | 12 | 9849 | #!/usr/bin/env python
""" PickleShare - a small 'shelve' like datastore with concurrency support
Like shelve, a PickleShareDB object acts like a normal dictionary. Unlike
shelve, many processes can access the database simultaneously. Changing a
value in database is immediately visible to other processes accessing the
same database.
Concurrency is possible because the values are stored in separate files. Hence
the "database" is a directory where *all* files are governed by PickleShare.
Example usage::
from pickleshare import *
db = PickleShareDB('~/testpickleshare')
db.clear()
print "Should be empty:",db.items()
db['hello'] = 15
db['aku ankka'] = [1,2,313]
db['paths/are/ok/key'] = [1,(5,46)]
print db.keys()
del db['aku ankka']
This module is certainly not ZODB, but can be used for low-load
(non-mission-critical) situations where tiny code size trumps the
advanced features of a "real" object database.
Installation guide: pip install path pickleshare
Author: Ville Vainio <[email protected]>
License: MIT open source license.
"""
from __future__ import print_function
__version__ = "0.7.4"
try:
from pathlib import Path
except ImportError:
# Python 2 backport
from pathlib2 import Path
import os,stat,time
import collections
try:
import cPickle as pickle
except ImportError:
import pickle
import errno
import sys
if sys.version_info[0] >= 3:
string_types = (str,)
else:
string_types = (str, unicode)
def gethashfile(key):
return ("%02x" % abs(hash(key) % 256))[-2:]
_sentinel = object()
class PickleShareDB(collections.MutableMapping):
""" The main 'connection' object for PickleShare database """
def __init__(self,root):
""" Return a db object that will manage the specied directory"""
if not isinstance(root, string_types):
root = str(root)
root = os.path.abspath(os.path.expanduser(root))
self.root = Path(root)
if not self.root.is_dir():
# catching the exception is necessary if multiple processes are concurrently trying to create a folder
# exists_ok keyword argument of mkdir does the same but only from Python 3.5
try:
self.root.mkdir(parents=True)
except OSError as e:
if e.errno != errno.EEXIST:
raise
# cache has { 'key' : (obj, orig_mod_time) }
self.cache = {}
def __getitem__(self,key):
""" db['key'] reading """
fil = self.root / key
try:
mtime = (fil.stat()[stat.ST_MTIME])
except OSError:
raise KeyError(key)
if fil in self.cache and mtime == self.cache[fil][1]:
return self.cache[fil][0]
try:
# The cached item has expired, need to read
with fil.open("rb") as f:
obj = pickle.loads(f.read())
except:
raise KeyError(key)
self.cache[fil] = (obj,mtime)
return obj
def __setitem__(self,key,value):
""" db['key'] = 5 """
fil = self.root / key
parent = fil.parent
if parent and not parent.is_dir():
parent.mkdir(parents=True)
# We specify protocol 2, so that we can mostly go between Python 2
# and Python 3. We can upgrade to protocol 3 when Python 2 is obsolete.
with fil.open('wb') as f:
pickle.dump(value, f, protocol=2)
try:
self.cache[fil] = (value, fil.stat().st_mtime)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def hset(self, hashroot, key, value):
""" hashed set """
hroot = self.root / hashroot
if not hroot.is_dir():
hroot.mkdir()
hfile = hroot / gethashfile(key)
d = self.get(hfile, {})
d.update( {key : value})
self[hfile] = d
def hget(self, hashroot, key, default = _sentinel, fast_only = True):
""" hashed get """
hroot = self.root / hashroot
hfile = hroot / gethashfile(key)
d = self.get(hfile, _sentinel )
#print "got dict",d,"from",hfile
if d is _sentinel:
if fast_only:
if default is _sentinel:
raise KeyError(key)
return default
# slow mode ok, works even after hcompress()
d = self.hdict(hashroot)
return d.get(key, default)
def hdict(self, hashroot):
""" Get all data contained in hashed category 'hashroot' as dict """
hfiles = self.keys(hashroot + "/*")
hfiles.sort()
last = len(hfiles) and hfiles[-1] or ''
if last.endswith('xx'):
# print "using xx"
hfiles = [last] + hfiles[:-1]
all = {}
for f in hfiles:
# print "using",f
try:
all.update(self[f])
except KeyError:
print("Corrupt",f,"deleted - hset is not threadsafe!")
del self[f]
self.uncache(f)
return all
def hcompress(self, hashroot):
""" Compress category 'hashroot', so hset is fast again
hget will fail if fast_only is True for compressed items (that were
hset before hcompress).
"""
hfiles = self.keys(hashroot + "/*")
all = {}
for f in hfiles:
# print "using",f
all.update(self[f])
self.uncache(f)
self[hashroot + '/xx'] = all
for f in hfiles:
p = self.root / f
if p.name == 'xx':
continue
p.unlink()
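    # A small usage sketch of the hashed-bucket API above (paths and keys are
    # examples only):
    #
    #   db = PickleShareDB('~/testpickleshare')
    #   db.hset('hashed', 'key1', 15)        # stored under hashed/<bucket>
    #   db.hget('hashed', 'key1')            # -> 15
    #   db.hcompress('hashed')               # merge buckets into hashed/xx
    #   db.hget('hashed', 'key1', fast_only=False)   # still -> 15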
def __delitem__(self,key):
""" del db["key"] """
fil = self.root / key
self.cache.pop(fil,None)
try:
fil.unlink()
except OSError:
            # not-found and permission-denied errors are ok - we
            # lost; the other process wins the conflict
pass
def _normalized(self, p):
""" Make a key suitable for user's eyes """
return str(p.relative_to(self.root)).replace('\\','/')
def keys(self, globpat = None):
""" All keys in DB, or all keys matching a glob"""
if globpat is None:
files = self.root.rglob('*')
else:
files = self.root.glob(globpat)
return [self._normalized(p) for p in files if p.is_file()]
def __iter__(self):
return iter(self.keys())
def __len__(self):
return len(self.keys())
def uncache(self,*items):
""" Removes all, or specified items from cache
Use this after reading a large amount of large objects
to free up memory, when you won't be needing the objects
for a while.
"""
if not items:
self.cache = {}
for it in items:
self.cache.pop(it,None)
def waitget(self,key, maxwaittime = 60 ):
""" Wait (poll) for a key to get a value
Will wait for `maxwaittime` seconds before raising a KeyError.
The call exits normally if the `key` field in db gets a value
within the timeout period.
Use this for synchronizing different processes or for ensuring
that an unfortunately timed "db['key'] = newvalue" operation
in another process (which causes all 'get' operation to cause a
KeyError for the duration of pickling) won't screw up your program
logic.
"""
wtimes = [0.2] * 3 + [0.5] * 2 + [1]
tries = 0
waited = 0
while 1:
try:
val = self[key]
return val
except KeyError:
pass
if waited > maxwaittime:
raise KeyError(key)
time.sleep(wtimes[tries])
waited+=wtimes[tries]
if tries < len(wtimes) -1:
tries+=1
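    # Illustrative use of waitget for cross-process synchronization (the key
    # name is arbitrary): one process eventually runs db['result'] = value,
    # while another blocks on it with a timeout:
    #
    #   value = db.waitget('result', maxwaittime=30)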
def getlink(self,folder):
""" Get a convenient link for accessing items """
return PickleShareLink(self, folder)
def __repr__(self):
return "PickleShareDB('%s')" % self.root
class PickleShareLink:
""" A shortdand for accessing nested PickleShare data conveniently.
Created through PickleShareDB.getlink(), example::
lnk = db.getlink('myobjects/test')
lnk.foo = 2
lnk.bar = lnk.foo + 5
"""
def __init__(self, db, keydir ):
self.__dict__.update(locals())
def __getattr__(self,key):
return self.__dict__['db'][self.__dict__['keydir']+'/' + key]
def __setattr__(self,key,val):
self.db[self.keydir+'/' + key] = val
def __repr__(self):
db = self.__dict__['db']
keys = db.keys( self.__dict__['keydir'] +"/*")
return "<PickleShareLink '%s': %s>" % (
self.__dict__['keydir'],
";".join([Path(k).basename() for k in keys]))
def main():
import textwrap
usage = textwrap.dedent("""\
pickleshare - manage PickleShare databases
Usage:
pickleshare dump /path/to/db > dump.txt
pickleshare load /path/to/db < dump.txt
pickleshare test /path/to/db
""")
DB = PickleShareDB
import sys
if len(sys.argv) < 2:
print(usage)
return
cmd = sys.argv[1]
args = sys.argv[2:]
if cmd == 'dump':
if not args: args= ['.']
db = DB(args[0])
import pprint
pprint.pprint(db.items())
elif cmd == 'load':
cont = sys.stdin.read()
db = DB(args[0])
data = eval(cont)
db.clear()
        for k,v in data.items():
db[k] = v
elif cmd == 'testwait':
db = DB(args[0])
db.clear()
print(db.waitget('250'))
    elif cmd == 'test':
        # NOTE: test() and stress() are not defined in this module; this branch
        # only works if they are provided elsewhere.
        test()
        stress()
if __name__== "__main__":
main()
| mit |
UCRoboticsLab/BaxterTictactoe | src/baxter_tools/src/baxter_tools/__init__.py | 3 | 1588 | # Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the Rethink Robotics nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from .smoketests import SmokeTest
| apache-2.0 |
geodynamics/gale | boost/libs/python/test/virtual_functions.py | 46 | 1803 | # Copyright David Abrahams 2004. Distributed under the Boost
# Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
'''
>>> from virtual_functions_ext import *
>>> class C1(concrete):
... def f(self, y):
... return concrete.f(self, Y(-y.value()))
>>> class C2(concrete):
... pass
>>> class A1(abstract):
... def f(self, y):
... return y.value() * 2
... def g(self, y):
... return self
>>> class A2(abstract):
... pass
>>> y1 = Y(16)
>>> y2 = Y(17)
#
# Test abstract with f,g overridden
#
>>> a1 = A1(42)
>>> a1.value()
42
# Call f,g indirectly from C++
>>> a1.call_f(y1)
32
>>> assert type(a1.call_g(y1)) is abstract
# Call f directly from Python
>>> a1.f(y2)
34
#
# Test abstract with f not overridden
#
>>> a2 = A2(42)
>>> a2.value()
42
# Call f indirectly from C++
>>> try: a2.call_f(y1)
... except AttributeError: pass
... else: print 'no exception'
# Call f directly from Python
>>> try: a2.call_f(y2)
... except AttributeError: pass
... else: print 'no exception'
############# Concrete Tests ############
#
# Test concrete with f overridden
#
>>> c1 = C1(42)
>>> c1.value()
42
# Call f indirectly from C++
>>> c1.call_f(y1)
-16
# Call f directly from Python
>>> c1.f(y2)
-17
#
# Test concrete with f not overridden
#
>>> c2 = C2(42)
>>> c2.value()
42
# Call f indirectly from C++
>>> c2.call_f(y1)
16
# Call f directly from Python
>>> c2.f(y2)
17
'''
def run(args = None):
import sys
import doctest
if args is not None:
sys.argv = args
return doctest.testmod(sys.modules.get(__name__))
if __name__ == '__main__':
print "running..."
import sys
status = run()[0]
if (status == 0): print "Done."
sys.exit(status)
| gpl-2.0 |
mozvip/CouchPotatoServer | libs/suds/sax/date.py | 160 | 10456 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Nathan Van Gheem ([email protected])
"""
The I{xdate} module provides classes for conversion
between XML dates and python objects.
"""
from logging import getLogger
from suds import *
from suds.xsd import *
import time
import datetime as dt
import re
log = getLogger(__name__)
class Date:
"""
An XML date object.
Supported formats:
- YYYY-MM-DD
- YYYY-MM-DD(z|Z)
- YYYY-MM-DD+06:00
- YYYY-MM-DD-06:00
@ivar date: The object value.
@type date: B{datetime}.I{date}
"""
def __init__(self, date):
"""
@param date: The value of the object.
@type date: (date|str)
@raise ValueError: When I{date} is invalid.
"""
if isinstance(date, dt.date):
self.date = date
return
if isinstance(date, basestring):
self.date = self.__parse(date)
return
raise ValueError, type(date)
def year(self):
"""
Get the I{year} component.
@return: The year.
@rtype: int
"""
return self.date.year
def month(self):
"""
Get the I{month} component.
@return: The month.
@rtype: int
"""
return self.date.month
def day(self):
"""
Get the I{day} component.
@return: The day.
@rtype: int
"""
return self.date.day
def __parse(self, s):
"""
Parse the string date.
Supported formats:
- YYYY-MM-DD
- YYYY-MM-DD(z|Z)
- YYYY-MM-DD+06:00
- YYYY-MM-DD-06:00
Although, the TZ is ignored because it's meaningless
without the time, right?
@param s: A date string.
@type s: str
@return: A date object.
@rtype: I{date}
"""
try:
year, month, day = s[:10].split('-', 2)
year = int(year)
month = int(month)
day = int(day)
return dt.date(year, month, day)
except:
            log.debug(s, exc_info=True)
raise ValueError, 'Invalid format "%s"' % s
def __str__(self):
return unicode(self)
def __unicode__(self):
return self.date.isoformat()
class Time:
"""
An XML time object.
Supported formats:
- HH:MI:SS
- HH:MI:SS(z|Z)
- HH:MI:SS.ms
- HH:MI:SS.ms(z|Z)
- HH:MI:SS(+|-)06:00
- HH:MI:SS.ms(+|-)06:00
@ivar tz: The timezone
@type tz: L{Timezone}
@ivar date: The object value.
@type date: B{datetime}.I{time}
"""
def __init__(self, time, adjusted=True):
"""
@param time: The value of the object.
@type time: (time|str)
@param adjusted: Adjust for I{local} Timezone.
@type adjusted: boolean
@raise ValueError: When I{time} is invalid.
"""
self.tz = Timezone()
if isinstance(time, dt.time):
self.time = time
return
if isinstance(time, basestring):
self.time = self.__parse(time)
if adjusted:
self.__adjust()
return
raise ValueError, type(time)
def hour(self):
"""
Get the I{hour} component.
@return: The hour.
@rtype: int
"""
return self.time.hour
def minute(self):
"""
Get the I{minute} component.
@return: The minute.
@rtype: int
"""
return self.time.minute
def second(self):
"""
Get the I{seconds} component.
@return: The seconds.
@rtype: int
"""
return self.time.second
def microsecond(self):
"""
Get the I{microsecond} component.
@return: The microsecond.
@rtype: int
"""
return self.time.microsecond
def __adjust(self):
"""
Adjust for TZ offset.
"""
if hasattr(self, 'offset'):
today = dt.date.today()
delta = self.tz.adjustment(self.offset)
d = dt.datetime.combine(today, self.time)
d = ( d + delta )
self.time = d.time()
def __parse(self, s):
"""
Parse the string date.
Patterns:
- HH:MI:SS
- HH:MI:SS(z|Z)
- HH:MI:SS.ms
- HH:MI:SS.ms(z|Z)
- HH:MI:SS(+|-)06:00
- HH:MI:SS.ms(+|-)06:00
@param s: A time string.
@type s: str
@return: A time object.
@rtype: B{datetime}.I{time}
"""
try:
offset = None
part = Timezone.split(s)
hour, minute, second = part[0].split(':', 2)
hour = int(hour)
minute = int(minute)
second, ms = self.__second(second)
if len(part) == 2:
self.offset = self.__offset(part[1])
if ms is None:
return dt.time(hour, minute, second)
else:
return dt.time(hour, minute, second, ms)
except:
            log.debug(s, exc_info=True)
raise ValueError, 'Invalid format "%s"' % s
def __second(self, s):
"""
Parse the seconds and microseconds.
The microseconds are truncated to 999999 due to a restriction in
the python datetime.datetime object.
@param s: A string representation of the seconds.
@type s: str
@return: Tuple of (sec,ms)
@rtype: tuple.
"""
part = s.split('.')
if len(part) > 1:
return (int(part[0]), int(part[1][:6]))
else:
return (int(part[0]), None)
def __offset(self, s):
"""
Parse the TZ offset.
@param s: A string representation of the TZ offset.
@type s: str
@return: The signed offset in hours.
@rtype: str
"""
if len(s) == len('-00:00'):
return int(s[:3])
if len(s) == 0:
return self.tz.local
if len(s) == 1:
return 0
raise Exception()
def __str__(self):
return unicode(self)
def __unicode__(self):
time = self.time.isoformat()
if self.tz.local:
return '%s%+.2d:00' % (time, self.tz.local)
else:
return '%sZ' % time
class DateTime(Date,Time):
"""
An XML time object.
Supported formats:
- YYYY-MM-DDB{T}HH:MI:SS
- YYYY-MM-DDB{T}HH:MI:SS(z|Z)
- YYYY-MM-DDB{T}HH:MI:SS.ms
- YYYY-MM-DDB{T}HH:MI:SS.ms(z|Z)
- YYYY-MM-DDB{T}HH:MI:SS(+|-)06:00
- YYYY-MM-DDB{T}HH:MI:SS.ms(+|-)06:00
@ivar datetime: The object value.
    @type datetime: B{datetime}.I{datetime}
"""
def __init__(self, date):
"""
@param date: The value of the object.
@type date: (datetime|str)
@raise ValueError: When I{tm} is invalid.
"""
if isinstance(date, dt.datetime):
Date.__init__(self, date.date())
Time.__init__(self, date.time())
self.datetime = \
dt.datetime.combine(self.date, self.time)
return
if isinstance(date, basestring):
part = date.split('T')
Date.__init__(self, part[0])
Time.__init__(self, part[1], 0)
self.datetime = \
dt.datetime.combine(self.date, self.time)
self.__adjust()
return
raise ValueError, type(date)
def __adjust(self):
"""
Adjust for TZ offset.
"""
if not hasattr(self, 'offset'):
return
delta = self.tz.adjustment(self.offset)
try:
d = ( self.datetime + delta )
self.datetime = d
self.date = d.date()
self.time = d.time()
except OverflowError:
log.warn('"%s" caused overflow, not-adjusted', self.datetime)
def __str__(self):
return unicode(self)
def __unicode__(self):
s = []
s.append(Date.__unicode__(self))
s.append(Time.__unicode__(self))
return 'T'.join(s)
class UTC(DateTime):
"""
Represents current UTC time.
"""
def __init__(self, date=None):
if date is None:
date = dt.datetime.utcnow()
DateTime.__init__(self, date)
self.tz.local = 0
class Timezone:
"""
Timezone object used to do TZ conversions
@cvar local: The (A) local TZ offset.
@type local: int
    @cvar pattern: The regex pattern to match TZ.
    @type pattern: re.Pattern
"""
pattern = re.compile('([zZ])|([\-\+][0-9]{2}:[0-9]{2})')
LOCAL = ( 0-time.timezone/60/60 )
def __init__(self, offset=None):
if offset is None:
offset = self.LOCAL
self.local = offset
@classmethod
def split(cls, s):
"""
Split the TZ from string.
@param s: A string containing a timezone
@type s: basestring
@return: The split parts.
@rtype: tuple
"""
m = cls.pattern.search(s)
if m is None:
return (s,)
x = m.start(0)
return (s[:x], s[x:])
def adjustment(self, offset):
"""
Get the adjustment to the I{local} TZ.
@return: The delta between I{offset} and local TZ.
@rtype: B{datetime}.I{timedelta}
"""
delta = ( self.local - offset )
return dt.timedelta(hours=delta)
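# A brief usage sketch for the classes above (values are examples only):
#
#   Date('2010-11-27').year()            # -> 2010
#   Time('10:30:22+05:00')               # parsed, then adjusted to the local TZ
#   DateTime('2010-11-27T10:30:22Z')     # combined date and time
#   unicode(UTC())                       # current UTC time in ISO format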
| gpl-3.0 |
Jun1113/MapReduce-Example | contrib/hod/hodlib/RingMaster/ringMaster.py | 182 | 35563 | #Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements. See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership. The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
#!/usr/bin/env python
"""manages services and nodepool"""
# -*- python -*-
import os, sys, random, time, sets, shutil, threading
import urllib, urlparse, re, getpass, pprint, signal, shutil
from pprint import pformat
from HTMLParser import HTMLParser
binfile = sys.path[0]
libdir = os.path.dirname(binfile)
sys.path.append(libdir)
import hodlib.Common.logger
from hodlib.RingMaster.idleJobTracker import JobTrackerMonitor, HadoopJobStatus
from hodlib.Common.threads import func
from hodlib.Hod.nodePool import *
from hodlib.Common.util import *
from hodlib.Common.nodepoolutil import NodePoolUtil
from hodlib.Common.socketServers import hodXMLRPCServer
from hodlib.Common.socketServers import threadedHTTPServer
from hodlib.NodePools import *
from hodlib.NodePools.torque import *
from hodlib.GridServices import *
from hodlib.Common.descGenerator import *
from hodlib.Common.xmlrpc import hodXRClient
from hodlib.Common.miniHTMLParser import miniHTMLParser
from hodlib.Common.threads import simpleCommand
class ringMasterServer:
"""The RPC server that exposes all the master config
changes. Also, one of these RPC servers runs as a proxy
and all the hodring instances register with this proxy"""
instance = None
xmlrpc = None
def __init__(self, cfg, log, logMasterSources, retry=5):
try:
from hodlib.Common.socketServers import twistedXMLRPCServer
ringMasterServer.xmlrpc = twistedXMLRPCServer("",
cfg['ringmaster']['xrs-port-range'])
except ImportError:
log.info("Twisted interface not found. Using hodXMLRPCServer.")
ringMasterServer.xmlrpc = hodXMLRPCServer("",
cfg['ringmaster']['xrs-port-range'])
ringMasterServer.xmlrpc.register_instance(logMasterSources)
self.logMasterSources = logMasterSources
ringMasterServer.xmlrpc.serve_forever()
while not ringMasterServer.xmlrpc.is_alive():
time.sleep(.5)
log.debug('Ringmaster RPC Server at %d' %
ringMasterServer.xmlrpc.server_address[1])
def startService(ss, cfg, np, log, rm):
logMasterSources = _LogMasterSources(ss, cfg, np, log, rm)
ringMasterServer.instance = ringMasterServer(cfg, log, logMasterSources)
def stopService():
ringMasterServer.xmlrpc.stop()
def getPort():
return ringMasterServer.instance.port
def getAddress():
return 'http://%s:%d/' % (socket.gethostname(),
ringMasterServer.xmlrpc.server_address[1])
startService = staticmethod(startService)
stopService = staticmethod(stopService)
getPort = staticmethod(getPort)
getAddress = staticmethod(getAddress)
class _LogMasterSources:
"""All the methods that are run by the RPC server are
added into this class """
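  # Client-side view (hypothetical sketch): a hodring reaches these methods
  # over XML-RPC, roughly as
  #
  #   ring = hodXRClient('http://<ringmaster-host>:<xrs-port>/')
  #   url = ring.getTarList(local_fqdn())    # pick a tarball source
  #   cmds = ring.getCommand(local_fqdn())   # fetch commands to run
  #
  # The host/port placeholder is made up; the real address is published via
  # the hodring.ringmaster-xrs-addr setting near the bottom of this file.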
def __init__(self, serviceDict, cfg, np, log, rm):
self.serviceDict = serviceDict
self.tarSource = []
self.tarSourceLock = threading.Lock()
self.dict = {}
self.count = {}
self.logsourceList = []
self.logsourceListLock = threading.Lock()
self.masterParam = []
self.masterParamLock = threading.Lock()
self.verify = 'none'
self.cmdLock = threading.Lock()
self.cfg = cfg
self.log = log
self.np = np
self.rm = rm
self.hdfsHost = None
self.mapredHost = None
self.maxconnect = self.cfg['ringmaster']['max-connect']
self.log.debug("Using max-connect value %s"%self.maxconnect)
def registerTarSource(self, hostname, url, addr=None):
self.log.debug("registering: " + url)
lock = self.tarSourceLock
lock.acquire()
self.dict[url] = url
self.count[url] = 0
# addr is None when ringMaster himself invokes this method
if addr:
c = self.count[addr]
self.count[addr] = c - 1
lock.release()
if addr:
str = "%s is done" % (addr)
self.log.debug(str)
return url
def getTarList(self,hodring): # this looks useful
lock = self.tarSourceLock
lock.acquire()
leastkey = None
leastval = -1
for k, v in self.count.iteritems():
if (leastval == -1):
leastval = v
pass
if (v <= leastval and v < self.maxconnect):
leastkey = k
leastval = v
if (leastkey == None):
url = 'none'
else:
url = self.dict[leastkey]
self.count[leastkey] = leastval + 1
self.log.debug("%s %d" % (leastkey, self.count[leastkey]))
lock.release()
self.log.debug('sending url ' + url+" to "+hodring) # this looks useful
return url
def tarDone(self, uri):
str = "%s is done" % (uri)
self.log.debug(str)
lock = self.tarSourceLock
lock.acquire()
c = self.count[uri]
self.count[uri] = c - 1
lock.release()
return uri
def status(self):
return True
# FIXME: this code is broken, it relies on a central service registry
#
# def clusterStart(self, changedClusterParams=[]):
# self.log.debug("clusterStart method invoked.")
# self.dict = {}
# self.count = {}
# try:
# if (len(changedClusterParams) > 0):
# self.log.debug("Updating config.")
# for param in changedClusterParams:
# (key, sep1, val) = param.partition('=')
# (i1, sep2, i2) = key.partition('.')
# try:
# prev = self.cfg[i1][i2]
# self.rm.cfg[i1][i2] = val
# self.cfg[i1][i2] = val
# self.log.debug("\nModified [%s][%s]=%s to [%s][%s]=%s" % (i1, i2, prev, i1, i2, val))
# except KeyError, e:
# self.log.info("Skipping %s as no such config parameter found in ringmaster" % param)
# self.log.debug("Regenerating Service Description.")
# dGen = DescGenerator(self.rm.cfg)
# self.rm.cfg['servicedesc'] = dGen.createServiceDescDict()
# self.cfg['servicedesc'] = self.rm.cfg['servicedesc']
#
# self.rm.tar = None
# if self.rm.cfg['ringmaster'].has_key('hadoop-tar-ball'):
# self.rm.download = True
# self.rm.tar = self.rm.cfg['ringmaster']['hadoop-tar-ball']
# self.log.debug("self.rm.tar=%s" % self.rm.tar)
#
# self.rm.cd_to_tempdir()
#
# self.rm.tarAddress = None
# hostname = socket.gethostname()
# if (self.rm.download):
# self.rm.basename = os.path.basename(self.rm.tar)
# dest = os.path.join(os.getcwd(), self.rm.basename)
# src = self.rm.tar
# self.log.debug("cp %s -> %s" % (src, dest))
# shutil.copy(src, dest)
# self.rm.tarAddress = "%s%s" % (self.rm.httpAddress, self.rm.basename)
# self.registerTarSource(hostname, self.rm.tarAddress)
# self.log.debug("Registered new tarAddress %s" % self.rm.tarAddress)
# else:
# self.log.debug("Download not set.")
#
# if (self.rm.tar != None):
# self.cfg['hodring']['download-addr'] = self.rm.tarAddress
# self.rm.cfg['hodring']['download-addr'] = self.rm.tarAddress
#
# sdl = self.rm.cfg['servicedesc']
# workDirs = self.rm.getWorkDirs(self.rm.cfg, True)
# hdfsDesc = sdl['hdfs']
# hdfs = None
# if hdfsDesc.isExternal():
# hdfs = HdfsExternal(hdfsDesc, workDirs)
# else:
# hdfs = Hdfs(hdfsDesc, workDirs, 0, False, True)
#
# self.rm.serviceDict[hdfs.getName()] = hdfs
# mrDesc = sdl['mapred']
# mr = None
# if mrDesc.isExternal():
# mr = MapReduceExternal(mrDesc, workDirs)
# else:
# mr = MapReduce(mrDesc, workDirs, 1)
# self.rm.serviceDict[mr.getName()] = mr
#
# ringList = self.rm.serviceClient.getServiceInfo(self.cfg['hodring']['userid'],
# self.np.getServiceId(), 'hodring', 'hod')
#
# slaveList = ringList
# hdfsringXRAddress = None
# # Start HDFS Master - Step 1
# if not hdfsDesc.isExternal():
# masterFound = False
# for ring in ringList:
# ringXRAddress = ring['xrs']
# if ringXRAddress == None:
# raise Exception("Could not get hodring XML-RPC server address.")
# if (ringXRAddress.find(self.hdfsHost) != -1):
# ringClient = hodXRClient(ringXRAddress, None, None, 0, 0, 0, False, 0)
# hdfsringXRAddress = ringXRAddress
# self.log.debug("Invoking clusterStart on " + ringXRAddress + " (HDFS Master)")
# ringClient.clusterStart()
# masterFound = True
# slaveList.remove(ring)
# break
# if not masterFound:
# raise Exception("HDFS Master host not found")
# while hdfs.getInfoAddrs() == None:
# self.log.debug("Waiting for HDFS Master (Name Node) to register dfs.info.port")
# time.sleep(1)
#
# # Start MAPRED Master - Step 2
# if not mrDesc.isExternal():
# masterFound = False
# for ring in ringList:
# ringXRAddress = ring['xrs']
# if ringXRAddress == None:
# raise Exception("Could not get hodring XML-RPC server address.")
# if (not mrDesc.isExternal() and ringXRAddress.find(self.mapredHost) != -1):
# ringClient = hodXRClient(ringXRAddress, None, None, 0, 0, 0, False, 0)
# self.log.debug("Invoking clusterStart on " + ringXRAddress + " (MAPRED Master)")
# ringClient.clusterStart()
# masterFound = True
# slaveList.remove(ring)
# break
# if not masterFound:
#           raise Exception("MAPRED Master host not found")
# while mr.getInfoAddrs() == None:
# self.log.debug("Waiting for MAPRED Master (Job Tracker) to register \
# mapred.job.tracker.info.port")
# time.sleep(1)
#
# # Start Slaves - Step 3
# for ring in slaveList:
# ringXRAddress = ring['xrs']
# if ringXRAddress == None:
# raise Exception("Could not get hodring XML-RPC server address.")
# ringClient = hodXRClient(ringXRAddress, None, None, 0, 0, 0, False, 0)
# self.log.debug("Invoking clusterStart on " + ringXRAddress + " (Slaves)")
# ringThread = func(name='hodring_slaves_start', functionRef=ringClient.clusterStart())
# ring['thread'] = ringThread
# ringThread.start()
#
# for ring in slaveList:
# ringThread = ring['thread']
# if ringThread == None:
# raise Exception("Could not get hodring thread (Slave).")
# ringThread.join()
# self.log.debug("Completed clusterStart on " + ring['xrs'] + " (Slave)")
#
# # Run Admin Commands on HDFS Master - Step 4
# if not hdfsDesc.isExternal():
# if hdfsringXRAddress == None:
# raise Exception("HDFS Master host not found (to Run Admin Commands)")
# ringClient = hodXRClient(hdfsringXRAddress, None, None, 0, 0, 0, False, 0)
# self.log.debug("Invoking clusterStart(False) - Admin on "
# + hdfsringXRAddress + " (HDFS Master)")
# ringClient.clusterStart(False)
#
# except:
# self.log.debug(get_exception_string())
# return False
#
# self.log.debug("Successfully started cluster.")
# return True
#
# def clusterStop(self):
# self.log.debug("clusterStop method invoked.")
# try:
# hdfsAddr = self.getServiceAddr('hdfs')
# if hdfsAddr.find(':') != -1:
# h, p = hdfsAddr.split(':', 1)
# self.hdfsHost = h
# self.log.debug("hdfsHost: " + self.hdfsHost)
# mapredAddr = self.getServiceAddr('mapred')
# if mapredAddr.find(':') != -1:
# h, p = mapredAddr.split(':', 1)
# self.mapredHost = h
# self.log.debug("mapredHost: " + self.mapredHost)
# ringList = self.rm.serviceClient.getServiceInfo(self.cfg['hodring']['userid'],
# self.np.getServiceId(),
# 'hodring', 'hod')
# for ring in ringList:
# ringXRAddress = ring['xrs']
# if ringXRAddress == None:
# raise Exception("Could not get hodring XML-RPC server address.")
# ringClient = hodXRClient(ringXRAddress, None, None, 0, 0, 0, False)
# self.log.debug("Invoking clusterStop on " + ringXRAddress)
# ringThread = func(name='hodring_stop', functionRef=ringClient.clusterStop())
# ring['thread'] = ringThread
# ringThread.start()
#
# for ring in ringList:
# ringThread = ring['thread']
# if ringThread == None:
# raise Exception("Could not get hodring thread.")
# ringThread.join()
# self.log.debug("Completed clusterStop on " + ring['xrs'])
#
# except:
# self.log.debug(get_exception_string())
# return False
#
# self.log.debug("Successfully stopped cluster.")
#
# return True
def getCommand(self, addr):
"""This method is called by the
hodrings to get commands from
the ringmaster"""
lock = self.cmdLock
cmdList = []
lock.acquire()
try:
try:
for v in self.serviceDict.itervalues():
if (not v.isExternal()):
if v.isLaunchable(self.serviceDict):
# If a master is still not launched, or the number of
# retries for launching master is not reached,
# launch master
if not v.isMasterLaunched() and \
(v.getMasterFailureCount() <= \
self.cfg['ringmaster']['max-master-failures']):
cmdList = v.getMasterCommands(self.serviceDict)
v.setlaunchedMaster()
v.setMasterAddress(addr)
break
if cmdList == []:
for s in self.serviceDict.itervalues():
            if (not s.isExternal()):
if s.isMasterInitialized():
cl = s.getWorkerCommands(self.serviceDict)
cmdList.extend(cl)
else:
cmdList = []
break
except:
self.log.debug(get_exception_string())
finally:
lock.release()
pass
cmd = addr + pformat(cmdList)
self.log.debug("getCommand returning " + cmd)
return cmdList
def getAdminCommand(self, addr):
"""This method is called by the
hodrings to get admin commands from
the ringmaster"""
lock = self.cmdLock
cmdList = []
lock.acquire()
try:
try:
for v in self.serviceDict.itervalues():
cmdList = v.getAdminCommands(self.serviceDict)
if cmdList != []:
break
except Exception, e:
self.log.debug(get_exception_string())
finally:
lock.release()
pass
cmd = addr + pformat(cmdList)
self.log.debug("getAdminCommand returning " + cmd)
return cmdList
def addMasterParams(self, addr, vals):
"""This method is called by
    a hodring to report any parameters
    it has changed for the commands it was
    running"""
self.log.debug('Comment: adding master params from %s' % addr)
self.log.debug(pformat(vals))
lock = self.masterParamLock
lock.acquire()
try:
for v in self.serviceDict.itervalues():
if v.isMasterLaunched():
if (v.getMasterAddress() == addr):
v.setMasterParams(vals)
v.setMasterInitialized()
except:
self.log.debug(get_exception_string())
pass
lock.release()
return addr
def setHodRingErrors(self, addr, errors):
"""This method is called by the hodrings to update errors
it encountered while starting up"""
self.log.critical("Hodring at %s failed with following errors:\n%s" \
% (addr, errors))
lock = self.masterParamLock
lock.acquire()
try:
for v in self.serviceDict.itervalues():
if v.isMasterLaunched():
if (v.getMasterAddress() == addr):
# strip the PID part.
idx = addr.rfind('_')
            if idx != -1:
addr = addr[:idx]
v.setMasterFailed("Hodring at %s failed with following" \
" errors:\n%s" % (addr, errors))
except:
self.log.debug(get_exception_string())
pass
lock.release()
return True
def getKeys(self):
lock= self.masterParamLock
lock.acquire()
keys = self.serviceDict.keys()
lock.release()
return keys
def getServiceAddr(self, name):
addr = 'not found'
self.log.debug("getServiceAddr name: %s" % name)
lock= self.masterParamLock
lock.acquire()
try:
service = self.serviceDict[name]
except KeyError:
pass
else:
self.log.debug("getServiceAddr service: %s" % service)
# Check if we should give up ! If the limit on max failures is hit,
# give up.
err = service.getMasterFailed()
if (err is not None) and \
(service.getMasterFailureCount() > \
self.cfg['ringmaster']['max-master-failures']):
self.log.critical("Detected errors (%s) beyond allowed number"\
" of failures (%s). Flagging error to client" \
% (service.getMasterFailureCount(), \
self.cfg['ringmaster']['max-master-failures']))
addr = "Error: " + err
elif (service.isMasterInitialized()):
addr = service.getMasterAddrs()[0]
else:
addr = 'not found'
lock.release()
self.log.debug("getServiceAddr addr %s: %s" % (name, addr))
return addr
def getURLs(self, name):
addr = 'none'
lock = self.masterParamLock
lock.acquire()
try:
service = self.serviceDict[name]
except KeyError:
pass
else:
if (service.isMasterInitialized()):
addr = service.getInfoAddrs()[0]
lock.release()
return addr
def stopRM(self):
"""An XMLRPC call which will spawn a thread to stop the Ringmaster program."""
# We spawn a thread here because we want the XMLRPC call to return. Calling
# stop directly from here will also stop the XMLRPC server.
try:
self.log.debug("inside xml-rpc call to stop ringmaster")
rmStopperThread = func('RMStopper', self.rm.stop)
rmStopperThread.start()
self.log.debug("returning from xml-rpc call to stop ringmaster")
return True
except:
self.log.debug("Exception in stop: %s" % get_exception_string())
return False
class RingMaster:
def __init__(self, cfg, log, **kwds):
"""starts nodepool and services"""
self.download = False
self.httpServer = None
self.cfg = cfg
self.log = log
self.__hostname = local_fqdn()
self.workDirs = None
# ref to the idle job tracker object.
self.__jtMonitor = None
self.__idlenessDetected = False
self.__stopInProgress = False
self.__isStopped = False # to let main exit
self.__exitCode = 0 # exit code with which the ringmaster main method should return
self.workers_per_ring = self.cfg['ringmaster']['workers_per_ring']
self.__initialize_signal_handlers()
sdd = self.cfg['servicedesc']
gsvc = None
for key in sdd:
gsvc = sdd[key]
break
npd = self.cfg['nodepooldesc']
self.np = NodePoolUtil.getNodePool(npd, cfg, log)
self.log.debug("Getting service ID.")
self.serviceId = self.np.getServiceId()
self.log.debug("Got service ID: %s" % self.serviceId)
self.tarSrcLoc = None
if self.cfg['ringmaster'].has_key('hadoop-tar-ball'):
self.download = True
self.tarSrcLoc = self.cfg['ringmaster']['hadoop-tar-ball']
self.cd_to_tempdir()
if (self.download):
self.__copy_tarball(os.getcwd())
self.basename = self.__find_tarball_in_dir(os.getcwd())
if self.basename is None:
raise Exception('Did not find tarball copied from %s in %s.'
% (self.tarSrcLoc, os.getcwd()))
self.serviceAddr = to_http_url(self.cfg['ringmaster']['svcrgy-addr'])
self.log.debug("Service registry @ %s" % self.serviceAddr)
self.serviceClient = hodXRClient(self.serviceAddr)
self.serviceDict = {}
try:
sdl = self.cfg['servicedesc']
workDirs = self.getWorkDirs(cfg)
hdfsDesc = sdl['hdfs']
hdfs = None
# Determine hadoop Version
hadoopVers = hadoopVersion(self.__getHadoopDir(), \
self.cfg['hodring']['java-home'], self.log)
if (hadoopVers['major']==None) or (hadoopVers['minor']==None):
        raise Exception('Could not retrieve the version of Hadoop.'
+ ' Check the Hadoop installation or the value of the hodring.java-home variable.')
if hdfsDesc.isExternal():
hdfs = HdfsExternal(hdfsDesc, workDirs, version=int(hadoopVers['minor']))
hdfs.setMasterParams( self.cfg['gridservice-hdfs'] )
else:
hdfs = Hdfs(hdfsDesc, workDirs, 0, version=int(hadoopVers['minor']),
workers_per_ring = self.workers_per_ring)
self.serviceDict[hdfs.getName()] = hdfs
mrDesc = sdl['mapred']
mr = None
if mrDesc.isExternal():
mr = MapReduceExternal(mrDesc, workDirs, version=int(hadoopVers['minor']))
mr.setMasterParams( self.cfg['gridservice-mapred'] )
else:
mr = MapReduce(mrDesc, workDirs,1, version=int(hadoopVers['minor']),
workers_per_ring = self.workers_per_ring)
self.serviceDict[mr.getName()] = mr
except:
self.log.critical("Exception in creating Hdfs and Map/Reduce descriptor objects: \
%s." % get_exception_error_string())
self.log.debug(get_exception_string())
raise
# should not be starting these in a constructor
ringMasterServer.startService(self.serviceDict, cfg, self.np, log, self)
self.rpcserver = ringMasterServer.getAddress()
self.httpAddress = None
self.tarAddress = None
hostname = socket.gethostname()
if (self.download):
self.httpServer = threadedHTTPServer(hostname,
self.cfg['ringmaster']['http-port-range'])
self.httpServer.serve_forever()
self.httpAddress = "http://%s:%d/" % (self.httpServer.server_address[0],
self.httpServer.server_address[1])
self.tarAddress = "%s%s" % (self.httpAddress, self.basename)
ringMasterServer.instance.logMasterSources.registerTarSource(hostname,
self.tarAddress)
else:
self.log.debug("Download not set.")
self.log.debug("%s %s %s %s %s" % (self.cfg['ringmaster']['userid'],
self.serviceId, self.__hostname, 'ringmaster', 'hod'))
if self.cfg['ringmaster']['register']:
if self.httpAddress:
self.serviceClient.registerService(self.cfg['ringmaster']['userid'],
self.serviceId, self.__hostname, 'ringmaster', 'hod', {
'xrs' : self.rpcserver, 'http' : self.httpAddress })
else:
self.serviceClient.registerService(self.cfg['ringmaster']['userid'],
self.serviceId, self.__hostname, 'ringmaster', 'hod', {
'xrs' : self.rpcserver, })
self.log.debug("Registered with serivce registry: %s." % self.serviceAddr)
hodRingPath = os.path.join(cfg['ringmaster']['base-dir'], 'bin', 'hodring')
hodRingWorkDir = os.path.join(cfg['hodring']['temp-dir'], 'hodring' + '_'
+ getpass.getuser())
self.cfg['hodring']['hodring'] = [hodRingWorkDir,]
self.cfg['hodring']['svcrgy-addr'] = self.cfg['ringmaster']['svcrgy-addr']
self.cfg['hodring']['service-id'] = self.np.getServiceId()
self.cfg['hodring']['ringmaster-xrs-addr'] = self.__url_to_addr(self.rpcserver)
if (self.tarSrcLoc != None):
cfg['hodring']['download-addr'] = self.tarAddress
self.__init_job_tracker_monitor(ringMasterServer.instance.logMasterSources)
def __init_job_tracker_monitor(self, logMasterSources):
hadoopDir = self.__getHadoopDir()
self.log.debug('hadoopdir=%s, java-home=%s' % \
(hadoopDir, self.cfg['hodring']['java-home']))
try:
self.__jtMonitor = JobTrackerMonitor(self.log, self,
self.cfg['ringmaster']['jt-poll-interval'],
self.cfg['ringmaster']['idleness-limit'],
hadoopDir, self.cfg['hodring']['java-home'],
logMasterSources)
self.log.debug('starting jt monitor')
self.__jtMonitor.start()
except:
self.log.critical('Exception in running idle job tracker. This cluster cannot be deallocated if idle.\
Exception message: %s' % get_exception_error_string())
self.log.debug('Exception details: %s' % get_exception_string())
def __getHadoopDir(self):
hadoopDir = None
if self.cfg['ringmaster'].has_key('hadoop-tar-ball'):
tarFile = os.path.join(os.getcwd(), self.basename)
ret = untar(tarFile, os.getcwd())
if not ret:
raise Exception('Untarring tarfile %s to directory %s failed. Cannot find hadoop directory.' \
% (tarFile, os.getcwd()))
hadoopDir = os.path.join(os.getcwd(), self.__get_dir(tarFile))
else:
hadoopDir = self.cfg['gridservice-mapred']['pkgs']
self.log.debug('Returning Hadoop directory as: %s' % hadoopDir)
return hadoopDir
def __get_dir(self, name):
"""Return the root directory inside the tarball
specified by name. Assumes that the tarball begins
with a root directory."""
import tarfile
myTarFile = tarfile.open(name)
hadoopPackage = myTarFile.getnames()[0]
self.log.debug("tarball name : %s hadoop package name : %s" %(name,hadoopPackage))
return hadoopPackage
def __find_tarball_in_dir(self, dir):
"""Find the tarball among files specified in the given
directory. We need this method because how the tarball
source URI is given depends on the method of copy and
we can't get the tarball name from that.
This method will fail if there are multiple tarballs
in the directory with the same suffix."""
files = os.listdir(dir)
for file in files:
if self.tarSrcLoc.endswith(file):
return file
return None
def __copy_tarball(self, destDir):
"""Copy the hadoop tar ball from a remote location to the
specified destination directory. Based on the URL it executes
an appropriate copy command. Throws an exception if the command
returns a non-zero exit code."""
# for backwards compatibility, treat the default case as file://
url = ''
if self.tarSrcLoc.startswith('/'):
url = 'file:/'
src = '%s%s' % (url, self.tarSrcLoc)
if src.startswith('file://'):
src = src[len('file://')-1:]
cpCmd = '/bin/cp'
cmd = '%s %s %s' % (cpCmd, src, destDir)
self.log.debug('Command to execute: %s' % cmd)
copyProc = simpleCommand('remote copy', cmd)
copyProc.start()
copyProc.wait()
copyProc.join()
ret = copyProc.exit_code()
self.log.debug('Completed command execution. Exit Code: %s.' % ret)
if ret != 0:
output = copyProc.output()
raise Exception('Could not copy tarball using command %s. Exit code: %s. Output: %s'
% (cmd, ret, output))
else:
raise Exception('Unsupported URL for file: %s' % src)
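  # Illustrative sketch (hypothetical path, not part of the original module):
  # with hadoop-tar-ball set to /shared/hadoop-0.20.tar.gz, the value is
  # treated as a file:// URL and copied with a plain
  #   /bin/cp /shared/hadoop-0.20.tar.gz <ringmaster temp dir>
  # Any other scheme falls through to the 'Unsupported URL' exception above.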
# input: http://hostname:port/. output: [hostname,port]
def __url_to_addr(self, url):
addr = url.rstrip('/')
if addr.startswith('http://'):
addr = addr.replace('http://', '', 1)
addr_parts = addr.split(':')
return [addr_parts[0], int(addr_parts[1])]
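  # Illustrative sketch (hypothetical host/port, not part of the original module):
  #   __url_to_addr('http://node1.example.com:33000/') -> ['node1.example.com', 33000]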
def __initialize_signal_handlers(self):
def sigStop(sigNum, handler):
sig_wrapper(sigNum, self.stop)
signal.signal(signal.SIGTERM, sigStop)
signal.signal(signal.SIGINT, sigStop)
signal.signal(signal.SIGQUIT, sigStop)
def __clean_up(self):
tempDir = self.__get_tempdir()
os.chdir(os.path.split(tempDir)[0])
if os.path.exists(tempDir):
shutil.rmtree(tempDir, True)
self.log.debug("Cleaned up temporary dir: %s" % tempDir)
def __get_tempdir(self):
dir = os.path.join(self.cfg['ringmaster']['temp-dir'],
"%s.%s.ringmaster" % (self.cfg['ringmaster']['userid'],
self.np.getServiceId()))
return dir
def getWorkDirs(self, cfg, reUse=False):
if (not reUse) or (self.workDirs == None):
import math
frand = random.random()
while math.ceil(frand) != math.floor(frand):
frand = frand * 100
irand = int(frand)
uniq = '%s-%d-%s' % (socket.gethostname(), os.getpid(), irand)
dirs = []
parentDirs = cfg['ringmaster']['work-dirs']
for p in parentDirs:
dir = os.path.join(p, uniq)
dirs.append(dir)
self.workDirs = dirs
return self.workDirs
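  # Illustrative sketch (hypothetical values, not part of the original module):
  # with work-dirs = ['/grid/0/hod', '/grid/1/hod'], host 'node1', pid 4242 and
  # a generated random integer of 73, getWorkDirs() would return
  # ['/grid/0/hod/node1-4242-73', '/grid/1/hod/node1-4242-73'].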
def _fetchLink(self, link, parentDir):
parser = miniHTMLParser()
self.log.debug("Checking link %s" %link)
while link:
# Get the file from the site and link
input = urllib.urlopen(link)
out = None
contentType = input.info().gettype()
isHtml = contentType == 'text/html'
#print contentType
if isHtml:
parser.setBaseUrl(input.geturl())
else:
parsed = urlparse.urlparse(link)
hp = parsed[1]
h = hp
p = None
if hp.find(':') != -1:
h, p = hp.split(':', 1)
path = parsed[2]
path = path.split('/')
file = os.path.join(parentDir, h, p)
for c in path:
if c == '':
continue
file = os.path.join(file, c)
try:
self.log.debug('Creating %s' % file)
dir, tail = os.path.split(file)
if not os.path.exists(dir):
os.makedirs(dir)
except:
self.log.debug(get_exception_string())
out = open(file, 'w')
bufSz = 8192
buf = input.read(bufSz)
while len(buf) > 0:
if isHtml:
# Feed the file into the HTML parser
parser.feed(buf)
if out:
out.write(buf)
buf = input.read(bufSz)
input.close()
if out:
out.close()
# Search the retfile here
# Get the next link in level traversal order
link = parser.getNextLink()
parser.close()
def _finalize(self):
try:
# FIXME: get dir from config
dir = 'HOD-log-P%d' % (os.getpid())
dir = os.path.join('.', dir)
except:
self.log.debug(get_exception_string())
self.np.finalize()
def handleIdleJobTracker(self):
self.log.critical("Detected idle job tracker for %s seconds. The allocation will be cleaned up." \
% self.cfg['ringmaster']['idleness-limit'])
self.__idlenessDetected = True
def cd_to_tempdir(self):
dir = self.__get_tempdir()
if not os.path.exists(dir):
os.makedirs(dir)
os.chdir(dir)
return dir
def getWorkload(self):
return self.workload
def getHostName(self):
return self.__hostname
def start(self):
"""run the thread main loop"""
self.log.debug("Entered start method.")
hodring = os.path.join(self.cfg['ringmaster']['base-dir'],
'bin', 'hodring')
largs = [hodring]
targs = self.cfg.get_args(section='hodring')
largs.extend(targs)
hodringCmd = ""
for item in largs:
hodringCmd = "%s%s " % (hodringCmd, item)
self.log.debug(hodringCmd)
if self.np.runWorkers(largs) > 0:
self.log.critical("Failed to start worker.")
self.log.debug("Returned from runWorkers.")
self._finalize()
def __findExitCode(self):
"""Determine the exit code based on the status of the cluster or jobs run on them"""
xmlrpcServer = ringMasterServer.instance.logMasterSources
if xmlrpcServer.getServiceAddr('hdfs') == 'not found' or \
xmlrpcServer.getServiceAddr('hdfs').startswith("Error: "):
self.__exitCode = 7
elif xmlrpcServer.getServiceAddr('mapred') == 'not found' or \
xmlrpcServer.getServiceAddr('mapred').startswith("Error: "):
self.__exitCode = 8
else:
clusterStatus = get_cluster_status(xmlrpcServer.getServiceAddr('hdfs'),
xmlrpcServer.getServiceAddr('mapred'))
if clusterStatus != 0:
self.__exitCode = clusterStatus
else:
self.__exitCode = self.__findHadoopJobsExitCode()
self.log.debug('exit code %s' % self.__exitCode)
def __findHadoopJobsExitCode(self):
"""Determine the consolidate exit code of hadoop jobs run on this cluster, provided
this information is available. Return 0 otherwise"""
ret = 0
failureStatus = 3
failureCount = 0
if self.__jtMonitor:
jobStatusList = self.__jtMonitor.getJobsStatus()
try:
if len(jobStatusList) > 0:
for jobStatus in jobStatusList:
self.log.debug('job status for %s: %s' % (jobStatus.getJobId(),
jobStatus.getStatus()))
if jobStatus.getStatus() == failureStatus:
failureCount = failureCount+1
if failureCount > 0:
if failureCount == len(jobStatusList): # all jobs failed
ret = 16
else:
ret = 17
except:
        self.log.debug('exception in finding hadoop jobs exit code: %s' % get_exception_string())
return ret
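  # Summary of the exit codes above (added note, not part of the original module):
  # 7 = HDFS master missing or failed, 8 = Map/Reduce master missing or failed,
  # 16 = all tracked Hadoop jobs failed, 17 = some but not all jobs failed,
  # otherwise the non-zero cluster status code, or 0.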
def stop(self):
self.log.debug("RingMaster stop method invoked.")
if self.__stopInProgress or self.__isStopped:
return
self.__stopInProgress = True
if ringMasterServer.instance is not None:
self.log.debug('finding exit code')
self.__findExitCode()
self.log.debug('stopping ringmaster instance')
ringMasterServer.stopService()
else:
self.__exitCode = 6
if self.__jtMonitor is not None:
self.__jtMonitor.stop()
if self.httpServer:
self.httpServer.stop()
self.__clean_up()
self.__isStopped = True
def shouldStop(self):
"""Indicates whether the main loop should exit, either due to idleness condition,
or a stop signal was received"""
return self.__idlenessDetected or self.__isStopped
def getExitCode(self):
"""return the exit code of the program"""
return self.__exitCode
def main(cfg,log):
try:
rm = None
dGen = DescGenerator(cfg)
cfg = dGen.initializeDesc()
rm = RingMaster(cfg, log)
rm.start()
while not rm.shouldStop():
time.sleep(1)
rm.stop()
log.debug('returning from main')
return rm.getExitCode()
except Exception, e:
if log:
log.critical(get_exception_string())
raise Exception(e)
| apache-2.0 |
syndbg/ubuntu-make | umake/interactions/__init__.py | 1 | 6238 | # -*- coding: utf-8 -*-
# Copyright (C) 2014 Canonical
#
# Authors:
# Didier Roche
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; version 3.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
# module gather different types of interactions with the UI
from gettext import gettext as _
import logging
from umake.tools import InputError
logger = logging.getLogger(__name__)
class Choice:
def __init__(self, id, label, callback_fn, txt_shorcut=None, is_default=False):
"""Choice element containing label and callback function"""
self.id = id
self.label = label
self.txt_shorcut = txt_shorcut
self.callback_fn = callback_fn
self.is_default = is_default
class TextWithChoices:
def __init__(self, content, choices=[], newline_before_option=False):
"""Content text with a list of multiple Choice elements"""
current_ids = []
default_found = False
for choice in choices:
if choice.id in current_ids:
message = "{} choice id is already in registered ids. Can't instantiate this " \
"interaction".format(choice.id)
logger.error(message)
raise BaseException(message)
current_ids.append(choice.id)
if choice.is_default:
if default_found:
message = "One default was already registered, can't register a second one in that choices set: {}"\
.format([choice.label for choice in choices])
logger.error(message)
raise BaseException(message)
default_found = True
self.content = content
self.choices = choices
self.newline_before_option = newline_before_option
def choose(self, choice_id=None, answer=None):
"""Return associated callback for choice"""
for choice in self.choices:
if (choice_id is not None and choice.id == choice_id) or\
(answer is not None and (choice.label.lower() == answer.lower() or
(choice.txt_shorcut is not None and
choice.txt_shorcut.lower() == answer.lower()))):
return choice.callback_fn()
msg = _("No suitable answer provided")
if choice_id is not None:
msg = _("Your entry '{}' isn't an acceptable choice. choices are: {}")\
.format(choice_id, [choice.id for choice in self.choices])
if answer is not None:
msg = _("Your entry '{}' isn't an acceptable choice. choices are: {} and {}")\
.format(answer, [choice.txt_shorcut for choice in self.choices if choice.txt_shorcut is not None],
[choice.label for choice in self.choices])
if not choice_id and not answer:
for choice in self.choices:
if choice.is_default:
return choice.callback_fn()
logger.error(msg)
raise InputError(msg)
@property
def prompt(self):
"""Text prompt handling if we do have some shortcuts"""
possible_answers = []
for choice in self.choices:
answer = choice.label
if choice.txt_shorcut:
# NOTE: sum of answers
answer += _(" ({})").format((choice.txt_shorcut))
possible_answers.append(answer)
if self.newline_before_option:
# NOTE: first is prompt, newline and then set of answers
prompt = _("{}\n[{}] ").format(self.content, '/'.join(possible_answers))
else:
# NOTE: first is prompt, then set of answers:
prompt = _("{} [{}] ").format(self.content, '/'.join(possible_answers))
return prompt
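    # Illustrative sketch (hypothetical choices, not part of the original module):
    #   TextWithChoices("Install the server flavour?",
    #                   [Choice(0, "Yes", cb_yes, txt_shorcut="y", is_default=True),
    #                    Choice(1, "No", cb_no, txt_shorcut="N")]).prompt
    # renders as "Install the server flavour? [Yes (y)/No (N)] ", and
    # choose(answer="y") would invoke cb_yes.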
class LicenseAgreement(TextWithChoices):
def __init__(self, content, callback_yes, callback_no):
"""License agreement text with accept/decline"""
choices = [Choice(0, _("I Accept"), callback_yes, txt_shorcut=_("a")),
Choice(1, _("I don't accept"), callback_no, txt_shorcut=_("N"), is_default=True)]
super().__init__(content, choices=choices, newline_before_option=True)
@property
def input(self):
"""Text input prompt handling if we do have some shortcuts"""
answers = []
for choice in self.choices:
# NOTE: first element is choice, and then shortcut
_("{} ({})")
answer = _("{} ({})").format(choice.label, choice.txt_shorcut)
answers.append(answer)
# append different possible choices
return _("[{}] ").format('/'.join(answers))
class InputText:
def __init__(self, content, callback_fn, default_input=""):
"""Content text with an line input"""
self.content = content
self._callback_fn = callback_fn
self.default_input = default_input
def run_callback(self, result):
self._callback_fn(result)
class YesNo(TextWithChoices):
def __init__(self, content, callback_yes, callback_no, default_is_yes=False):
"""Return a basic Yes No question, default being false or overriden"""
super().__init__(content, [Choice(0, _("Yes"), callback_yes, txt_shorcut=_('y'), is_default=default_is_yes),
Choice(1, _("No"), callback_no, txt_shorcut=_("N"),
is_default=(not default_is_yes))])
class DisplayMessage:
def __init__(self, text):
self.text = text
class UnknownProgress:
def __init__(self, iterator):
self.bar = None
self._iterator = iterator
| gpl-3.0 |
Yuriy-Leonov/nova | nova/api/openstack/compute/contrib/server_diagnostics.py | 15 | 2561 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
from nova import compute
from nova import exception
from nova.openstack.common.gettextutils import _
authorize = extensions.extension_authorizer('compute', 'server_diagnostics')
sd_nsmap = {None: wsgi.XMLNS_V11}
class ServerDiagnosticsTemplate(xmlutil.TemplateBuilder):
def construct(self):
root = xmlutil.TemplateElement('diagnostics')
elem = xmlutil.SubTemplateElement(root, xmlutil.Selector(0),
selector=xmlutil.get_items)
elem.text = 1
return xmlutil.MasterTemplate(root, 1, nsmap=sd_nsmap)
class ServerDiagnosticsController(object):
@wsgi.serializers(xml=ServerDiagnosticsTemplate)
def index(self, req, server_id):
context = req.environ["nova.context"]
authorize(context)
compute_api = compute.API()
try:
instance = compute_api.get(context, server_id)
        except exception.NotFound:
raise webob.exc.HTTPNotFound(_("Instance not found"))
return compute_api.get_diagnostics(context, instance)
class Server_diagnostics(extensions.ExtensionDescriptor):
"""Allow Admins to view server diagnostics through server action."""
name = "ServerDiagnostics"
alias = "os-server-diagnostics"
namespace = ("http://docs.openstack.org/compute/ext/"
"server-diagnostics/api/v1.1")
updated = "2011-12-21T00:00:00+00:00"
def get_resources(self):
parent_def = {'member_name': 'server', 'collection_name': 'servers'}
#NOTE(bcwaldon): This should be prefixed with 'os-'
ext = extensions.ResourceExtension('diagnostics',
ServerDiagnosticsController(),
parent=parent_def)
return [ext]
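    # Added note (assumption, not part of the original module): with the parent
    # mapping above this typically exposes
    #   GET /v2/{tenant_id}/servers/{server_id}/diagnostics
    # which routes to ServerDiagnosticsController.index().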
| apache-2.0 |
ity/pants | tests/python/pants_test/util/test_osutil.py | 24 | 1839 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import unittest
from contextlib import contextmanager
from pants.util.osutil import OS_ALIASES, known_os_names, normalize_os_name
class OsutilTest(unittest.TestCase):
class WarningRecorder(object):
"""Simple logging handler to record warnings."""
def __init__(self):
self.warning_list = []
self.level = logging.WARNING
def handle(self, record):
self.warning_list.append('{}: {}'.format(record.name, record.getMessage()))
@contextmanager
def warnings(self):
handler = self.WarningRecorder()
logging.getLogger('').addHandler(handler)
yield handler.warning_list
def test_alias_normalization(self):
for normal_os, aliases in OS_ALIASES.items():
for alias in aliases:
self.assertEqual(normal_os, normalize_os_name(alias))
def test_keys_in_aliases(self):
for key in OS_ALIASES.keys():
self.assertIn(key, known_os_names())
def test_no_warnings_on_known_names(self):
for name in known_os_names():
with self.warnings() as warning_list:
normalize_os_name(name)
self.assertEqual(0, len(warning_list),
                         'Received unexpected warnings: {}'.format(warning_list))
def test_warnings_on_unknown_names(self):
name = 'I really hope no one ever names an operating system with this string.'
with self.warnings() as warning_list:
normalize_os_name(name)
self.assertEqual(1, len(warning_list),
'Expected exactly one warning, but got: {}'.format(warning_list))
| apache-2.0 |
bukzor/sympy | sympy/__init__.py | 42 | 2442 | """SymPy is a Python library for symbolic mathematics. It aims to become a
full-featured computer algebra system (CAS) while keeping the code as
simple as possible in order to be comprehensible and easily extensible.
SymPy is written entirely in Python and does not require any external
libraries, except optionally for plotting support.
See the webpage for more information and documentation:
http://sympy.org
"""
from __future__ import absolute_import, print_function
del absolute_import, print_function
try:
import mpmath
except ImportError:
raise ImportError("SymPy now depends on mpmath as an external library. "
"See http://docs.sympy.org/latest/install.html#mpmath for more information.")
from sympy.release import __version__
import sys
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
raise ImportError("Python Version 2.6 or above is required for SymPy.")
else: # Python 3
pass
# Here we can also check for specific Python 3 versions, if needed
del sys
def __sympy_debug():
# helper function so we don't import os globally
import os
debug_str = os.getenv('SYMPY_DEBUG', 'False')
if debug_str in ('True', 'False'):
return eval(debug_str)
else:
raise RuntimeError("unrecognized value for SYMPY_DEBUG: %s" %
debug_str)
SYMPY_DEBUG = __sympy_debug()
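# Added note (not part of the original file): SYMPY_DEBUG is driven by the
# SYMPY_DEBUG environment variable, e.g.
#   SYMPY_DEBUG=True python -c "import sympy"
# Any value other than 'True' or 'False' raises RuntimeError at import time.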
from .core import *
from .logic import *
from .assumptions import *
from .polys import *
from .series import *
from .functions import *
from .ntheory import *
from .concrete import *
from .simplify import *
from .sets import *
from .solvers import *
from .matrices import *
from .geometry import *
from .utilities import *
from .integrals import *
from .tensor import *
from .parsing import *
from .calculus import *
# Adds about .04-.05 seconds of import time
# from combinatorics import *
# This module is slow to import:
#from physics import units
from .plotting import plot, textplot, plot_backends, plot_implicit
from .printing import pretty, pretty_print, pprint, pprint_use_unicode, \
pprint_try_use_unicode, print_gtk, print_tree, pager_print, TableForm
from .printing import ccode, fcode, jscode, mathematica_code, octave_code, \
latex, preview
from .printing import python, print_python, srepr, sstr, sstrrepr
from .interactive import init_session, init_printing
evalf._create_evalf_table()
# This is slow to import:
#import abc
from .deprecated import *
| bsd-3-clause |
tillahoffmann/tensorflow | tensorflow/python/eager/core.py | 14 | 2317 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Experimental API for TensorFlow's "Eager" mode of execution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.eager import memory_trace
from tensorflow.python.framework import errors
# Trace of execution and memory usage.
_active_trace = None
def _status_to_exception(code, message):
try:
error_class = errors.exception_type_from_error_code(code)
return error_class(None, None, message)
except KeyError:
return errors.UnknownError(None, None, message, code)
class _NotOkStatusException(Exception):
"""Exception class to handle not ok Status."""
def __init__(self, message, code):
super(_NotOkStatusException, self).__init__()
self.message = message
self.code = code
def __str__(self):
e = _status_to_exception(self.code, self.message)
return "%s: %s" % (e.__class__.__name__, e)
pywrap_tensorflow.TFE_Py_RegisterExceptionClass(_NotOkStatusException)
def enable_tracing():
"""Enables tracing of execution and memory usage.
WARNING: tracing is not thread-safe.
"""
# TODO(alive): Add code example in doc string.
global _active_trace
_active_trace = memory_trace.MemoryTrace()
def flush_trace():
"""Flushes the active trace, if it exists.
WARNING: tracing is not thread-safe.
"""
# TODO(alive): Add code example in doc string.
if _active_trace is not None:
_active_trace.flush_trace()
def active_trace():
"""Returns the current global active trace of execution and memory usage."""
return _active_trace
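# Illustrative usage sketch (an assumption filling in the TODOs above, not an
# official example):
#   from tensorflow.python.eager import core
#   core.enable_tracing()
#   # ... run eager operations ...
#   core.flush_trace()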
| apache-2.0 |
paulthulstrup/moose | framework/contrib/nsiqcppstyle/rules/RULE_3_2_F_use_representitive_classname_for_cpp_filename.py | 43 | 3744 | """
The file name should contain the representative class/struct name.
If the file contains class/struct decls or defs, the file name should
match one of those classes.
If the class/struct name starts with "C", the "C" can be omitted in the file name.
== Violation ==
= a.h = <== Violation. It should contain class name 'TestClass'
class TestClass() {
}
= a.cpp = <== Violation. It should contain class name 'Test'
void Test::Method1() {
}
== Good ==
= TestClass.h = <== OK
class TestClass {
}
= Class1.h = <== OK.
class CClass1 {
}
= TestClass.cpp = <== OK
void TestClass::Method1() {
}
"""
from nsiqcppstyle_rulemanager import *
from nsiqcppstyle_reporter import *
try :
set()
except NameError:
from sets import Set as set
classname = None
def RunFunctionNameRule(lexer, fullName, decl, contextStack, context) :
names = fullName.split("::")
if len(names) > 1 :
if len(names[0]) != 0 :
classname.add(names[0])
def RunTypeNameRule(lexer, currentType, fullName, decl, contextStack, context) :
if currentType in ["CLASS", "STRUCT"] :
names = fullName.split("::")
if len(names[-1]) != 0 :
classname.add(names[-1])
def RunFileStartRule(lexer, filename, dirname) :
global classname
classname = set()
def RunFileEndRule(lexer, filename, dirname):
goodFileName = False
filename = filename.lower( )
if len(classname) == 0 : return
for t in classname :
if t.startswith("C") :
t = t[1:]
if filename.find(t.lower()) != -1 :
goodFileName = True
break
if not goodFileName :
nsiqcppstyle_reporter.Error(DummyToken(lexer.filename, "", 0, 0), __name__,
"The filename does not represent the classnames (%s)" %(classname))
ruleManager.AddFileStartRule(RunFileStartRule)
ruleManager.AddTypeNameRule(RunTypeNameRule)
ruleManager.AddFunctionNameRule(RunFunctionNameRule)
ruleManager.AddFileEndRule(RunFileEndRule)
###########################################################################################
# Unit Test
###########################################################################################
from nsiqunittest.nsiqcppstyle_unittestbase import *
class testRule(nct):
def setUpRule(self):
ruleManager.AddFileStartRule(RunFileStartRule)
ruleManager.AddTypeNameRule(RunTypeNameRule)
ruleManager.AddFunctionNameRule(RunFunctionNameRule)
ruleManager.AddFileEndRule(RunFileEndRule)
def test1(self):
self.Analyze("test/aa.c",
"""
void AA::DSD() {
}
""")
assert not CheckErrorContent(__name__)
def test2(self):
self.Analyze("test/ab.c",
"""
void AA::DSD() {
}
""")
assert CheckErrorContent(__name__)
def test3(self):
self.Analyze("test/aa.c",
"""
void CAA::DSD() {
}
""")
assert not CheckErrorContent(__name__)
def test4(self):
self.Analyze("test/aa.c",
"""
void DSD() {
}
""")
assert not CheckErrorContent(__name__)
def test5(self):
self.Analyze("test/aa.cpp",
"""
struct AA {
}
class BB {
}
""")
assert not CheckErrorContent(__name__)
def test6(self):
self.Analyze("test/aa.cpp",
"""
struct AA1 {
}
class BB {
}
""")
assert CheckErrorContent(__name__)
def test7(self):
self.Analyze("test/CamRecorderFactory.cpp",
"""
class __declspec(dllexport) CCamRecorderFactory
{
};
""")
assert not CheckErrorContent(__name__)
def test8(self):
self.Analyze("test/CamRecorderFactory.cpp",
"""
class DLLEXPORT CCamRecorderFactory
{
};
""")
assert not CheckErrorContent(__name__)
| lgpl-2.1 |
jhayworth/config | .emacs.d/elpy/rpc-venv/lib/python2.7/site-packages/pip/_vendor/urllib3/connectionpool.py | 10 | 36488 | from __future__ import absolute_import
import errno
import logging
import sys
import warnings
from socket import error as SocketError, timeout as SocketTimeout
import socket
from .exceptions import (
ClosedPoolError,
ProtocolError,
EmptyPoolError,
HeaderParsingError,
HostChangedError,
LocationValueError,
MaxRetryError,
ProxyError,
ReadTimeoutError,
SSLError,
TimeoutError,
InsecureRequestWarning,
NewConnectionError,
)
from .packages.ssl_match_hostname import CertificateError
from .packages import six
from .packages.six.moves import queue
from .connection import (
port_by_scheme,
DummyConnection,
HTTPConnection,
HTTPSConnection,
VerifiedHTTPSConnection,
HTTPException,
BaseSSLError,
)
from .request import RequestMethods
from .response import HTTPResponse
from .util.connection import is_connection_dropped
from .util.request import set_file_position
from .util.response import assert_header_parsing
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import (
get_host,
parse_url,
Url,
_normalize_host as normalize_host,
_encode_target,
)
from .util.queue import LifoQueue
xrange = six.moves.xrange
log = logging.getLogger(__name__)
_Default = object()
# Pool objects
class ConnectionPool(object):
"""
Base class for all connection pools, such as
:class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`.
"""
scheme = None
QueueCls = LifoQueue
def __init__(self, host, port=None):
if not host:
raise LocationValueError("No host specified.")
self.host = _normalize_host(host, scheme=self.scheme)
self._proxy_host = host.lower()
self.port = port
def __str__(self):
return "%s(host=%r, port=%r)" % (type(self).__name__, self.host, self.port)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
# Return False to re-raise any potential exceptions
return False
def close(self):
"""
Close all pooled connections and disable the pool.
"""
pass
# This is taken from http://hg.python.org/cpython/file/7aaba721ebc0/Lib/socket.py#l252
_blocking_errnos = {errno.EAGAIN, errno.EWOULDBLOCK}
class HTTPConnectionPool(ConnectionPool, RequestMethods):
"""
Thread-safe connection pool for one host.
:param host:
Host used for this HTTP Connection (e.g. "localhost"), passed into
:class:`httplib.HTTPConnection`.
:param port:
Port used for this HTTP Connection (None is equivalent to 80), passed
into :class:`httplib.HTTPConnection`.
:param strict:
Causes BadStatusLine to be raised if the status line can't be parsed
as a valid HTTP/1.0 or 1.1 status line, passed into
:class:`httplib.HTTPConnection`.
.. note::
Only works in Python 2. This parameter is ignored in Python 3.
:param timeout:
Socket timeout in seconds for each individual connection. This can
be a float or integer, which sets the timeout for the HTTP request,
or an instance of :class:`urllib3.util.Timeout` which gives you more
fine-grained control over request timeouts. After the constructor has
been parsed, this is always a `urllib3.util.Timeout` object.
:param maxsize:
Number of connections to save that can be reused. More than 1 is useful
in multithreaded situations. If ``block`` is set to False, more
connections will be created but they will not be saved once they've
been used.
:param block:
If set to True, no more than ``maxsize`` connections will be used at
a time. When no free connections are available, the call will block
until a connection has been released. This is a useful side effect for
particular multithreaded situations where one does not want to use more
than maxsize connections per host to prevent flooding.
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
:param retries:
Retry configuration to use by default with requests in this pool.
:param _proxy:
Parsed proxy URL, should not be used directly, instead, see
:class:`urllib3.connectionpool.ProxyManager`"
:param _proxy_headers:
A dictionary with proxy headers, should not be used directly,
instead, see :class:`urllib3.connectionpool.ProxyManager`"
:param \\**conn_kw:
Additional parameters are used to create fresh :class:`urllib3.connection.HTTPConnection`,
:class:`urllib3.connection.HTTPSConnection` instances.
"""
scheme = "http"
ConnectionCls = HTTPConnection
ResponseCls = HTTPResponse
def __init__(
self,
host,
port=None,
strict=False,
timeout=Timeout.DEFAULT_TIMEOUT,
maxsize=1,
block=False,
headers=None,
retries=None,
_proxy=None,
_proxy_headers=None,
**conn_kw
):
ConnectionPool.__init__(self, host, port)
RequestMethods.__init__(self, headers)
self.strict = strict
if not isinstance(timeout, Timeout):
timeout = Timeout.from_float(timeout)
if retries is None:
retries = Retry.DEFAULT
self.timeout = timeout
self.retries = retries
self.pool = self.QueueCls(maxsize)
self.block = block
self.proxy = _proxy
self.proxy_headers = _proxy_headers or {}
# Fill the queue up so that doing get() on it will block properly
for _ in xrange(maxsize):
self.pool.put(None)
# These are mostly for testing and debugging purposes.
self.num_connections = 0
self.num_requests = 0
self.conn_kw = conn_kw
if self.proxy:
# Enable Nagle's algorithm for proxies, to avoid packet fragmentation.
# We cannot know if the user has added default socket options, so we cannot replace the
# list.
self.conn_kw.setdefault("socket_options", [])
def _new_conn(self):
"""
Return a fresh :class:`HTTPConnection`.
"""
self.num_connections += 1
log.debug(
"Starting new HTTP connection (%d): %s:%s",
self.num_connections,
self.host,
self.port or "80",
)
conn = self.ConnectionCls(
host=self.host,
port=self.port,
timeout=self.timeout.connect_timeout,
strict=self.strict,
**self.conn_kw
)
return conn
def _get_conn(self, timeout=None):
"""
Get a connection. Will return a pooled connection if one is available.
If no connections are available and :prop:`.block` is ``False``, then a
fresh connection is returned.
:param timeout:
Seconds to wait before giving up and raising
:class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and
:prop:`.block` is ``True``.
"""
conn = None
try:
conn = self.pool.get(block=self.block, timeout=timeout)
except AttributeError: # self.pool is None
raise ClosedPoolError(self, "Pool is closed.")
except queue.Empty:
if self.block:
raise EmptyPoolError(
self,
"Pool reached maximum size and no more connections are allowed.",
)
pass # Oh well, we'll create a new connection then
# If this is a persistent connection, check if it got disconnected
if conn and is_connection_dropped(conn):
log.debug("Resetting dropped connection: %s", self.host)
conn.close()
if getattr(conn, "auto_open", 1) == 0:
# This is a proxied connection that has been mutated by
# httplib._tunnel() and cannot be reused (since it would
# attempt to bypass the proxy)
conn = None
return conn or self._new_conn()
def _put_conn(self, conn):
"""
Put a connection back into the pool.
:param conn:
Connection object for the current host and port as returned by
:meth:`._new_conn` or :meth:`._get_conn`.
If the pool is already full, the connection is closed and discarded
because we exceeded maxsize. If connections are discarded frequently,
then maxsize should be increased.
If the pool is closed, then the connection will be closed and discarded.
"""
try:
self.pool.put(conn, block=False)
return # Everything is dandy, done.
except AttributeError:
# self.pool is None.
pass
except queue.Full:
# This should never happen if self.block == True
log.warning("Connection pool is full, discarding connection: %s", self.host)
# Connection never got put back into the pool, close it.
if conn:
conn.close()
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
pass
def _prepare_proxy(self, conn):
# Nothing to do for HTTP connections.
pass
def _get_timeout(self, timeout):
""" Helper that always returns a :class:`urllib3.util.Timeout` """
if timeout is _Default:
return self.timeout.clone()
if isinstance(timeout, Timeout):
return timeout.clone()
else:
# User passed us an int/float. This is for backwards compatibility,
# can be removed later
return Timeout.from_float(timeout)
def _raise_timeout(self, err, url, timeout_value):
"""Is the error actually a timeout? Will raise a ReadTimeout or pass"""
if isinstance(err, SocketTimeout):
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
# See the above comment about EAGAIN in Python 3. In Python 2 we have
# to specifically catch it and throw the timeout error
if hasattr(err, "errno") and err.errno in _blocking_errnos:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
# Catch possible read timeouts thrown as SSL errors. If not the
# case, rethrow the original. We need to do this because of:
# http://bugs.python.org/issue10272
if "timed out" in str(err) or "did not complete (read)" in str(
err
): # Python < 2.7.4
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % timeout_value
)
def _make_request(
self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw
):
"""
Perform a request on a given urllib connection object taken from our
pool.
:param conn:
a connection from one of our connection pools
:param timeout:
Socket timeout in seconds for the request. This can be a
float or integer, which will set the same timeout value for
the socket connect and the socket read, or an instance of
:class:`urllib3.util.Timeout`, which gives you more fine-grained
control over your timeouts.
"""
self.num_requests += 1
timeout_obj = self._get_timeout(timeout)
timeout_obj.start_connect()
conn.timeout = timeout_obj.connect_timeout
# Trigger any extra validation we need to do.
try:
self._validate_conn(conn)
except (SocketTimeout, BaseSSLError) as e:
# Py2 raises this as a BaseSSLError, Py3 raises it as socket timeout.
self._raise_timeout(err=e, url=url, timeout_value=conn.timeout)
raise
# conn.request() calls httplib.*.request, not the method in
# urllib3.request. It also calls makefile (recv) on the socket.
if chunked:
conn.request_chunked(method, url, **httplib_request_kw)
else:
conn.request(method, url, **httplib_request_kw)
# Reset the timeout for the recv() on the socket
read_timeout = timeout_obj.read_timeout
# App Engine doesn't have a sock attr
if getattr(conn, "sock", None):
# In Python 3 socket.py will catch EAGAIN and return None when you
# try and read into the file pointer created by http.client, which
# instead raises a BadStatusLine exception. Instead of catching
# the exception and assuming all BadStatusLine exceptions are read
# timeouts, check for a zero timeout before making the request.
if read_timeout == 0:
raise ReadTimeoutError(
self, url, "Read timed out. (read timeout=%s)" % read_timeout
)
if read_timeout is Timeout.DEFAULT_TIMEOUT:
conn.sock.settimeout(socket.getdefaulttimeout())
else: # None or a value
conn.sock.settimeout(read_timeout)
# Receive the response from the server
try:
try:
# Python 2.7, use buffering of HTTP responses
httplib_response = conn.getresponse(buffering=True)
except TypeError:
# Python 3
try:
httplib_response = conn.getresponse()
except BaseException as e:
# Remove the TypeError from the exception chain in
# Python 3 (including for exceptions like SystemExit).
# Otherwise it looks like a bug in the code.
six.raise_from(e, None)
except (SocketTimeout, BaseSSLError, SocketError) as e:
self._raise_timeout(err=e, url=url, timeout_value=read_timeout)
raise
# AppEngine doesn't have a version attr.
http_version = getattr(conn, "_http_vsn_str", "HTTP/?")
log.debug(
'%s://%s:%s "%s %s %s" %s %s',
self.scheme,
self.host,
self.port,
method,
url,
http_version,
httplib_response.status,
httplib_response.length,
)
try:
assert_header_parsing(httplib_response.msg)
except (HeaderParsingError, TypeError) as hpe: # Platform-specific: Python 3
log.warning(
"Failed to parse headers (url=%s): %s",
self._absolute_url(url),
hpe,
exc_info=True,
)
return httplib_response
def _absolute_url(self, path):
return Url(scheme=self.scheme, host=self.host, port=self.port, path=path).url
def close(self):
"""
Close all pooled connections and disable the pool.
"""
if self.pool is None:
return
# Disable access to the pool
old_pool, self.pool = self.pool, None
try:
while True:
conn = old_pool.get(block=False)
if conn:
conn.close()
except queue.Empty:
pass # Done.
def is_same_host(self, url):
"""
Check if the given ``url`` is a member of the same host as this
connection pool.
"""
if url.startswith("/"):
return True
# TODO: Add optional support for socket.gethostbyname checking.
scheme, host, port = get_host(url)
if host is not None:
host = _normalize_host(host, scheme=scheme)
# Use explicit default port for comparison when none is given
if self.port and not port:
port = port_by_scheme.get(scheme)
elif not self.port and port == port_by_scheme.get(scheme):
port = None
return (scheme, host, port) == (self.scheme, self.host, self.port)
def urlopen(
self,
method,
url,
body=None,
headers=None,
retries=None,
redirect=True,
assert_same_host=True,
timeout=_Default,
pool_timeout=None,
release_conn=None,
chunked=False,
body_pos=None,
**response_kw
):
"""
Get a connection from the pool and perform an HTTP request. This is the
lowest level call for making a request, so you'll need to specify all
the raw details.
.. note::
More commonly, it's appropriate to use a convenience method provided
by :class:`.RequestMethods`, such as :meth:`request`.
.. note::
`release_conn` will only behave as expected if
`preload_content=False` because we want to make
`preload_content=False` the default behaviour someday soon without
breaking backwards compatibility.
:param method:
HTTP request method (such as GET, POST, PUT, etc.)
:param body:
Data to send in the request body (useful for creating
POST requests, see HTTPConnectionPool.post_url for
more convenience).
:param headers:
Dictionary of custom headers to send, such as User-Agent,
If-None-Match, etc. If None, pool headers are used. If provided,
these headers completely replace any pool-specific headers.
:param retries:
Configure the number of retries to allow before raising a
:class:`~urllib3.exceptions.MaxRetryError` exception.
Pass ``None`` to retry until you receive a response. Pass a
:class:`~urllib3.util.retry.Retry` object for fine-grained control
over different types of retries.
Pass an integer number to retry connection errors that many times,
but no other types of errors. Pass zero to never retry.
If ``False``, then retries are disabled and any exception is raised
immediately. Also, instead of raising a MaxRetryError on redirects,
the redirect response will be returned.
:type retries: :class:`~urllib3.util.retry.Retry`, False, or an int.
:param redirect:
If True, automatically handle redirects (status codes 301, 302,
303, 307, 308). Each redirect counts as a retry. Disabling retries
will disable redirect, too.
:param assert_same_host:
If ``True``, will make sure that the host of the pool requests is
consistent else will raise HostChangedError. When False, you can
use the pool on an HTTP proxy and request foreign hosts.
:param timeout:
If specified, overrides the default timeout for this one
request. It may be a float (in seconds) or an instance of
:class:`urllib3.util.Timeout`.
:param pool_timeout:
If set and the pool is set to block=True, then this method will
block for ``pool_timeout`` seconds and raise EmptyPoolError if no
connection is available within the time period.
:param release_conn:
If False, then the urlopen call will not release the connection
back into the pool once a response is received (but will release if
you read the entire contents of the response such as when
`preload_content=True`). This is useful if you're not preloading
the response's content immediately. You will need to call
``r.release_conn()`` on the response ``r`` to return the connection
back into the pool. If None, it takes the value of
``response_kw.get('preload_content', True)``.
:param chunked:
If True, urllib3 will send the body using chunked transfer
encoding. Otherwise, urllib3 will send the body using the standard
content-length form. Defaults to False.
:param int body_pos:
Position to seek to in file-like body in the event of a retry or
redirect. Typically this won't need to be set because urllib3 will
auto-populate the value when needed.
:param \\**response_kw:
Additional parameters are passed to
:meth:`urllib3.response.HTTPResponse.from_httplib`
"""
if headers is None:
headers = self.headers
if not isinstance(retries, Retry):
retries = Retry.from_int(retries, redirect=redirect, default=self.retries)
if release_conn is None:
release_conn = response_kw.get("preload_content", True)
# Check host
if assert_same_host and not self.is_same_host(url):
raise HostChangedError(self, url, retries)
# Ensure that the URL we're connecting to is properly encoded
if url.startswith("/"):
url = six.ensure_str(_encode_target(url))
else:
url = six.ensure_str(parse_url(url).url)
conn = None
# Track whether `conn` needs to be released before
# returning/raising/recursing. Update this variable if necessary, and
# leave `release_conn` constant throughout the function. That way, if
# the function recurses, the original value of `release_conn` will be
# passed down into the recursive call, and its value will be respected.
#
# See issue #651 [1] for details.
#
# [1] <https://github.com/urllib3/urllib3/issues/651>
release_this_conn = release_conn
# Merge the proxy headers. Only do this in HTTP. We have to copy the
# headers dict so we can safely change it without those changes being
# reflected in anyone else's copy.
if self.scheme == "http":
headers = headers.copy()
headers.update(self.proxy_headers)
# Must keep the exception bound to a separate variable or else Python 3
# complains about UnboundLocalError.
err = None
# Keep track of whether we cleanly exited the except block. This
# ensures we do proper cleanup in finally.
clean_exit = False
# Rewind body position, if needed. Record current position
# for future rewinds in the event of a redirect/retry.
body_pos = set_file_position(body, body_pos)
try:
# Request a connection from the queue.
timeout_obj = self._get_timeout(timeout)
conn = self._get_conn(timeout=pool_timeout)
conn.timeout = timeout_obj.connect_timeout
is_new_proxy_conn = self.proxy is not None and not getattr(
conn, "sock", None
)
if is_new_proxy_conn:
self._prepare_proxy(conn)
# Make the request on the httplib connection object.
httplib_response = self._make_request(
conn,
method,
url,
timeout=timeout_obj,
body=body,
headers=headers,
chunked=chunked,
)
# If we're going to release the connection in ``finally:``, then
# the response doesn't need to know about the connection. Otherwise
# it will also try to release it and we'll have a double-release
# mess.
response_conn = conn if not release_conn else None
# Pass method to Response for length checking
response_kw["request_method"] = method
# Import httplib's response into our own wrapper object
response = self.ResponseCls.from_httplib(
httplib_response,
pool=self,
connection=response_conn,
retries=retries,
**response_kw
)
# Everything went great!
clean_exit = True
except queue.Empty:
# Timed out by queue.
raise EmptyPoolError(self, "No pool connections are available.")
except (
TimeoutError,
HTTPException,
SocketError,
ProtocolError,
BaseSSLError,
SSLError,
CertificateError,
) as e:
# Discard the connection for these exceptions. It will be
# replaced during the next _get_conn() call.
clean_exit = False
if isinstance(e, (BaseSSLError, CertificateError)):
e = SSLError(e)
elif isinstance(e, (SocketError, NewConnectionError)) and self.proxy:
e = ProxyError("Cannot connect to proxy.", e)
elif isinstance(e, (SocketError, HTTPException)):
e = ProtocolError("Connection aborted.", e)
retries = retries.increment(
method, url, error=e, _pool=self, _stacktrace=sys.exc_info()[2]
)
retries.sleep()
# Keep track of the error for the retry warning.
err = e
finally:
if not clean_exit:
# We hit some kind of exception, handled or otherwise. We need
# to throw the connection away unless explicitly told not to.
# Close the connection, set the variable to None, and make sure
# we put the None back in the pool to avoid leaking it.
conn = conn and conn.close()
release_this_conn = True
if release_this_conn:
# Put the connection back to be reused. If the connection is
# expired then it will be None, which will get replaced with a
# fresh connection during _get_conn.
self._put_conn(conn)
if not conn:
# Try again
log.warning(
"Retrying (%r) after connection broken by '%r': %s", retries, err, url
)
return self.urlopen(
method,
url,
body,
headers,
retries,
redirect,
assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
**response_kw
)
def drain_and_release_conn(response):
try:
# discard any remaining response body, the connection will be
# released back to the pool once the entire response is read
response.read()
except (
TimeoutError,
HTTPException,
SocketError,
ProtocolError,
BaseSSLError,
SSLError,
):
pass
# Handle redirect?
redirect_location = redirect and response.get_redirect_location()
if redirect_location:
if response.status == 303:
method = "GET"
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_redirect:
# Drain and release the connection for this response, since
# we're not returning it to be released manually.
drain_and_release_conn(response)
raise
return response
# drain and return the connection to the pool before recursing
drain_and_release_conn(response)
retries.sleep_for_retry(response)
log.debug("Redirecting %s -> %s", url, redirect_location)
return self.urlopen(
method,
redirect_location,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
**response_kw
)
# Check if we should retry the HTTP response.
has_retry_after = bool(response.getheader("Retry-After"))
if retries.is_retry(method, response.status, has_retry_after):
try:
retries = retries.increment(method, url, response=response, _pool=self)
except MaxRetryError:
if retries.raise_on_status:
# Drain and release the connection for this response, since
# we're not returning it to be released manually.
drain_and_release_conn(response)
raise
return response
# drain and return the connection to the pool before recursing
drain_and_release_conn(response)
retries.sleep(response)
log.debug("Retry: %s", url)
return self.urlopen(
method,
url,
body,
headers,
retries=retries,
redirect=redirect,
assert_same_host=assert_same_host,
timeout=timeout,
pool_timeout=pool_timeout,
release_conn=release_conn,
chunked=chunked,
body_pos=body_pos,
**response_kw
)
return response
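    # Illustrative usage sketch (hypothetical host and values, not part of the
    # original module):
    #   pool = HTTPConnectionPool("httpbin.org", maxsize=2, retries=Retry(3))
    #   r = pool.urlopen("GET", "/get")
    #   r.status  # e.g. 200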
class HTTPSConnectionPool(HTTPConnectionPool):
"""
Same as :class:`.HTTPConnectionPool`, but HTTPS.
When Python is compiled with the :mod:`ssl` module, then
:class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates,
instead of :class:`.HTTPSConnection`.
:class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``,
``assert_hostname`` and ``host`` in this order to verify connections.
If ``assert_hostname`` is False, no verification is done.
The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``,
``ca_cert_dir``, ``ssl_version``, ``key_password`` are only used if :mod:`ssl`
is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade
the connection socket into an SSL socket.
"""
scheme = "https"
ConnectionCls = HTTPSConnection
def __init__(
self,
host,
port=None,
strict=False,
timeout=Timeout.DEFAULT_TIMEOUT,
maxsize=1,
block=False,
headers=None,
retries=None,
_proxy=None,
_proxy_headers=None,
key_file=None,
cert_file=None,
cert_reqs=None,
key_password=None,
ca_certs=None,
ssl_version=None,
assert_hostname=None,
assert_fingerprint=None,
ca_cert_dir=None,
**conn_kw
):
HTTPConnectionPool.__init__(
self,
host,
port,
strict,
timeout,
maxsize,
block,
headers,
retries,
_proxy,
_proxy_headers,
**conn_kw
)
self.key_file = key_file
self.cert_file = cert_file
self.cert_reqs = cert_reqs
self.key_password = key_password
self.ca_certs = ca_certs
self.ca_cert_dir = ca_cert_dir
self.ssl_version = ssl_version
self.assert_hostname = assert_hostname
self.assert_fingerprint = assert_fingerprint
def _prepare_conn(self, conn):
"""
Prepare the ``connection`` for :meth:`urllib3.util.ssl_wrap_socket`
and establish the tunnel if proxy is used.
"""
if isinstance(conn, VerifiedHTTPSConnection):
conn.set_cert(
key_file=self.key_file,
key_password=self.key_password,
cert_file=self.cert_file,
cert_reqs=self.cert_reqs,
ca_certs=self.ca_certs,
ca_cert_dir=self.ca_cert_dir,
assert_hostname=self.assert_hostname,
assert_fingerprint=self.assert_fingerprint,
)
conn.ssl_version = self.ssl_version
return conn
def _prepare_proxy(self, conn):
"""
Establish tunnel connection early, because otherwise httplib
would improperly set Host: header to proxy's IP:port.
"""
conn.set_tunnel(self._proxy_host, self.port, self.proxy_headers)
conn.connect()
def _new_conn(self):
"""
Return a fresh :class:`httplib.HTTPSConnection`.
"""
self.num_connections += 1
log.debug(
"Starting new HTTPS connection (%d): %s:%s",
self.num_connections,
self.host,
self.port or "443",
)
if not self.ConnectionCls or self.ConnectionCls is DummyConnection:
raise SSLError(
"Can't connect to HTTPS URL because the SSL module is not available."
)
actual_host = self.host
actual_port = self.port
if self.proxy is not None:
actual_host = self.proxy.host
actual_port = self.proxy.port
conn = self.ConnectionCls(
host=actual_host,
port=actual_port,
timeout=self.timeout.connect_timeout,
strict=self.strict,
cert_file=self.cert_file,
key_file=self.key_file,
key_password=self.key_password,
**self.conn_kw
)
return self._prepare_conn(conn)
def _validate_conn(self, conn):
"""
Called right before a request is made, after the socket is created.
"""
super(HTTPSConnectionPool, self)._validate_conn(conn)
# Force connect early to allow us to validate the connection.
if not getattr(conn, "sock", None): # AppEngine might not have `.sock`
conn.connect()
if not conn.is_verified:
warnings.warn(
(
"Unverified HTTPS request is being made. "
"Adding certificate verification is strongly advised. See: "
"https://urllib3.readthedocs.io/en/latest/advanced-usage.html"
"#ssl-warnings"
),
InsecureRequestWarning,
)
def connection_from_url(url, **kw):
"""
Given a url, return an :class:`.ConnectionPool` instance of its host.
This is a shortcut for not having to parse out the scheme, host, and port
of the url before creating an :class:`.ConnectionPool` instance.
:param url:
Absolute URL string that must include the scheme. Port is optional.
:param \\**kw:
Passes additional parameters to the constructor of the appropriate
:class:`.ConnectionPool`. Useful for specifying things like
timeout, maxsize, headers, etc.
Example::
>>> conn = connection_from_url('http://google.com/')
>>> r = conn.request('GET', '/')
"""
scheme, host, port = get_host(url)
port = port or port_by_scheme.get(scheme, 80)
if scheme == "https":
return HTTPSConnectionPool(host, port=port, **kw)
else:
return HTTPConnectionPool(host, port=port, **kw)
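# Illustrative sketch (added for exposition, not part of the original module):
# one way connection_from_url() might be used; the URL and keyword values are
# assumptions chosen purely for demonstration.
def _example_connection_from_url():  # hypothetical helper
    pool = connection_from_url("https://example.org/", maxsize=2, retries=2)
    response = pool.request("GET", "/")
    return response.status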
def _normalize_host(host, scheme):
"""
Normalize hosts for comparisons and use with sockets.
"""
host = normalize_host(host, scheme)
# httplib doesn't like it when we include brackets in IPv6 addresses
# Specifically, if we include brackets but also pass the port then
# httplib crazily doubles up the square brackets on the Host header.
# Instead, we need to make sure we never pass ``None`` as the port.
# However, for backward compatibility reasons we can't actually
# *assert* that. See http://bugs.python.org/issue28539
if host.startswith("[") and host.endswith("]"):
host = host[1:-1]
return host
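# Illustrative sketch (added for exposition): the bracket handling above means
#
#     _normalize_host("[::1]", "http")  ->  "::1"
#
# so the port can later be passed separately without httplib doubling the
# brackets in the Host header.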
| gpl-3.0 |
MiltosD/CEF-ELRC | lib/python2.7/site-packages/haystack/fields.py | 14 | 12973 | import re
from django.utils import datetime_safe
from django.template import loader, Context
from haystack.exceptions import SearchFieldError
class NOT_PROVIDED:
pass
DATETIME_REGEX = re.compile('^(?P<year>\d{4})-(?P<month>\d{2})-(?P<day>\d{2})(T|\s+)(?P<hour>\d{2}):(?P<minute>\d{2}):(?P<second>\d{2}).*?$')
# All the SearchFields variants.
class SearchField(object):
"""The base implementation of a search field."""
field_type = None
def __init__(self, model_attr=None, use_template=False, template_name=None,
document=False, indexed=True, stored=True, faceted=False,
default=NOT_PROVIDED, null=False, index_fieldname=None,
facet_class=None, boost=1.0, weight=None):
# Track what the index thinks this field is called.
self.instance_name = None
self.model_attr = model_attr
self.use_template = use_template
self.template_name = template_name
self.document = document
self.indexed = indexed
self.stored = stored
self.faceted = faceted
self._default = default
self.null = null
self.index_fieldname = index_fieldname
self.boost = weight or boost
self.is_multivalued = False
# We supply the facet_class for making it easy to create a faceted
# field based off of this field.
self.facet_class = facet_class
if self.facet_class is None:
self.facet_class = FacetCharField
self.set_instance_name(None)
def set_instance_name(self, instance_name):
self.instance_name = instance_name
if self.index_fieldname is None:
self.index_fieldname = self.instance_name
def has_default(self):
"""Returns a boolean of whether this field has a default value."""
return self._default is not NOT_PROVIDED
@property
def default(self):
"""Returns the default value for the field."""
if callable(self._default):
return self._default()
return self._default
def prepare(self, obj):
"""
Takes data from the provided object and prepares it for storage in the
index.
"""
# Give priority to a template.
if self.use_template:
return self.prepare_template(obj)
elif self.model_attr is not None:
# Check for `__` in the field for looking through the relation.
attrs = self.model_attr.split('__')
current_object = obj
for attr in attrs:
if not hasattr(current_object, attr):
raise SearchFieldError("The model '%s' does not have a model_attr '%s'." % (repr(obj), attr))
current_object = getattr(current_object, attr, None)
if current_object is None:
if self.has_default():
current_object = self._default
                        # Fall out of the loop, given any further attempts at
                        # accesses will fail miserably.
break
elif self.null:
current_object = None
                        # Fall out of the loop, given any further attempts at
                        # accesses will fail miserably.
break
else:
raise SearchFieldError("The model '%s' has an empty model_attr '%s' and doesn't allow a default or null value." % (repr(obj), attr))
if callable(current_object):
return current_object()
return current_object
if self.has_default():
return self.default
else:
return None
def prepare_template(self, obj):
"""
Flattens an object for indexing.
This loads a template
(``search/indexes/{app_label}/{model_name}_{field_name}.txt``) and
returns the result of rendering that template. ``object`` will be in
its context.
"""
if self.instance_name is None and self.template_name is None:
raise SearchFieldError("This field requires either its instance_name variable to be populated or an explicit template_name in order to load the correct template.")
if self.template_name is not None:
template_names = self.template_name
if not isinstance(template_names, (list, tuple)):
template_names = [template_names]
else:
template_names = ['search/indexes/%s/%s_%s.txt' % (obj._meta.app_label, obj._meta.module_name, self.instance_name)]
t = loader.select_template(template_names)
return t.render(Context({'object': obj}))
def convert(self, value):
"""
Handles conversion between the data found and the type of the field.
Extending classes should override this method and provide correct
data coercion.
"""
return value
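# Illustrative sketch (added for exposition; the ``Note`` model and attribute
# paths are assumptions): fields such as the ones below normally live on a
# SearchIndex subclass, e.g.
#
#     class NoteIndex(indexes.SearchIndex):
#         text = indexes.CharField(document=True, use_template=True)
#         author = indexes.CharField(model_attr='user__username')
#         pub_date = indexes.DateTimeField(model_attr='pub_date')
#
# ``model_attr='user__username'`` makes prepare() walk obj.user.username via
# the '__' lookups handled above, while ``use_template=True`` makes
# prepare_template() render search/indexes/<app_label>/note_text.txt.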
class CharField(SearchField):
field_type = 'string'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetCharField
super(CharField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(CharField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return unicode(value)
class LocationField(SearchField):
field_type = 'location'
def prepare(self, obj):
from haystack.utils.geo import ensure_point
value = super(LocationField, self).prepare(obj)
if value is None:
return None
pnt = ensure_point(value)
pnt_lng, pnt_lat = pnt.get_coords()
return "%s,%s" % (pnt_lat, pnt_lng)
def convert(self, value):
from haystack.utils.geo import ensure_point, Point
if value is None:
return None
if hasattr(value, 'geom_type'):
value = ensure_point(value)
return value
if isinstance(value, basestring):
lat, lng = value.split(',')
elif isinstance(value, (list, tuple)):
# GeoJSON-alike
lat, lng = value[1], value[0]
elif isinstance(value, dict):
lat = value.get('lat', 0)
lng = value.get('lon', 0)
value = Point(float(lng), float(lat))
return value
class NgramField(CharField):
field_type = 'ngram'
def __init__(self, **kwargs):
if kwargs.get('faceted') is True:
raise SearchFieldError("%s can not be faceted." % self.__class__.__name__)
super(NgramField, self).__init__(**kwargs)
class EdgeNgramField(NgramField):
field_type = 'edge_ngram'
class IntegerField(SearchField):
field_type = 'integer'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetIntegerField
super(IntegerField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(IntegerField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return int(value)
class FloatField(SearchField):
field_type = 'float'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetFloatField
super(FloatField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(FloatField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return float(value)
class DecimalField(SearchField):
field_type = 'string'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetDecimalField
super(DecimalField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(DecimalField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return unicode(value)
class BooleanField(SearchField):
field_type = 'boolean'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetBooleanField
super(BooleanField, self).__init__(**kwargs)
def prepare(self, obj):
return self.convert(super(BooleanField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return bool(value)
class DateField(SearchField):
field_type = 'date'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetDateField
super(DateField, self).__init__(**kwargs)
def convert(self, value):
if value is None:
return None
if isinstance(value, basestring):
match = DATETIME_REGEX.search(value)
if match:
data = match.groupdict()
return datetime_safe.date(int(data['year']), int(data['month']), int(data['day']))
else:
raise SearchFieldError("Date provided to '%s' field doesn't appear to be a valid date string: '%s'" % (self.instance_name, value))
return value
class DateTimeField(SearchField):
field_type = 'datetime'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetDateTimeField
super(DateTimeField, self).__init__(**kwargs)
def convert(self, value):
if value is None:
return None
if isinstance(value, basestring):
match = DATETIME_REGEX.search(value)
if match:
data = match.groupdict()
return datetime_safe.datetime(int(data['year']), int(data['month']), int(data['day']), int(data['hour']), int(data['minute']), int(data['second']))
else:
raise SearchFieldError("Datetime provided to '%s' field doesn't appear to be a valid datetime string: '%s'" % (self.instance_name, value))
return value
class MultiValueField(SearchField):
field_type = 'string'
def __init__(self, **kwargs):
if kwargs.get('facet_class') is None:
kwargs['facet_class'] = FacetMultiValueField
if kwargs.get('use_template') is True:
raise SearchFieldError("'%s' fields can not use templates to prepare their data." % self.__class__.__name__)
super(MultiValueField, self).__init__(**kwargs)
self.is_multivalued = True
def prepare(self, obj):
return self.convert(super(MultiValueField, self).prepare(obj))
def convert(self, value):
if value is None:
return None
return list(value)
class FacetField(SearchField):
"""
``FacetField`` is slightly different than the other fields because it can
work in conjunction with other fields as its data source.
Accepts an optional ``facet_for`` kwarg, which should be the field name
(not ``index_fieldname``) of the field it should pull data from.
"""
instance_name = None
def __init__(self, **kwargs):
handled_kwargs = self.handle_facet_parameters(kwargs)
super(FacetField, self).__init__(**handled_kwargs)
def handle_facet_parameters(self, kwargs):
if kwargs.get('faceted', False):
raise SearchFieldError("FacetField (%s) does not accept the 'faceted' argument." % self.instance_name)
if not kwargs.get('null', True):
raise SearchFieldError("FacetField (%s) does not accept False for the 'null' argument." % self.instance_name)
if not kwargs.get('indexed', True):
raise SearchFieldError("FacetField (%s) does not accept False for the 'indexed' argument." % self.instance_name)
if kwargs.get('facet_class'):
raise SearchFieldError("FacetField (%s) does not accept the 'facet_class' argument." % self.instance_name)
self.facet_for = None
self.facet_class = None
# Make sure the field is nullable.
kwargs['null'] = True
if 'facet_for' in kwargs:
self.facet_for = kwargs['facet_for']
del(kwargs['facet_for'])
return kwargs
def get_facet_for_name(self):
return self.facet_for or self.instance_name
class FacetCharField(FacetField, CharField):
pass
class FacetIntegerField(FacetField, IntegerField):
pass
class FacetFloatField(FacetField, FloatField):
pass
class FacetDecimalField(FacetField, DecimalField):
pass
class FacetBooleanField(FacetField, BooleanField):
pass
class FacetDateField(FacetField, DateField):
pass
class FacetDateTimeField(FacetField, DateTimeField):
pass
class FacetMultiValueField(FacetField, MultiValueField):
pass
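# Illustrative sketch (added for exposition, not part of the original module):
# a faceted field keeps a companion facet_class, wired up in
# SearchField.__init__ above.
def _example_faceted_field():  # hypothetical helper
    field = CharField(faceted=True, model_attr='title')
    field.set_instance_name('title')
    return field.facet_class is FacetCharField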
| bsd-3-clause |
akashsinghal/Speech-Memorization-App | Python_Backend/lib/requests/_internal_utils.py | 414 | 1096 | # -*- coding: utf-8 -*-
"""
requests._internal_utils
~~~~~~~~~~~~~~
Provides utility functions that are consumed internally by Requests
which depend on extremely few external helpers (such as compat)
"""
from .compat import is_py2, builtin_str, str
def to_native_string(string, encoding='ascii'):
"""Given a string object, regardless of type, returns a representation of
that string in the native string type, encoding and decoding where
necessary. This assumes ASCII unless told otherwise.
"""
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out
def unicode_is_ascii(u_string):
"""Determine if unicode string only contains ASCII characters.
:param str u_string: unicode string to check. Must be unicode
and not Python 2 `str`.
:rtype: bool
"""
assert isinstance(u_string, str)
try:
u_string.encode('ascii')
return True
except UnicodeEncodeError:
return False
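# Illustrative sketch (added for exposition, not part of the original module):
# typical behaviour of the two helpers above.
def _example_internal_utils():  # hypothetical helper
    native = to_native_string(u'header-value')  # native str on Python 2 and 3
    return native, unicode_is_ascii(u'token'), unicode_is_ascii(u'tok\u00e9n')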
| apache-2.0 |
Distrotech/bzr | bzrlib/gpg.py | 2 | 21179 | # Copyright (C) 2005, 2011 Canonical Ltd
# Authors: Robert Collins <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""GPG signing and checking logic."""
from __future__ import absolute_import
import os
import sys
from StringIO import StringIO
from bzrlib.lazy_import import lazy_import
lazy_import(globals(), """
import errno
import subprocess
from bzrlib import (
config,
errors,
trace,
ui,
)
from bzrlib.i18n import (
gettext,
ngettext,
)
""")
from bzrlib.symbol_versioning import (
deprecated_in,
deprecated_method,
)
#verification results
SIGNATURE_VALID = 0
SIGNATURE_KEY_MISSING = 1
SIGNATURE_NOT_VALID = 2
SIGNATURE_NOT_SIGNED = 3
SIGNATURE_EXPIRED = 4
def bulk_verify_signatures(repository, revids, strategy,
process_events_callback=None):
"""Do verifications on a set of revisions
:param repository: repository object
:param revids: list of revision ids to verify
:param strategy: GPG strategy to use
:param process_events_callback: method to call for GUI frontends that
want to keep their UI refreshed
:return: count dictionary of results of each type,
result list for each revision,
boolean True if all results are verified successfully
"""
count = {SIGNATURE_VALID: 0,
SIGNATURE_KEY_MISSING: 0,
SIGNATURE_NOT_VALID: 0,
SIGNATURE_NOT_SIGNED: 0,
SIGNATURE_EXPIRED: 0}
result = []
all_verifiable = True
total = len(revids)
pb = ui.ui_factory.nested_progress_bar()
try:
for i, (rev_id, verification_result, uid) in enumerate(
repository.verify_revision_signatures(
revids, strategy)):
pb.update("verifying signatures", i, total)
result.append([rev_id, verification_result, uid])
count[verification_result] += 1
if verification_result != SIGNATURE_VALID:
all_verifiable = False
if process_events_callback is not None:
process_events_callback()
finally:
pb.finished()
return (count, result, all_verifiable)
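# Illustrative sketch (assumptions: ``branch`` is an open bzrlib branch whose
# repository is ``repo`` and ``revids`` is a list of revision ids):
#
#     strategy = GPGStrategy(branch.get_config_stack())
#     count, results, all_ok = bulk_verify_signatures(repo, revids, strategy)
#     print count[SIGNATURE_VALID], 'valid signatures'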
class DisabledGPGStrategy(object):
"""A GPG Strategy that makes everything fail."""
@staticmethod
def verify_signatures_available():
return True
def __init__(self, ignored):
"""Real strategies take a configuration."""
def sign(self, content):
raise errors.SigningFailed('Signing is disabled.')
def verify(self, content, testament):
        raise errors.SignatureVerificationFailed(
            'Signature verification is disabled.')
def set_acceptable_keys(self, command_line_input):
pass
class LoopbackGPGStrategy(object):
"""A GPG Strategy that acts like 'cat' - data is just passed through.
Used in tests.
"""
@staticmethod
def verify_signatures_available():
return True
def __init__(self, ignored):
"""Real strategies take a configuration."""
def sign(self, content):
return ("-----BEGIN PSEUDO-SIGNED CONTENT-----\n" + content +
"-----END PSEUDO-SIGNED CONTENT-----\n")
def verify(self, content, testament):
return SIGNATURE_VALID, None
def set_acceptable_keys(self, command_line_input):
if command_line_input is not None:
patterns = command_line_input.split(",")
self.acceptable_keys = []
for pattern in patterns:
if pattern == "unknown":
pass
else:
self.acceptable_keys.append(pattern)
@deprecated_method(deprecated_in((2, 6, 0)))
def do_verifications(self, revisions, repository):
return bulk_verify_signatures(repository, revisions, self)
@deprecated_method(deprecated_in((2, 6, 0)))
def valid_commits_message(self, count):
return valid_commits_message(count)
@deprecated_method(deprecated_in((2, 6, 0)))
def unknown_key_message(self, count):
return unknown_key_message(count)
@deprecated_method(deprecated_in((2, 6, 0)))
def commit_not_valid_message(self, count):
return commit_not_valid_message(count)
@deprecated_method(deprecated_in((2, 6, 0)))
def commit_not_signed_message(self, count):
return commit_not_signed_message(count)
@deprecated_method(deprecated_in((2, 6, 0)))
def expired_commit_message(self, count):
return expired_commit_message(count)
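# Illustrative sketch (added for exposition, not part of the original module):
# the loopback strategy just wraps content in pseudo-signature markers, which
# makes signing and verification testable without GnuPG.
def _example_loopback_roundtrip():  # hypothetical helper
    strategy = LoopbackGPGStrategy(None)
    signed = strategy.sign('testament text\n')
    status, _uid = strategy.verify(signed, 'testament text\n')
    return status == SIGNATURE_VALID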
def _set_gpg_tty():
tty = os.environ.get('TTY')
if tty is not None:
os.environ['GPG_TTY'] = tty
trace.mutter('setting GPG_TTY=%s', tty)
else:
# This is not quite worthy of a warning, because some people
# don't need GPG_TTY to be set. But it is worthy of a big mark
# in ~/.bzr.log, so that people can debug it if it happens to them
trace.mutter('** Env var TTY empty, cannot set GPG_TTY.'
' Is TTY exported?')
class GPGStrategy(object):
"""GPG Signing and checking facilities."""
acceptable_keys = None
def __init__(self, config_stack):
self._config_stack = config_stack
try:
import gpgme
self.context = gpgme.Context()
except ImportError, error:
pass # can't use verify()
@staticmethod
def verify_signatures_available():
"""
check if this strategy can verify signatures
:return: boolean if this strategy can verify signatures
"""
try:
import gpgme
return True
except ImportError, error:
return False
def _command_line(self):
key = self._config_stack.get('gpg_signing_key')
if key is None or key == 'default':
# 'default' or not setting gpg_signing_key at all means we should
# use the user email address
key = config.extract_email_address(self._config_stack.get('email'))
return [self._config_stack.get('gpg_signing_command'), '--clearsign',
'-u', key]
def sign(self, content):
if isinstance(content, unicode):
raise errors.BzrBadParameterUnicode('content')
ui.ui_factory.clear_term()
preexec_fn = _set_gpg_tty
if sys.platform == 'win32':
# Win32 doesn't support preexec_fn, but wouldn't support TTY anyway.
preexec_fn = None
try:
process = subprocess.Popen(self._command_line(),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
preexec_fn=preexec_fn)
try:
result = process.communicate(content)[0]
if process.returncode is None:
process.wait()
if process.returncode != 0:
raise errors.SigningFailed(self._command_line())
return result
except OSError, e:
if e.errno == errno.EPIPE:
raise errors.SigningFailed(self._command_line())
else:
raise
except ValueError:
# bad subprocess parameters, should never happen.
raise
except OSError, e:
if e.errno == errno.ENOENT:
# gpg is not installed
raise errors.SigningFailed(self._command_line())
else:
raise
def verify(self, content, testament):
"""Check content has a valid signature.
:param content: the commit signature
:param testament: the valid testament string for the commit
:return: SIGNATURE_VALID or a failed SIGNATURE_ value, key uid if valid
"""
try:
import gpgme
except ImportError, error:
raise errors.GpgmeNotInstalled(error)
signature = StringIO(content)
plain_output = StringIO()
try:
result = self.context.verify(signature, None, plain_output)
except gpgme.GpgmeError,error:
raise errors.SignatureVerificationFailed(error[2])
# No result if input is invalid.
# test_verify_invalid()
if len(result) == 0:
return SIGNATURE_NOT_VALID, None
# User has specified a list of acceptable keys, check our result is in
# it. test_verify_unacceptable_key()
fingerprint = result[0].fpr
if self.acceptable_keys is not None:
if not fingerprint in self.acceptable_keys:
return SIGNATURE_KEY_MISSING, fingerprint[-8:]
# Check the signature actually matches the testament.
# test_verify_bad_testament()
if testament != plain_output.getvalue():
return SIGNATURE_NOT_VALID, None
# Yay gpgme set the valid bit.
# Can't write a test for this one as you can't set a key to be
# trusted using gpgme.
if result[0].summary & gpgme.SIGSUM_VALID:
key = self.context.get_key(fingerprint)
name = key.uids[0].name
email = key.uids[0].email
return SIGNATURE_VALID, name + " <" + email + ">"
        # SIGSUM_RED indicates a problem; unfortunately I have not been able
        # to write any tests which actually set this.
if result[0].summary & gpgme.SIGSUM_RED:
return SIGNATURE_NOT_VALID, None
# GPG does not know this key.
# test_verify_unknown_key()
if result[0].summary & gpgme.SIGSUM_KEY_MISSING:
return SIGNATURE_KEY_MISSING, fingerprint[-8:]
        # Summary isn't set if the sig is valid but the key is untrusted, but
        # if the user has explicitly set the key as acceptable we can
        # validate it.
if result[0].summary == 0 and self.acceptable_keys is not None:
if fingerprint in self.acceptable_keys:
# test_verify_untrusted_but_accepted()
return SIGNATURE_VALID, None
# test_verify_valid_but_untrusted()
if result[0].summary == 0 and self.acceptable_keys is None:
return SIGNATURE_NOT_VALID, None
if result[0].summary & gpgme.SIGSUM_KEY_EXPIRED:
expires = self.context.get_key(result[0].fpr).subkeys[0].expires
if expires > result[0].timestamp:
# The expired key was not expired at time of signing.
# test_verify_expired_but_valid()
return SIGNATURE_EXPIRED, fingerprint[-8:]
else:
# I can't work out how to create a test where the signature
# was expired at the time of signing.
return SIGNATURE_NOT_VALID, None
# A signature from a revoked key gets this.
# test_verify_revoked_signature()
if result[0].summary & gpgme.SIGSUM_SYS_ERROR:
return SIGNATURE_NOT_VALID, None
# Other error types such as revoked keys should (I think) be caught by
# SIGSUM_RED so anything else means something is buggy.
raise errors.SignatureVerificationFailed("Unknown GnuPG key "\
"verification result")
def set_acceptable_keys(self, command_line_input):
"""Set the acceptable keys for verifying with this GPGStrategy.
:param command_line_input: comma separated list of patterns from
command line
:return: nothing
"""
key_patterns = None
acceptable_keys_config = self._config_stack.get('acceptable_keys')
try:
if isinstance(acceptable_keys_config, unicode):
acceptable_keys_config = str(acceptable_keys_config)
except UnicodeEncodeError:
# gpg Context.keylist(pattern) does not like unicode
raise errors.BzrCommandError(
gettext('Only ASCII permitted in option names'))
if acceptable_keys_config is not None:
key_patterns = acceptable_keys_config
if command_line_input is not None: # command line overrides config
key_patterns = command_line_input
if key_patterns is not None:
patterns = key_patterns.split(",")
self.acceptable_keys = []
for pattern in patterns:
result = self.context.keylist(pattern)
found_key = False
for key in result:
found_key = True
self.acceptable_keys.append(key.subkeys[0].fpr)
trace.mutter("Added acceptable key: " + key.subkeys[0].fpr)
if not found_key:
trace.note(gettext(
"No GnuPG key results for pattern: {0}"
).format(pattern))
@deprecated_method(deprecated_in((2, 6, 0)))
def do_verifications(self, revisions, repository,
process_events_callback=None):
"""do verifications on a set of revisions
:param revisions: list of revision ids to verify
:param repository: repository object
:param process_events_callback: method to call for GUI frontends that
want to keep their UI refreshed
:return: count dictionary of results of each type,
result list for each revision,
boolean True if all results are verified successfully
"""
return bulk_verify_signatures(repository, revisions, self,
process_events_callback)
@deprecated_method(deprecated_in((2, 6, 0)))
def verbose_valid_message(self, result):
"""takes a verify result and returns list of signed commits strings"""
return verbose_valid_message(result)
@deprecated_method(deprecated_in((2, 6, 0)))
def verbose_not_valid_message(self, result, repo):
"""takes a verify result and returns list of not valid commit info"""
return verbose_not_valid_message(result, repo)
@deprecated_method(deprecated_in((2, 6, 0)))
def verbose_not_signed_message(self, result, repo):
"""takes a verify result and returns list of not signed commit info"""
return verbose_not_valid_message(result, repo)
@deprecated_method(deprecated_in((2, 6, 0)))
def verbose_missing_key_message(self, result):
"""takes a verify result and returns list of missing key info"""
return verbose_missing_key_message(result)
@deprecated_method(deprecated_in((2, 6, 0)))
def verbose_expired_key_message(self, result, repo):
"""takes a verify result and returns list of expired key info"""
return verbose_expired_key_message(result, repo)
@deprecated_method(deprecated_in((2, 6, 0)))
def valid_commits_message(self, count):
"""returns message for number of commits"""
return valid_commits_message(count)
@deprecated_method(deprecated_in((2, 6, 0)))
def unknown_key_message(self, count):
"""returns message for number of commits"""
return unknown_key_message(count)
@deprecated_method(deprecated_in((2, 6, 0)))
def commit_not_valid_message(self, count):
"""returns message for number of commits"""
return commit_not_valid_message(count)
@deprecated_method(deprecated_in((2, 6, 0)))
def commit_not_signed_message(self, count):
"""returns message for number of commits"""
return commit_not_signed_message(count)
@deprecated_method(deprecated_in((2, 6, 0)))
def expired_commit_message(self, count):
"""returns message for number of commits"""
return expired_commit_message(count)
def valid_commits_message(count):
"""returns message for number of commits"""
return gettext(u"{0} commits with valid signatures").format(
count[SIGNATURE_VALID])
def unknown_key_message(count):
"""returns message for number of commits"""
return ngettext(u"{0} commit with unknown key",
u"{0} commits with unknown keys",
count[SIGNATURE_KEY_MISSING]).format(
count[SIGNATURE_KEY_MISSING])
def commit_not_valid_message(count):
"""returns message for number of commits"""
return ngettext(u"{0} commit not valid",
u"{0} commits not valid",
count[SIGNATURE_NOT_VALID]).format(
count[SIGNATURE_NOT_VALID])
def commit_not_signed_message(count):
"""returns message for number of commits"""
return ngettext(u"{0} commit not signed",
u"{0} commits not signed",
count[SIGNATURE_NOT_SIGNED]).format(
count[SIGNATURE_NOT_SIGNED])
def expired_commit_message(count):
"""returns message for number of commits"""
return ngettext(u"{0} commit with key now expired",
u"{0} commits with key now expired",
count[SIGNATURE_EXPIRED]).format(
count[SIGNATURE_EXPIRED])
def verbose_expired_key_message(result, repo):
"""takes a verify result and returns list of expired key info"""
signers = {}
fingerprint_to_authors = {}
for rev_id, validity, fingerprint in result:
if validity == SIGNATURE_EXPIRED:
revision = repo.get_revision(rev_id)
authors = ', '.join(revision.get_apparent_authors())
signers.setdefault(fingerprint, 0)
signers[fingerprint] += 1
fingerprint_to_authors[fingerprint] = authors
result = []
for fingerprint, number in signers.items():
result.append(
ngettext(u"{0} commit by author {1} with key {2} now expired",
u"{0} commits by author {1} with key {2} now expired",
number).format(
number, fingerprint_to_authors[fingerprint], fingerprint))
return result
def verbose_valid_message(result):
"""takes a verify result and returns list of signed commits strings"""
signers = {}
for rev_id, validity, uid in result:
if validity == SIGNATURE_VALID:
signers.setdefault(uid, 0)
signers[uid] += 1
result = []
for uid, number in signers.items():
result.append(ngettext(u"{0} signed {1} commit",
u"{0} signed {1} commits",
number).format(uid, number))
return result
def verbose_not_valid_message(result, repo):
"""takes a verify result and returns list of not valid commit info"""
signers = {}
for rev_id, validity, empty in result:
if validity == SIGNATURE_NOT_VALID:
revision = repo.get_revision(rev_id)
authors = ', '.join(revision.get_apparent_authors())
signers.setdefault(authors, 0)
signers[authors] += 1
result = []
for authors, number in signers.items():
result.append(ngettext(u"{0} commit by author {1}",
u"{0} commits by author {1}",
number).format(number, authors))
return result
def verbose_not_signed_message(result, repo):
"""takes a verify result and returns list of not signed commit info"""
signers = {}
for rev_id, validity, empty in result:
if validity == SIGNATURE_NOT_SIGNED:
revision = repo.get_revision(rev_id)
authors = ', '.join(revision.get_apparent_authors())
signers.setdefault(authors, 0)
signers[authors] += 1
result = []
for authors, number in signers.items():
result.append(ngettext(u"{0} commit by author {1}",
u"{0} commits by author {1}",
number).format(number, authors))
return result
def verbose_missing_key_message(result):
"""takes a verify result and returns list of missing key info"""
signers = {}
for rev_id, validity, fingerprint in result:
if validity == SIGNATURE_KEY_MISSING:
signers.setdefault(fingerprint, 0)
signers[fingerprint] += 1
result = []
for fingerprint, number in signers.items():
result.append(ngettext(u"Unknown key {0} signed {1} commit",
u"Unknown key {0} signed {1} commits",
number).format(fingerprint, number))
return result
| gpl-2.0 |
ticosax/django | django/contrib/gis/geos/polygon.py | 450 | 6843 | from ctypes import byref, c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.geometry import GEOSGeometry
from django.contrib.gis.geos.libgeos import GEOM_PTR, get_pointer_arr
from django.contrib.gis.geos.linestring import LinearRing
from django.utils import six
from django.utils.six.moves import range
class Polygon(GEOSGeometry):
_minlength = 1
def __init__(self, *args, **kwargs):
"""
Initializes on an exterior ring and a sequence of holes (both
instances may be either LinearRing instances, or a tuple/list
that may be constructed into a LinearRing).
Examples of initialization, where shell, hole1, and hole2 are
valid LinearRing geometries:
>>> from django.contrib.gis.geos import LinearRing, Polygon
>>> shell = hole1 = hole2 = LinearRing()
>>> poly = Polygon(shell, hole1, hole2)
>>> poly = Polygon(shell, (hole1, hole2))
        >>> # Example where tuple parameters are used:
        >>> poly = Polygon(((0, 0), (0, 10), (10, 10), (10, 0), (0, 0)),
        ...                ((4, 4), (4, 6), (6, 6), (6, 4), (4, 4)))
"""
if not args:
raise TypeError('Must provide at least one LinearRing, or a tuple, to initialize a Polygon.')
# Getting the ext_ring and init_holes parameters from the argument list
ext_ring = args[0]
init_holes = args[1:]
n_holes = len(init_holes)
# If initialized as Polygon(shell, (LinearRing, LinearRing)) [for backward-compatibility]
if n_holes == 1 and isinstance(init_holes[0], (tuple, list)):
if len(init_holes[0]) == 0:
init_holes = ()
n_holes = 0
elif isinstance(init_holes[0][0], LinearRing):
init_holes = init_holes[0]
n_holes = len(init_holes)
polygon = self._create_polygon(n_holes + 1, (ext_ring,) + init_holes)
super(Polygon, self).__init__(polygon, **kwargs)
def __iter__(self):
"Iterates over each ring in the polygon."
for i in range(len(self)):
yield self[i]
def __len__(self):
"Returns the number of rings in this Polygon."
return self.num_interior_rings + 1
@classmethod
def from_bbox(cls, bbox):
"Constructs a Polygon from a bounding box (4-tuple)."
x0, y0, x1, y1 = bbox
for z in bbox:
if not isinstance(z, six.integer_types + (float,)):
return GEOSGeometry('POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))' %
(x0, y0, x0, y1, x1, y1, x1, y0, x0, y0))
return Polygon(((x0, y0), (x0, y1), (x1, y1), (x1, y0), (x0, y0)))
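    # Illustrative sketch (added for exposition): from_bbox() builds a closed
    # exterior ring from the two corners, e.g.
    #
    #     >>> Polygon.from_bbox((0, 0, 10, 10)).extent
    #     (0.0, 0.0, 10.0, 10.0)
    #
    # (like any GEOSGeometry operation, this needs the GEOS C library).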
# ### These routines are needed for list-like operation w/ListMixin ###
def _create_polygon(self, length, items):
# Instantiate LinearRing objects if necessary, but don't clone them yet
# _construct_ring will throw a TypeError if a parameter isn't a valid ring
# If we cloned the pointers here, we wouldn't be able to clean up
# in case of error.
rings = []
for r in items:
if isinstance(r, GEOM_PTR):
rings.append(r)
else:
rings.append(self._construct_ring(r))
shell = self._clone(rings.pop(0))
n_holes = length - 1
if n_holes:
holes = get_pointer_arr(n_holes)
for i, r in enumerate(rings):
holes[i] = self._clone(r)
holes_param = byref(holes)
else:
holes_param = None
return capi.create_polygon(shell, holes_param, c_uint(n_holes))
def _clone(self, g):
if isinstance(g, GEOM_PTR):
return capi.geom_clone(g)
else:
return capi.geom_clone(g.ptr)
def _construct_ring(self, param, msg=(
'Parameter must be a sequence of LinearRings or objects that can initialize to LinearRings')):
"Helper routine for trying to construct a ring from the given parameter."
if isinstance(param, LinearRing):
return param
try:
ring = LinearRing(param)
return ring
except TypeError:
raise TypeError(msg)
def _set_list(self, length, items):
# Getting the current pointer, replacing with the newly constructed
# geometry, and destroying the old geometry.
prev_ptr = self.ptr
srid = self.srid
self.ptr = self._create_polygon(length, items)
if srid:
self.srid = srid
capi.destroy_geom(prev_ptr)
def _get_single_internal(self, index):
"""
Returns the ring at the specified index. The first index, 0, will
always return the exterior ring. Indices > 0 will return the
interior ring at the given index (e.g., poly[1] and poly[2] would
return the first and second interior ring, respectively).
CAREFUL: Internal/External are not the same as Interior/Exterior!
_get_single_internal returns a pointer from the existing geometries for use
internally by the object's methods. _get_single_external returns a clone
of the same geometry for use by external code.
"""
if index == 0:
return capi.get_extring(self.ptr)
else:
# Getting the interior ring, have to subtract 1 from the index.
return capi.get_intring(self.ptr, index - 1)
def _get_single_external(self, index):
return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)
_set_single = GEOSGeometry._set_single_rebuild
_assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild
# #### Polygon Properties ####
@property
def num_interior_rings(self):
"Returns the number of interior rings."
# Getting the number of rings
return capi.get_nrings(self.ptr)
def _get_ext_ring(self):
"Gets the exterior ring of the Polygon."
return self[0]
def _set_ext_ring(self, ring):
"Sets the exterior ring of the Polygon."
self[0] = ring
# Properties for the exterior ring/shell.
exterior_ring = property(_get_ext_ring, _set_ext_ring)
shell = exterior_ring
@property
def tuple(self):
"Gets the tuple for each ring in this Polygon."
return tuple(self[i].tuple for i in range(len(self)))
coords = tuple
@property
def kml(self):
"Returns the KML representation of this Polygon."
inner_kml = ''.join("<innerBoundaryIs>%s</innerBoundaryIs>" % self[i + 1].kml
for i in range(self.num_interior_rings))
return "<Polygon><outerBoundaryIs>%s</outerBoundaryIs>%s</Polygon>" % (self[0].kml, inner_kml)
| bsd-3-clause |
0x0all/nupic | py/regions/ImageSensorFilters/LogPolar.py | 17 | 9875 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
This file defines the LogPolar filter, an ImageSensor filter that distorts
incoming images in a "fish-eye" manner.
"""
from PIL import Image
import numpy
from nupic.regions.ImageSensorFilters.BaseFilter import BaseFilter
class LogPolar(BaseFilter):
"""
Apply a LogPolar transformation to the original image
"""
def __init__(self, xsize, ysize, c, preserveCenterResolution=False, Debug=False):
"""
Initializes the kernel matrices, a one-time cost, which are then applied to
each image.
@param xsize -- The x-dimension size of the desired output
@param ysize -- The y-dimension size of the desired output
    @param c -- Parameterizes how much the image bends (i.e. how steep the
               fish-eye is). c=0 is no distortion; c=3 is severe.
@param preserveCenterResolution -- if True, the resolution of the center of the
image will be preserved and the edges will be sub-sampled.
If False, the center pixels will be blown up to keep the
outside corners at the original resolution.
@param Debug -- Determines whether to compute and save some intermediate
data structures for debugging (deltaxmat, deltaymat, scales).
"""
BaseFilter.__init__(self)
# Init params
self._lastOutputImage = None
self._xsize = xsize
self._ysize = ysize
self._c = c
self._pcr = preserveCenterResolution
self._debug = Debug
self._kernelx = None
self._kernely = None
self._kernel = None
self._imgSize = (-1,-1)
def process(self, image):
"""
Perform LogPolar filtering on the input image and return the response
@param image -- The image to process
"""
#image.save("logPolarDebugInput.png")
BaseFilter.process(self, image)
#image.save("logPolarDebugPostBase.png")
out = self._applyKernel(image, 0)
outImg = Image.fromarray(out.astype(numpy.int8))
#outImg.save("logPolarDebug.png")
maskOut = self._applyKernel(image, 1)
maskOutImg = Image.fromarray(maskOut.astype(numpy.int8))
outImg.putalpha(maskOutImg)
#outImg.save("logPolarDebugMask.png")
self._lastOutputImage = outImg
return outImg
def _applyKernel(self, img, channel=0, Mirror=False):
"""
The "guts" of the filter. Takes an input PIL image and returns a numpy
array containing the output image data
@param img -- The image to process
@param channel -- Which part of the image to process
0: The image
1: The mask
@param Mirror -- If the image is smaller than the output, whether to mirror
the image (or to fill with zeros)
"""
# Create the kernel if we haven't done so already
if self._kernelx is None:
self._makeKernel(self._xsize, self._ysize, self._c, Debug=self._debug, Save=False)
# Get the input image into a flattened array
data = numpy.array(img.split()[channel].getdata())
# Add a sentinel pixel at the end which is set to the background color
data = numpy.resize(data, data.size+1)
data[-1] = self.background
# See if we need to re-compute our clipped, flattened kernel, which depends on the
# image size
if img.size != self._imgSize:
# Convert our kernel matrix center to the center of the input image, and mark indicies
# that are outside the bounds of the input image with a sentinel
sentinel = -1 * img.size[0] * img.size[1]
kxBig = self._ceilFoor(self._kernelx, img.size[1], None, Mirror).astype('int')
kyBig = self._ceilFoor(self._kernely, img.size[0], None, Mirror).astype('int')
# When preserving the resolution at the edges, we make the output image size the
# same as the input image size. So, when the input image size is smaller than our
# kernel, we have to clip the outside edges of our kernel
if not self._pcr:
kx = self._cropMatCenter(kxBig, (img.size[1],img.size[0]))
ky = self._cropMatCenter(kyBig, (img.size[1],img.size[0]))
matSize = (img.size[1],img.size[0])
else:
kx = kxBig
ky = kyBig
matSize = (self._ysize, self._xsize)
# Convert our kernel to indices into the flattened array of the input image.
kernel = (kx + ky*img.size[0]).flatten()
# Convert all negative indices (sentinels) to reference the last element of the data
kernel[kernel < 0] = -1
self._kernel = kernel
self._imgSize = img.size
self._matSize = matSize
# Map the output from the kernel
output = data[self._kernel].reshape(self._matSize)
return output
def _ceilFoor(self, mat, width, sentinel, Mirror=False):
"""
Center our kernel matrix around the center of the given input image and ensure that
the kernel matrix does not try to access pixels outside the input data array.
"""
out = mat.copy()
# Re-center around the image center
maxIdx = width-1
out += maxIdx / 2.0
# Mark the indices that go outside the source image with a sentinel, we will use these as
# indicators to plug-in the background pixel value
if Mirror:
out[out < 0] = -out[out < 0]
out[out > maxIdx] = 2 * maxIdx-out[out > maxIdx]
else:
if sentinel is not None:
out[out < 0] = sentinel
out[out > maxIdx] = sentinel
else:
out[out < 0] = 0
out[out > maxIdx] = maxIdx
return out
def _cropMatCenter(self, mat, outSize):
"""
Crops mat to be outSize, maintaining the original center.
"""
(xsize, ysize) = outSize
if mat.shape[0] < xsize or mat.shape[1] < ysize:
raise ValueError("Mat shape %s must be >= (xsize=%i,ysize=%i)" %(str(mat.shape), xsize,ysize))
mcenterx = mat.shape[0]/2.
mcentery = mat.shape[1]/2.
x0 = int(mcenterx - xsize/2.)
y0 = int(mcentery - ysize/2.)
return mat[x0:x0+xsize, y0:y0+ysize]
def _makeKernel(self, xsize, ysize, c, Debug=False, Save=True):
"""
Make the original kernel matrices, of size (xsize,ysize) and with bending
parameter c. Debug determines whether to compute and store data structures
useful for debugging (deltaxmat, deltaymat, scales). Save determines whether
to save the kernel matrices to disk, eg. to be loaded later instead of
recomputed.
"""
# Numeric errors if c is exactly zero:
if c == 0:
c = 0.001
    centerx = (xsize-1)/2.
    centery = (ysize-1)/2.
self._kernelx = numpy.zeros((ysize,xsize))
self._kernely = numpy.zeros((ysize,xsize))
if Debug:
self._deltaxmat = numpy.zeros((ysize,xsize))
self._deltaymat = numpy.zeros((ysize,xsize))
self._scales = numpy.zeros((ysize,xsize))
hypotmax = numpy.sqrt(numpy.power(xsize-1-centerx,2) + \
numpy.power(ysize-1-centery,2))
k = 1 / (numpy.exp(c) - 1)
# Are we preserving the center resolution? If so, compute the factor required
# to give a scale of 1 to the center pixels
if self._pcr:
scaleInCenter = k * (numpy.exp(c*1.0/hypotmax)-1) / (1.0/hypotmax)
scaleFactor = 1.0/scaleInCenter
else:
scaleFactor = 1.0
for row in range(ysize):
for col in range(xsize):
if (col != centerx) or (row != centery):
deltax = col-centerx
deltay = row-centery
hypot = numpy.sqrt(numpy.power(deltax,2) + numpy.power(deltay,2))
scale = scaleFactor * k * (numpy.exp(c*hypot/hypotmax)-1) / (hypot/hypotmax)
# scale = numpy.power(hypot/centerx, 1.1) / hypot
self._kernelx[row,col] = scale*deltax
self._kernely[row,col] = scale*deltay
if Debug:
self._deltaxmat[row,col] = deltax
self._deltaymat[row,col] = deltay
self._scales[row,col] = scale
    # Compute the optimum input image size so that the output image fills the
    # self._xsize, self._ysize destination image.
if self._pcr:
optSrcWidth = self._kernelx[centery][-1] * 2
optSrcHeight = self._kernely[-1][centerx] * 2
print "LogPolar Filter: Optimum input image size for this value of c (%f)" % (c), \
"is %d x %d (width x height)" % (optSrcWidth, optSrcHeight)
if Save:
import cPickle
f = open('kernelx%ix%ic%.2f.dat' %(xsize,ysize,c),'w')
cPickle.dump(self._kernelx, f)
f.close()
f = open('kernely%ix%ic%.2f.dat' %(xsize,ysize,c),'w')
cPickle.dump(self._kernely, f)
f.close()
if Debug:
f = open('deltax%ix%i.dat' %(xsize,ysize),'w')
cPickle.dump(self._deltaxmat, f)
f.close()
f = open('deltay%ix%i.dat' %(xsize,ysize),'w')
cPickle.dump(self._deltaymat, f)
f.close()
f = open('scales%ix%ic%.2f.dat' %(xsize,ysize,c),'w')
cPickle.dump(self._scales, f)
f.close()
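# Illustrative sketch (added for exposition; the image size, mode and ``c``
# value are assumptions, and BaseFilter's defaults are assumed to suffice):
#
#     from PIL import Image
#     grey = Image.new('LA', (64, 64), 128)
#     fisheye = LogPolar(64, 64, c=1.5)
#     warped = fisheye.process(grey)   # PIL image with alpha mask attached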
| gpl-3.0 |
Colorfulstan/robotframework | src/robot/variables/tablesetter.py | 17 | 5033 | # Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
from robot.errors import DataError
from robot.utils import split_from_equals, unic, is_string, DotDict
from .isvar import validate_var
from .splitter import VariableSplitter
class VariableTableSetter(object):
def __init__(self, store):
self._store = store
def set(self, variables, overwrite=False):
for name, value in VariableTableReader().read(variables):
self._store.add(name, value, overwrite, decorated=False)
class VariableTableReader(object):
def read(self, variables):
for var in variables:
if not var:
continue
try:
yield self._get_name_and_value(var.name, var.value,
var.report_invalid_syntax)
except DataError as err:
var.report_invalid_syntax(err)
def _get_name_and_value(self, name, value, error_reporter):
return name[2:-1], VariableTableValue(value, name, error_reporter)
def VariableTableValue(value, name, error_reporter=None):
validate_var(name)
VariableTableValue = {'$': ScalarVariableTableValue,
'@': ListVariableTableValue,
'&': DictVariableTableValue}[name[0]]
return VariableTableValue(value, error_reporter)
class VariableTableValueBase(object):
def __init__(self, values, error_reporter=None):
self._values = self._format_values(values)
self._error_reporter = error_reporter
self._resolving = False
def _format_values(self, values):
return values
def resolve(self, variables):
with self._avoid_recursion:
return self._replace_variables(self._values, variables)
@property
@contextmanager
def _avoid_recursion(self):
if self._resolving:
raise DataError('Recursive variable definition.')
self._resolving = True
try:
yield
finally:
self._resolving = False
def _replace_variables(self, value, variables):
raise NotImplementedError
def report_error(self, error):
if self._error_reporter:
self._error_reporter(unicode(error))
class ScalarVariableTableValue(VariableTableValueBase):
def _format_values(self, values):
separator = None
if is_string(values):
values = [values]
elif values and values[0].startswith('SEPARATOR='):
separator = values.pop(0)[10:]
return separator, values
def _replace_variables(self, values, variables):
separator, values = values
if (separator is None and len(values) == 1 and
not VariableSplitter(values[0]).is_list_variable()):
return variables.replace_scalar(values[0])
if separator is None:
separator = ' '
separator = variables.replace_string(separator)
values = variables.replace_list(values)
return separator.join(unic(item) for item in values)
class ListVariableTableValue(VariableTableValueBase):
def _replace_variables(self, values, variables):
return variables.replace_list(values)
class DictVariableTableValue(VariableTableValueBase):
def _format_values(self, values):
return list(self._yield_formatted(values))
def _yield_formatted(self, values):
for item in values:
if VariableSplitter(item).is_dict_variable():
yield item
else:
name, value = split_from_equals(item)
if value is None:
raise DataError("Dictionary item '%s' does not contain "
"'=' separator." % item)
yield name, value
def _replace_variables(self, values, variables):
try:
return DotDict(self._yield_replaced(values,
variables.replace_scalar))
except TypeError as err:
raise DataError('Creating dictionary failed: %s' % err)
def _yield_replaced(self, values, replace_scalar):
for item in values:
if isinstance(item, tuple):
key, values = item
yield replace_scalar(key), replace_scalar(values)
else:
for key, values in replace_scalar(item).items():
yield key, values
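# Illustrative sketch (added for exposition): rows in a Robot Framework
# variable table map onto the classes above, e.g.
#
#     ${GREETING}    Hello          world            -> 'Hello world'
#     ${DASHED}      SEPARATOR=-    Hello    world   -> 'Hello-world'
#     @{ITEMS}       a    b    c                     -> ['a', 'b', 'c']
#     &{MAPPING}     key=value    other=2            -> {'key': 'value', 'other': '2'}
#
# VariableTableValue() picks the right class from the leading $/@/& character.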
| apache-2.0 |
hpcloud-mon/tempest | tempest/cmd/run_stress.py | 14 | 4969 | #!/usr/bin/env python
# Copyright 2013 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import inspect
import json
import sys
try:
from unittest import loader
except ImportError:
# unittest in python 2.6 does not contain loader, so uses unittest2
from unittest2 import loader
from oslo_log import log as logging
from testtools import testsuite
from tempest.stress import driver
LOG = logging.getLogger(__name__)
def discover_stress_tests(path="./", filter_attr=None, call_inherited=False):
"""Discovers all tempest tests and create action out of them
"""
LOG.info("Start test discovery")
tests = []
testloader = loader.TestLoader()
list = testloader.discover(path)
for func in (testsuite.iterate_tests(list)):
attrs = []
try:
method_name = getattr(func, '_testMethodName')
full_name = "%s.%s.%s" % (func.__module__,
func.__class__.__name__,
method_name)
test_func = getattr(func, method_name)
# NOTE(mkoderer): this contains a list of all type attributes
attrs = getattr(test_func, "__testtools_attrs")
except Exception:
            # Attribute lookup failed; this is not a stress-annotated test.
            continue
if 'stress' in attrs:
if filter_attr is not None and filter_attr not in attrs:
continue
class_setup_per = getattr(test_func, "st_class_setup_per")
action = {'action':
"tempest.stress.actions.unit_test.UnitTest",
'kwargs': {"test_method": full_name,
"class_setup_per": class_setup_per
}
}
if (not call_inherited and
getattr(test_func, "st_allow_inheritance") is not True):
class_structure = inspect.getmro(test_func.im_class)
if test_func.__name__ not in class_structure[0].__dict__:
continue
tests.append(action)
return tests
parser = argparse.ArgumentParser(description='Run stress tests')
parser.add_argument('-d', '--duration', default=300, type=int,
help="Duration of test in secs")
parser.add_argument('-s', '--serial', action='store_true',
help="Trigger running tests serially")
parser.add_argument('-S', '--stop', action='store_true',
default=False, help="Stop on first error")
parser.add_argument('-n', '--number', type=int,
help="How often an action is executed for each process")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-a', '--all', action='store_true',
help="Execute all stress tests")
parser.add_argument('-T', '--type',
help="Filters tests of a certain type (e.g. gate)")
parser.add_argument('-i', '--call-inherited', action='store_true',
default=False,
help="Call also inherited function with stress attribute")
group.add_argument('-t', "--tests", nargs='?',
help="Name of the file with test description")
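# Illustrative sketch (added for exposition; the test path and
# ``class_setup_per`` value are assumptions): a JSON file passed with --tests
# holds a list of actions shaped like those built by discover_stress_tests():
#
#     [{"action": "tempest.stress.actions.unit_test.UnitTest",
#       "kwargs": {"test_method": "tempest.api.compute.test_x.TestX.test_y",
#                  "class_setup_per": "process"}}]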
def main():
ns = parser.parse_args()
result = 0
if not ns.all:
tests = json.load(open(ns.tests, 'r'))
else:
tests = discover_stress_tests(filter_attr=ns.type,
call_inherited=ns.call_inherited)
if ns.serial:
# Duration is total time
duration = ns.duration / len(tests)
for test in tests:
step_result = driver.stress_openstack([test],
duration,
ns.number,
ns.stop)
# NOTE(mkoderer): we just save the last result code
if (step_result != 0):
result = step_result
if ns.stop:
return result
else:
result = driver.stress_openstack(tests,
ns.duration,
ns.number,
ns.stop)
return result
if __name__ == "__main__":
try:
sys.exit(main())
except Exception:
LOG.exception("Failure in the stress test framework")
sys.exit(1)
| apache-2.0 |
yugui/grpc | tools/run_tests/artifacts/distribtest_targets.py | 10 | 12759 | #!/usr/bin/env python2.7
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Definition of targets run distribution package tests."""
import os.path
import sys
sys.path.insert(0, os.path.abspath('..'))
import python_utils.jobset as jobset
def create_docker_jobspec(name, dockerfile_dir, shell_command, environ={},
flake_retries=0, timeout_retries=0):
"""Creates jobspec for a task running under docker."""
environ = environ.copy()
environ['RUN_COMMAND'] = shell_command
environ['RELATIVE_COPY_PATH'] = 'test/distrib'
docker_args=[]
for k,v in environ.items():
docker_args += ['-e', '%s=%s' % (k, v)]
docker_env = {'DOCKERFILE_DIR': dockerfile_dir,
'DOCKER_RUN_SCRIPT': 'tools/run_tests/dockerize/docker_run.sh'}
jobspec = jobset.JobSpec(
cmdline=['tools/run_tests/dockerize/build_and_run_docker.sh'] + docker_args,
environ=docker_env,
shortname='distribtest.%s' % (name),
timeout_seconds=30*60,
flake_retries=flake_retries,
timeout_retries=timeout_retries)
return jobspec
def create_jobspec(name, cmdline, environ=None, shell=False,
flake_retries=0, timeout_retries=0):
"""Creates jobspec."""
jobspec = jobset.JobSpec(
cmdline=cmdline,
environ=environ,
shortname='distribtest.%s' % (name),
timeout_seconds=10*60,
flake_retries=flake_retries,
timeout_retries=timeout_retries,
shell=shell)
return jobspec
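# Illustrative sketch (added for exposition; the name, script and environment
# are assumptions): building a plain, non-docker jobspec with the helper above.
def _example_jobspec():  # hypothetical helper
  return create_jobspec('python_linux_x64_example',
                        ['test/distrib/python/run_distrib_test.sh'],
                        environ={'EXTERNAL_GIT_ROOT': '../../..'})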
class CSharpDistribTest(object):
"""Tests C# NuGet package"""
def __init__(self, platform, arch, docker_suffix=None, use_dotnet_cli=False):
self.name = 'csharp_nuget_%s_%s' % (platform, arch)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'csharp', platform, arch]
self.script_suffix = ''
if docker_suffix:
self.name += '_%s' % docker_suffix
self.labels.append(docker_suffix)
if use_dotnet_cli:
self.name += '_dotnetcli'
self.script_suffix = '_dotnetcli'
self.labels.append('dotnetcli')
else:
self.labels.append('olddotnet')
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'linux':
return create_docker_jobspec(self.name,
'tools/dockerfile/distribtest/csharp_%s_%s' % (
self.docker_suffix,
self.arch),
'test/distrib/csharp/run_distrib_test%s.sh' % self.script_suffix)
elif self.platform == 'macos':
return create_jobspec(self.name,
['test/distrib/csharp/run_distrib_test%s.sh' % self.script_suffix],
environ={'EXTERNAL_GIT_ROOT': '../../..'})
elif self.platform == 'windows':
if self.arch == 'x64':
environ={'MSBUILD_EXTRA_ARGS': '/p:Platform=x64',
'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\x64\\Debug'}
else:
        environ={'DISTRIBTEST_OUTPATH': 'DistribTest\\bin\\Debug'}
return create_jobspec(self.name,
['test\\distrib\\csharp\\run_distrib_test%s.bat' % self.script_suffix],
environ=environ)
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
class NodeDistribTest(object):
"""Tests Node package"""
def __init__(self, platform, arch, docker_suffix, node_version):
self.name = 'node_npm_%s_%s_%s' % (platform, arch, node_version)
self.platform = platform
self.arch = arch
self.node_version = node_version
self.labels = ['distribtest', 'node', platform, arch,
'node-%s' % node_version]
if docker_suffix is not None:
self.name += '_%s' % docker_suffix
self.docker_suffix = docker_suffix
self.labels.append(docker_suffix)
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'linux':
linux32 = ''
if self.arch == 'x86':
linux32 = 'linux32'
return create_docker_jobspec(self.name,
'tools/dockerfile/distribtest/node_%s_%s' % (
self.docker_suffix,
self.arch),
'%s test/distrib/node/run_distrib_test.sh %s' % (
linux32,
self.node_version))
elif self.platform == 'macos':
return create_jobspec(self.name,
['test/distrib/node/run_distrib_test.sh',
str(self.node_version)],
environ={'EXTERNAL_GIT_ROOT': '../../..'})
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
class PythonDistribTest(object):
"""Tests Python package"""
def __init__(self, platform, arch, docker_suffix):
self.name = 'python_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'python', platform, arch, docker_suffix]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if not self.platform == 'linux':
raise Exception("Not supported yet.")
return create_docker_jobspec(self.name,
'tools/dockerfile/distribtest/python_%s_%s' % (
self.docker_suffix,
self.arch),
'test/distrib/python/run_distrib_test.sh')
def __str__(self):
return self.name
class RubyDistribTest(object):
"""Tests Ruby package"""
def __init__(self, platform, arch, docker_suffix):
self.name = 'ruby_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'ruby', platform, arch, docker_suffix]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if not self.platform == 'linux':
raise Exception("Not supported yet.")
return create_docker_jobspec(self.name,
'tools/dockerfile/distribtest/ruby_%s_%s' % (
self.docker_suffix,
self.arch),
'test/distrib/ruby/run_distrib_test.sh')
def __str__(self):
return self.name
class PHPDistribTest(object):
"""Tests PHP package"""
def __init__(self, platform, arch, docker_suffix=None):
self.name = 'php_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'php', platform, arch, docker_suffix]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'linux':
return create_docker_jobspec(self.name,
'tools/dockerfile/distribtest/php_%s_%s' % (
self.docker_suffix,
self.arch),
'test/distrib/php/run_distrib_test.sh')
elif self.platform == 'macos':
return create_jobspec(self.name,
['test/distrib/php/run_distrib_test.sh'],
environ={'EXTERNAL_GIT_ROOT': '../../..'})
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
class CppDistribTest(object):
"""Tests Cpp make intall by building examples."""
def __init__(self, platform, arch, docker_suffix=None):
self.name = 'cpp_%s_%s_%s' % (platform, arch, docker_suffix)
self.platform = platform
self.arch = arch
self.docker_suffix = docker_suffix
self.labels = ['distribtest', 'cpp', platform, arch, docker_suffix]
def pre_build_jobspecs(self):
return []
def build_jobspec(self):
if self.platform == 'linux':
return create_docker_jobspec(self.name,
'tools/dockerfile/distribtest/cpp_%s_%s' % (
self.docker_suffix,
self.arch),
'test/distrib/cpp/run_distrib_test.sh')
else:
raise Exception("Not supported yet.")
def __str__(self):
return self.name
def targets():
"""Gets list of supported targets"""
return [CppDistribTest('linux', 'x64', 'jessie'),
CSharpDistribTest('linux', 'x64', 'wheezy'),
CSharpDistribTest('linux', 'x64', 'jessie'),
CSharpDistribTest('linux', 'x86', 'jessie'),
CSharpDistribTest('linux', 'x64', 'centos7'),
CSharpDistribTest('linux', 'x64', 'ubuntu1404'),
CSharpDistribTest('linux', 'x64', 'ubuntu1504'),
CSharpDistribTest('linux', 'x64', 'ubuntu1510'),
CSharpDistribTest('linux', 'x64', 'ubuntu1604'),
CSharpDistribTest('linux', 'x64', 'ubuntu1404', use_dotnet_cli=True),
CSharpDistribTest('macos', 'x86'),
CSharpDistribTest('windows', 'x86'),
CSharpDistribTest('windows', 'x64'),
PythonDistribTest('linux', 'x64', 'wheezy'),
PythonDistribTest('linux', 'x64', 'jessie'),
PythonDistribTest('linux', 'x86', 'jessie'),
PythonDistribTest('linux', 'x64', 'centos6'),
PythonDistribTest('linux', 'x64', 'centos7'),
PythonDistribTest('linux', 'x64', 'fedora20'),
PythonDistribTest('linux', 'x64', 'fedora21'),
PythonDistribTest('linux', 'x64', 'fedora22'),
PythonDistribTest('linux', 'x64', 'fedora23'),
PythonDistribTest('linux', 'x64', 'opensuse'),
PythonDistribTest('linux', 'x64', 'arch'),
PythonDistribTest('linux', 'x64', 'ubuntu1204'),
PythonDistribTest('linux', 'x64', 'ubuntu1404'),
PythonDistribTest('linux', 'x64', 'ubuntu1504'),
PythonDistribTest('linux', 'x64', 'ubuntu1510'),
PythonDistribTest('linux', 'x64', 'ubuntu1604'),
RubyDistribTest('linux', 'x64', 'wheezy'),
RubyDistribTest('linux', 'x64', 'jessie'),
RubyDistribTest('linux', 'x86', 'jessie'),
RubyDistribTest('linux', 'x64', 'centos6'),
RubyDistribTest('linux', 'x64', 'centos7'),
RubyDistribTest('linux', 'x64', 'fedora20'),
RubyDistribTest('linux', 'x64', 'fedora21'),
RubyDistribTest('linux', 'x64', 'fedora22'),
RubyDistribTest('linux', 'x64', 'fedora23'),
RubyDistribTest('linux', 'x64', 'opensuse'),
RubyDistribTest('linux', 'x64', 'ubuntu1204'),
RubyDistribTest('linux', 'x64', 'ubuntu1404'),
RubyDistribTest('linux', 'x64', 'ubuntu1504'),
RubyDistribTest('linux', 'x64', 'ubuntu1510'),
RubyDistribTest('linux', 'x64', 'ubuntu1604'),
NodeDistribTest('macos', 'x64', None, '4'),
NodeDistribTest('macos', 'x64', None, '5'),
NodeDistribTest('linux', 'x86', 'jessie', '4'),
PHPDistribTest('linux', 'x64', 'jessie'),
PHPDistribTest('macos', 'x64'),
] + [
NodeDistribTest('linux', 'x64', os, version)
for os in ('wheezy', 'jessie', 'ubuntu1204', 'ubuntu1404',
'ubuntu1504', 'ubuntu1510', 'ubuntu1604')
for version in ('0.12', '3', '4', '5')
]
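# Editorial sketch (assumption about how a driver script might consume this
# list; the label names are examples only): filter targets by label and
# collect the jobspecs they produce.
#
#   def _jobs_for_labels(*labels):
#       return [t.build_jobspec() for t in targets()
#               if all(l in t.labels for l in labels)]
#
#   linux_csharp_jobs = _jobs_for_labels('csharp', 'linux')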
| bsd-3-clause |
josh-willis/pycbc | tools/einsteinathome/check_GW150914_detection.py | 9 | 1290 | # Read a pycbc_inspiral HDF5 trigger file and check that it contains triggers
# compatible with GW150914
# 2016 Tito Dal Canton
import sys
import h5py
import numpy as np
# GW150914 params from my run
# https://www.atlas.aei.uni-hannover.de/~tito/LSC/er8/er8b_c00_1.2.0_run1
gw150914_time = 1126259462.4
gw150914_snr = {'H1': 19.71, 'L1': 13.28}
gw150914_chi2r = {'H1': 1.05, 'L1': 0.45}
f = h5py.File(sys.argv[1], 'r')
detector = tuple(f.keys())[0]
end_times = f[detector]['end_time'][:]
snrs = f[detector]['snr'][:]
chi2rs = f[detector]['chisq'][:] / (2 * f[detector]['chisq_dof'][:] - 2)
# search for trigs compatible with GW150914
mask = np.logical_and.reduce([abs(end_times - gw150914_time) < 0.1,
snrs > 0.8 * gw150914_snr[detector],
snrs < 1.2 * gw150914_snr[detector],
chi2rs > 0.8 * gw150914_chi2r[detector],
chi2rs < 1.2 * gw150914_chi2r[detector]])
if mask.any():
print('Pass: %d GW150914-like triggers' % sum(mask))
print('end_time snr reduced_chi2')
for t, s, c in zip(end_times[mask], snrs[mask], chi2rs[mask]):
print('%.3f %.3f %.3f' % (t, s, c))
sys.exit(0)
else:
print('Fail: no GW150914-like triggers')
sys.exit(1)
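# Usage sketch (editorial; the HDF5 file name is a placeholder):
#   python check_GW150914_detection.py H1-INSPIRAL_GW150914.hdf
# Exit status is 0 when at least one trigger falls within the expected
# end-time, SNR and reduced chi-squared windows, and 1 otherwise.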
| gpl-3.0 |
Eksmo/calibre | src/calibre/gui2/dialogs/restore_library.py | 4 | 4621 | #!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
__license__ = 'GPL v3'
__copyright__ = '2011, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
from PyQt4.Qt import (QDialog, QLabel, QVBoxLayout, QDialogButtonBox,
QProgressBar, QSize, QTimer, pyqtSignal, Qt)
from calibre.library.restore import Restore
from calibre.gui2 import (error_dialog, question_dialog, warning_dialog,
info_dialog)
from calibre import force_unicode
from calibre.constants import filesystem_encoding
class DBRestore(QDialog):
update_signal = pyqtSignal(object, object)
def __init__(self, parent, library_path):
QDialog.__init__(self, parent)
self.l = QVBoxLayout()
self.setLayout(self.l)
self.l1 = QLabel('<b>'+_('Restoring database from backups, do not'
' interrupt, this will happen in three stages')+'...')
self.setWindowTitle(_('Restoring database'))
self.l.addWidget(self.l1)
self.pb = QProgressBar(self)
self.l.addWidget(self.pb)
self.pb.setMaximum(0)
self.pb.setMinimum(0)
self.msg = QLabel('')
self.l.addWidget(self.msg)
self.msg.setWordWrap(True)
self.bb = QDialogButtonBox(QDialogButtonBox.Cancel)
self.l.addWidget(self.bb)
self.bb.rejected.connect(self.reject)
self.resize(self.sizeHint() + QSize(100, 50))
self.error = None
self.rejected = False
self.library_path = library_path
self.update_signal.connect(self.do_update, type=Qt.QueuedConnection)
self.restorer = Restore(library_path, self)
self.restorer.daemon = True
# Give the metadata backup thread time to stop
QTimer.singleShot(2000, self.start)
def start(self):
self.restorer.start()
QTimer.singleShot(10, self.update)
def reject(self):
self.rejected = True
self.restorer.progress_callback = lambda x, y: x
QDialog.reject(self)
def update(self):
if self.restorer.is_alive():
QTimer.singleShot(10, self.update)
else:
self.restorer.progress_callback = lambda x, y: x
self.accept()
def __call__(self, msg, step):
self.update_signal.emit(msg, step)
def do_update(self, msg, step):
if msg is None:
self.pb.setMaximum(step)
else:
self.msg.setText(msg)
self.pb.setValue(step)
def _show_success_msg(restorer, parent=None):
r = restorer
olddb = _('The old database was saved as: %s')%force_unicode(r.olddb,
filesystem_encoding)
if r.errors_occurred:
warning_dialog(parent, _('Success'),
                _('Restoring the database succeeded with some warnings,'
                  ' click Show details to see the details. %s')%olddb,
det_msg=r.report, show=True)
else:
info_dialog(parent, _('Success'),
_('Restoring database was successful. %s')%olddb, show=True,
show_copy_button=False)
def restore_database(db, parent=None):
if not question_dialog(parent, _('Are you sure?'), '<p>'+
_('Your list of books, with all their metadata is '
'stored in a single file, called a database. '
'In addition, metadata for each individual '
            'book is stored in that book\'s folder, as '
'a backup.'
'<p>This operation will rebuild '
'the database from the individual book '
'metadata. This is useful if the '
'database has been corrupted and you get a '
'blank list of books.'
'<p>Do you want to restore the database?')):
return False
db.conn.close()
d = DBRestore(parent, db.library_path)
d.exec_()
r = d.restorer
d.restorer = None
if d.rejected:
return True
if r.tb is not None:
error_dialog(parent, _('Failed'),
_('Restoring database failed, click Show details to see details'),
det_msg=r.tb, show=True)
else:
_show_success_msg(r, parent=parent)
return True
def repair_library_at(library_path, parent=None):
d = DBRestore(parent, library_path)
d.exec_()
if d.rejected:
return False
r = d.restorer
if r.tb is not None:
error_dialog(parent, _('Failed'),
_('Restoring database failed, click Show details to see details'),
det_msg=r.tb, show=True)
return False
_show_success_msg(r, parent=parent)
return True
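# Usage sketch (editorial; the call sites below are hypothetical):
#   restore_database(gui.current_db, parent=gui)      # rebuild the open library
#   repair_library_at('/path/to/Calibre Library')     # rebuild a library by path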
| gpl-3.0 |
djwbrown/swift | benchmark/scripts/compare_perf_tests.py | 10 | 16354 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ===--- compare_perf_tests.py -------------------------------------------===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===---------------------------------------------------------------------===//
from __future__ import print_function
import argparse
import csv
import sys
from math import sqrt
class PerformanceTestResult(object):
"""PerformanceTestResult holds results from executing an individual
benchmark from the Swift Benchmark Suite as reported by the test driver
(Benchmark_O, Benchmark_Onone, Benchmark_Ounchecked or Benchmark_Driver).
It depends on the log format emitted by the test driver in the form:
#,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),MAX_RSS(B)
The last column, MAX_RSS, is emitted only for runs instrumented by the
Benchmark_Driver to measure rough memory use during the execution of the
benchmark.
"""
def __init__(self, csv_row):
"""PerformanceTestResult instance is created from an iterable with
length of 8 or 9. (Like a row provided by the CSV parser.)
"""
# csv_row[0] is just an ordinal number of the test - skip that
self.name = csv_row[1] # Name of the performance test
self.samples = int(csv_row[2]) # Number of measurement samples taken
        self.min = int(csv_row[3])   # Minimum runtime (μs)
        self.max = int(csv_row[4])   # Maximum runtime (μs)
        self.mean = int(csv_row[5])  # Mean (average) runtime (μs)
        sd = int(csv_row[6])         # Standard Deviation (μs)
# For computing running variance
self.S_runtime = (0 if self.samples < 2 else
(sd * sd) * (self.samples - 1))
        self.median = int(csv_row[7])  # Median runtime (μs)
self.max_rss = ( # Maximum Resident Set Size (B)
int(csv_row[8]) if len(csv_row) > 8 else None)
def __repr__(self):
return (
'<PerformanceTestResult name:{0.name!r} '
'samples:{0.samples!r} min:{0.min!r} max:{0.max!r} '
'mean:{0.mean!r} sd:{0.sd!r} median:{0.median!r}>'.format(self))
@property
def sd(self):
"""Standard Deviation (ms)"""
return (0 if self.samples < 2 else
sqrt(self.S_runtime / (self.samples - 1)))
@staticmethod
def running_mean_variance((k, M_, S_), x):
"""
Compute running variance, B. P. Welford's method
See Knuth TAOCP vol 2, 3rd edition, page 232, or
https://www.johndcook.com/blog/standard_deviation/
M is mean, Standard Deviation is defined as sqrt(S/k-1)
"""
k = float(k + 1)
M = M_ + (x - M_) / k
S = S_ + (x - M_) * (x - M)
return (k, M, S)
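    # Worked example (editorial note): starting from (k, M, S) = (0, 0, 0)
    # and pushing the samples 10, 12, 14 through running_mean_variance gives
    #   (1, 10.0, 0.0) -> (2, 11.0, 2.0) -> (3, 12.0, 8.0),
    # so mean = 12 and sd = sqrt(S / (k - 1)) = sqrt(8 / 2) = 2, which matches
    # the direct computation for those three samples.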
def merge(self, r):
"""Merging test results recomputes min and max.
It attempts to recompute mean and standard deviation when all_samples
are available. There is no correct way to compute these values from
test results that are summaries from more than 3 samples.
The use case here is comparing tests results parsed from concatenated
log files from multiple runs of benchmark driver.
"""
self.min = min(self.min, r.min)
self.max = max(self.max, r.max)
# self.median = None # unclear what to do here
def push(x):
state = (self.samples, self.mean, self.S_runtime)
state = self.running_mean_variance(state, x)
(self.samples, self.mean, self.S_runtime) = state
# Merging test results with up to 3 samples is exact
values = [r.min, r.max, r.median][:min(r.samples, 3)]
map(push, values)
# Column labels for header row in results table
header = ('TEST', 'MIN', 'MAX', 'MEAN', 'MAX_RSS')
def values(self):
"""Values property for display in results table comparisons
in format: ('TEST', 'MIN', 'MAX', 'MEAN', 'MAX_RSS').
"""
return (
self.name,
str(self.min), str(self.max), str(int(self.mean)),
str(self.max_rss) if self.max_rss else '—'
)
class ResultComparison(object):
"""ResultComparison compares MINs from new and old PerformanceTestResult.
It computes speedup ratio and improvement delta (%).
"""
def __init__(self, old, new):
self.old = old
self.new = new
assert(old.name == new.name)
self.name = old.name # Test name, convenience accessor
# Speedup ratio
self.ratio = (old.min + 0.001) / (new.min + 0.001)
# Test runtime improvement in %
ratio = (new.min + 0.001) / (old.min + 0.001)
self.delta = ((ratio - 1) * 100)
# Add ' (?)' to the speedup column as indication of dubious changes:
# result's MIN falls inside the (MIN, MAX) interval of result they are
# being compared with.
self.is_dubious = (
' (?)' if ((old.min < new.min and new.min < old.max) or
(new.min < old.min and old.min < new.max))
else '')
# Column labels for header row in results table
header = ('TEST', 'OLD', 'NEW', 'DELTA', 'SPEEDUP')
def values(self):
"""Values property for display in results table comparisons
in format: ('TEST', 'OLD', 'NEW', 'DELTA', 'SPEEDUP').
"""
return (self.name,
str(self.old.min), str(self.new.min),
'{0:+.1f}%'.format(self.delta),
'{0:.2f}x{1}'.format(self.ratio, self.is_dubious))
class TestComparator(object):
"""TestComparator parses `PerformanceTestResult`s from CSV log files.
Then it determines which tests were `added`, `removed` and which can be
compared. It then splits the `ResultComparison`s into 3 groups according to
the `delta_threshold` by the change in performance: `increased`,
    `decreased` and `unchanged`.
The lists of `added`, `removed` and `unchanged` tests are sorted
alphabetically. The `increased` and `decreased` lists are sorted in
descending order by the amount of change.
"""
def __init__(self, old_file, new_file, delta_threshold):
def load_from_CSV(filename): # handles output from Benchmark_O and
def skip_totals(row): # Benchmark_Driver (added MAX_RSS column)
return len(row) > 7 and row[0].isdigit()
tests = map(PerformanceTestResult,
filter(skip_totals, csv.reader(open(filename))))
def add_or_merge(names, r):
if r.name not in names:
names[r.name] = r
else:
names[r.name].merge(r)
return names
return reduce(add_or_merge, tests, dict())
old_results = load_from_CSV(old_file)
new_results = load_from_CSV(new_file)
old_tests = set(old_results.keys())
new_tests = set(new_results.keys())
comparable_tests = new_tests.intersection(old_tests)
added_tests = new_tests.difference(old_tests)
removed_tests = old_tests.difference(new_tests)
self.added = sorted([new_results[t] for t in added_tests],
key=lambda r: r.name)
self.removed = sorted([old_results[t] for t in removed_tests],
key=lambda r: r.name)
def compare(name):
return ResultComparison(old_results[name], new_results[name])
comparisons = map(compare, comparable_tests)
def partition(l, p):
return reduce(lambda x, y: x[not p(y)].append(y) or x, l, ([], []))
decreased, not_decreased = partition(
comparisons, lambda c: c.ratio < (1 - delta_threshold))
increased, unchanged = partition(
not_decreased, lambda c: c.ratio > (1 + delta_threshold))
# sorted partitions
names = [c.name for c in comparisons]
comparisons = dict(zip(names, comparisons))
self.decreased = [comparisons[c.name]
for c in sorted(decreased, key=lambda c: -c.delta)]
self.increased = [comparisons[c.name]
for c in sorted(increased, key=lambda c: c.delta)]
self.unchanged = [comparisons[c.name]
for c in sorted(unchanged, key=lambda c: c.name)]
class ReportFormatter(object):
"""ReportFormatter formats the `PerformanceTestResult`s and
`ResultComparison`s provided by `TestComparator` using their `header` and
`values()` into report table. Supported formats are: `markdown` (used for
displaying benchmark results on GitHub), `git` and `html`.
"""
def __init__(self, comparator, old_branch, new_branch, changes_only):
self.comparator = comparator
self.old_branch = old_branch
self.new_branch = new_branch
self.changes_only = changes_only
MARKDOWN_DETAIL = """
<details {3}>
<summary>{0} ({1})</summary>
{2}
</details>
"""
GIT_DETAIL = """
{0} ({1}): {2}"""
def markdown(self):
return self._formatted_text(
ROW='{0} | {1} | {2} | {3} | {4} \n',
HEADER_SEPARATOR='---',
DETAIL=self.MARKDOWN_DETAIL)
def git(self):
return self._formatted_text(
ROW='{0} {1} {2} {3} {4} \n',
HEADER_SEPARATOR=' ',
DETAIL=self.GIT_DETAIL)
def _column_widths(self):
changed = self.comparator.decreased + self.comparator.increased
comparisons = (changed if self.changes_only else
changed + self.comparator.unchanged)
comparisons += self.comparator.added + self.comparator.removed
widths = [
map(len, columns) for columns in
[PerformanceTestResult.header, ResultComparison.header] +
[c.values() for c in comparisons]
]
def max_widths(maximum, widths):
return tuple(map(max, zip(maximum, widths)))
return reduce(max_widths, widths, tuple([0] * 5))
def _formatted_text(self, ROW, HEADER_SEPARATOR, DETAIL):
widths = self._column_widths()
def justify_columns(contents):
return tuple([c.ljust(w) for w, c in zip(widths, contents)])
def row(contents):
return ROW.format(*justify_columns(contents))
def header(header):
return '\n' + row(header) + row(tuple([HEADER_SEPARATOR] * 5))
def format_columns(r, strong):
return (r if not strong else
r[:-1] + ('**{0}**'.format(r[-1]), ))
def table(title, results, is_strong=False, is_open=False):
rows = [
row(format_columns(result_comparison.values(), is_strong))
for result_comparison in results
]
return ('' if not rows else
DETAIL.format(*[
title, len(results),
(header(results[0].header) + ''.join(rows)),
('open' if is_open else '')
]))
return ''.join([
# FIXME print self.old_branch, self.new_branch
table('Regression', self.comparator.decreased, True, True),
table('Improvement', self.comparator.increased, True),
('' if self.changes_only else
table('No Changes', self.comparator.unchanged)),
table('Added', self.comparator.added, is_open=True),
table('Removed', self.comparator.removed, is_open=True)
])
HTML = """
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<style>
body {{ font-family: -apple-system, sans-serif; font-size: 14px; }}
table {{ border-spacing: 2px; border-color: gray; border-spacing: 0;
border-collapse: collapse; }}
table tr {{ background-color: #fff; border-top: 1px solid #c6cbd1; }}
table th, table td {{ padding: 6px 13px; border: 1px solid #dfe2e5; }}
th {{ text-align: center; padding-top: 130px; }}
td {{ text-align: right; }}
table td:first-child {{ text-align: left; }}
tr:nth-child(even) {{ background-color: #000000; }}
tr:nth-child(2n) {{ background-color: #f6f8fa; }}
</style>
</head>
<body>
<table>
{0}
</table>
</body>
</html>"""
HTML_HEADER_ROW = """
<tr>
<th align='left'>{0} ({1})</th>
<th align='left'>{2}</th>
<th align='left'>{3}</th>
<th align='left'>{4}</th>
<th align='left'>{5}</th>
</tr>
"""
HTML_ROW = """
<tr>
<td align='left'>{0}</td>
<td align='left'>{1}</td>
<td align='left'>{2}</td>
<td align='left'>{3}</td>
<td align='left'><font color='{4}'>{5}</font></td>
</tr>
"""
def html(self):
def row(name, old, new, delta, speedup, speedup_color):
return self.HTML_ROW.format(
name, old, new, delta, speedup_color, speedup)
def header(contents):
return self.HTML_HEADER_ROW.format(* contents)
def table(title, results, speedup_color):
rows = [
row(*(result_comparison.values() + (speedup_color,)))
for result_comparison in results
]
return ('' if not rows else
header((title, len(results)) + results[0].header[1:]) +
''.join(rows))
return self.HTML.format(
''.join([
# FIXME print self.old_branch, self.new_branch
table('Regression', self.comparator.decreased, 'red'),
table('Improvement', self.comparator.increased, 'green'),
('' if self.changes_only else
table('No Changes', self.comparator.unchanged, 'black')),
table('Added', self.comparator.added, ''),
table('Removed', self.comparator.removed, '')
]))
def parse_args(args):
"""Parse command line arguments and set default values."""
parser = argparse.ArgumentParser(description='Compare Performance tests.')
parser.add_argument('--old-file',
help='Baseline performance test suite (csv file)',
required=True)
parser.add_argument('--new-file',
help='New performance test suite (csv file)',
required=True)
parser.add_argument('--format',
choices=['markdown', 'git', 'html'],
help='Output format. Default is markdown.',
default="markdown")
parser.add_argument('--output', help='Output file name')
parser.add_argument('--changes-only',
help='Output only affected tests', action='store_true')
parser.add_argument('--new-branch',
help='Name of the new branch', default='NEW_MIN')
parser.add_argument('--old-branch',
help='Name of the old branch', default='OLD_MIN')
parser.add_argument('--delta-threshold',
help='Delta threshold. Default 0.05.',
type=float, default=0.05)
return parser.parse_args(args)
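# Example invocation (editorial sketch; the CSV file names are placeholders):
#   python compare_perf_tests.py --old-file old.csv --new-file new.csv \
#       --format markdown --changes-only
# Passing --output FILE additionally writes the rendered report to FILE.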
def main():
args = parse_args(sys.argv[1:])
comparator = TestComparator(args.old_file, args.new_file,
args.delta_threshold)
formatter = ReportFormatter(comparator, args.old_branch, args.new_branch,
args.changes_only)
formats = {
'markdown': formatter.markdown,
'git': formatter.git,
'html': formatter.html
}
report = formats[args.format]()
print(report)
if args.output:
with open(args.output, 'w') as f:
f.write(report)
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 |
BIDS-collaborative/cega-trace | q.py | 2 | 2030 | # -*- coding: utf-8 -*-
# import requests
# import urllib
import re
import urllib
import json
import sys
import codecs  # used by decode() below
def search_doi(s):
url = "http://search.crossref.org/?q=" + convert_string(s)
htmlfile = urllib.urlopen(url)
htmltext = htmlfile.read()
regex ="href='http://dx.doi.org/"+ '(.*)' + "'>"
pattern = re.compile(regex)
match = re.search(pattern, htmltext)
if match:
doi = match.group(1)
else:
        print('did not find a DOI match')
return None
print(doi)
return doi
def convert_string(s):
replaced = re.sub(',', '%2C' , s)
replaced = re.sub(';', '%3B', replaced)
replaced = re.sub(' ', '+', replaced)
replaced = re.sub(':', '%3A' , replaced)
replaced = re.sub('/', '%2F', replaced)
replaced = re.sub('&', '%26' , replaced)
replaced = re.sub(r'\(', '%28', replaced)
replaced = re.sub(r'\)', '%29', replaced)
return replaced
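# Editorial note: convert_string hand-rolls URL escaping for the crossref
# query string. For example,
#   convert_string('Property rights, labour markets')
# returns 'Property+rights%2C+labour+markets'; urllib.quote_plus would be the
# standard-library equivalent for most of these substitutions.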
# s = 'Benjamin, D., & Brandt, L.2002.Property rights, labour markets, and efficiency in a transition economy: the case of rural China.Canadian Journal of Economics, 35(4), 689-716.'
# search_doi(s)
# def check_matching(doi_citation, doi_title):
# if doi_citation == doi_title:
# return doi_title
# else:
# url_citation = "http://api.crossref.org/works/" + doi_citation
# url_title = "http://api.crossref.org/works/" + doi_title
# htmlfile_citation = urllib.urlopen(url_citation)
# htmlfile_title = urllib.urlopen(url_citation)
# htmltext_citation = htmlfile_citation.read()
# htmltext_titel = htmlfile_title.read()
def decode(parse_file):
with codecs.open(parse_file, 'r+', encoding='utf-8', errors='ignore') as txt_file:
txt = txt_file.readlines()
return txt
def main():
for i in range(2, 8): #range of files to run
name = (str(i) + '.txt')
data = decode(name) #decode if necessary
# data = open(name, 'r')
if data:
my_list = data
out = (str(i) + 'doi.txt')
outfile = open(out, 'w')
for line in my_list:
print(line)
                doi = search_doi(line)
                if doi:
                    outfile.write(doi + '\n')
outfile.close()
if __name__ == '__main__':
main()
| bsd-2-clause |
nbargnesi/tornado | tornado/iostream.py | 38 | 64263 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility classes to write to and read from non-blocking files and sockets.
Contents:
* `BaseIOStream`: Generic interface for reading and writing.
* `IOStream`: Implementation of BaseIOStream using non-blocking sockets.
* `SSLIOStream`: SSL-aware version of IOStream.
* `PipeIOStream`: Pipe-based IOStream implementation.
"""
from __future__ import absolute_import, division, print_function, with_statement
import collections
import errno
import numbers
import os
import socket
import sys
import re
from tornado.concurrent import TracebackFuture
from tornado import ioloop
from tornado.log import gen_log, app_log
from tornado.netutil import ssl_wrap_socket, ssl_match_hostname, SSLCertificateError, _client_ssl_defaults, _server_ssl_defaults
from tornado import stack_context
from tornado.util import errno_from_exception
try:
from tornado.platform.posix import _set_nonblocking
except ImportError:
_set_nonblocking = None
try:
import ssl
except ImportError:
# ssl is not available on Google App Engine
ssl = None
# These errnos indicate that a non-blocking operation must be retried
# at a later time. On most platforms they're the same value, but on
# some they differ.
_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)
if hasattr(errno, "WSAEWOULDBLOCK"):
_ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,)
# These errnos indicate that a connection has been abruptly terminated.
# They should be caught and handled less noisily than other errors.
_ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE,
errno.ETIMEDOUT)
if hasattr(errno, "WSAECONNRESET"):
_ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT)
if sys.platform == 'darwin':
# OSX appears to have a race condition that causes send(2) to return
# EPROTOTYPE if called while a socket is being torn down:
# http://erickt.github.io/blog/2014/11/19/adventures-in-debugging-a-potential-osx-kernel-bug/
# Since the socket is being closed anyway, treat this as an ECONNRESET
# instead of an unexpected error.
_ERRNO_CONNRESET += (errno.EPROTOTYPE,)
# More non-portable errnos:
_ERRNO_INPROGRESS = (errno.EINPROGRESS,)
if hasattr(errno, "WSAEINPROGRESS"):
_ERRNO_INPROGRESS += (errno.WSAEINPROGRESS,)
class StreamClosedError(IOError):
"""Exception raised by `IOStream` methods when the stream is closed.
Note that the close callback is scheduled to run *after* other
callbacks on the stream (to allow for buffered data to be processed),
so you may see this error before you see the close callback.
"""
pass
class UnsatisfiableReadError(Exception):
"""Exception raised when a read cannot be satisfied.
Raised by ``read_until`` and ``read_until_regex`` with a ``max_bytes``
argument.
"""
pass
class StreamBufferFullError(Exception):
"""Exception raised by `IOStream` methods when the buffer is full.
"""
class BaseIOStream(object):
"""A utility class to write to and read from a non-blocking file or socket.
We support a non-blocking ``write()`` and a family of ``read_*()`` methods.
All of the methods take an optional ``callback`` argument and return a
`.Future` only if no callback is given. When the operation completes,
the callback will be run or the `.Future` will resolve with the data
read (or ``None`` for ``write()``). All outstanding ``Futures`` will
resolve with a `StreamClosedError` when the stream is closed; users
of the callback interface will be notified via
`.BaseIOStream.set_close_callback` instead.
When a stream is closed due to an error, the IOStream's ``error``
attribute contains the exception object.
Subclasses must implement `fileno`, `close_fd`, `write_to_fd`,
`read_from_fd`, and optionally `get_fd_error`.
"""
def __init__(self, io_loop=None, max_buffer_size=None,
read_chunk_size=None, max_write_buffer_size=None):
"""`BaseIOStream` constructor.
:arg io_loop: The `.IOLoop` to use; defaults to `.IOLoop.current`.
Deprecated since Tornado 4.1.
:arg max_buffer_size: Maximum amount of incoming data to buffer;
defaults to 100MB.
:arg read_chunk_size: Amount of data to read at one time from the
underlying transport; defaults to 64KB.
:arg max_write_buffer_size: Amount of outgoing data to buffer;
defaults to unlimited.
.. versionchanged:: 4.0
Add the ``max_write_buffer_size`` parameter. Changed default
``read_chunk_size`` to 64KB.
"""
self.io_loop = io_loop or ioloop.IOLoop.current()
self.max_buffer_size = max_buffer_size or 104857600
# A chunk size that is too close to max_buffer_size can cause
# spurious failures.
self.read_chunk_size = min(read_chunk_size or 65536,
self.max_buffer_size // 2)
self.max_write_buffer_size = max_write_buffer_size
self.error = None
self._read_buffer = collections.deque()
self._write_buffer = collections.deque()
self._read_buffer_size = 0
self._write_buffer_size = 0
self._write_buffer_frozen = False
self._read_delimiter = None
self._read_regex = None
self._read_max_bytes = None
self._read_bytes = None
self._read_partial = False
self._read_until_close = False
self._read_callback = None
self._read_future = None
self._streaming_callback = None
self._write_callback = None
self._write_future = None
self._close_callback = None
self._connect_callback = None
self._connect_future = None
# _ssl_connect_future should be defined in SSLIOStream
# but it's here so we can clean it up in maybe_run_close_callback.
# TODO: refactor that so subclasses can add additional futures
# to be cancelled.
self._ssl_connect_future = None
self._connecting = False
self._state = None
self._pending_callbacks = 0
self._closed = False
def fileno(self):
"""Returns the file descriptor for this stream."""
raise NotImplementedError()
def close_fd(self):
"""Closes the file underlying this stream.
``close_fd`` is called by `BaseIOStream` and should not be called
elsewhere; other users should call `close` instead.
"""
raise NotImplementedError()
def write_to_fd(self, data):
"""Attempts to write ``data`` to the underlying file.
Returns the number of bytes written.
"""
raise NotImplementedError()
def read_from_fd(self):
"""Attempts to read from the underlying file.
Returns ``None`` if there was nothing to read (the socket
returned `~errno.EWOULDBLOCK` or equivalent), otherwise
returns the data. When possible, should return no more than
``self.read_chunk_size`` bytes at a time.
"""
raise NotImplementedError()
def get_fd_error(self):
"""Returns information about any error on the underlying file.
This method is called after the `.IOLoop` has signaled an error on the
file descriptor, and should return an Exception (such as `socket.error`
        with additional information), or None if no such information is
available.
"""
return None
def read_until_regex(self, regex, callback=None, max_bytes=None):
"""Asynchronously read until we have matched the given regex.
The result includes the data that matches the regex and anything
that came before it. If a callback is given, it will be run
with the data as an argument; if not, this method returns a
`.Future`.
If ``max_bytes`` is not None, the connection will be closed
if more than ``max_bytes`` bytes have been read and the regex is
not satisfied.
.. versionchanged:: 4.0
Added the ``max_bytes`` argument. The ``callback`` argument is
now optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._read_regex = re.compile(regex)
self._read_max_bytes = max_bytes
try:
self._try_inline_read()
except UnsatisfiableReadError as e:
# Handle this the same way as in _handle_events.
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
return future
except:
if future is not None:
# Ensure that the future doesn't log an error because its
# failure was never examined.
future.add_done_callback(lambda f: f.exception())
raise
return future
def read_until(self, delimiter, callback=None, max_bytes=None):
"""Asynchronously read until we have found the given delimiter.
The result includes all the data read including the delimiter.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
If ``max_bytes`` is not None, the connection will be closed
if more than ``max_bytes`` bytes have been read and the delimiter
is not found.
.. versionchanged:: 4.0
Added the ``max_bytes`` argument. The ``callback`` argument is
now optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._read_delimiter = delimiter
self._read_max_bytes = max_bytes
try:
self._try_inline_read()
except UnsatisfiableReadError as e:
# Handle this the same way as in _handle_events.
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
return future
except:
if future is not None:
future.add_done_callback(lambda f: f.exception())
raise
return future
def read_bytes(self, num_bytes, callback=None, streaming_callback=None,
partial=False):
"""Asynchronously read a number of bytes.
If a ``streaming_callback`` is given, it will be called with chunks
of data as they become available, and the final result will be empty.
Otherwise, the result is all the data that was read.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
If ``partial`` is true, the callback is run as soon as we have
any bytes to return (but never more than ``num_bytes``)
.. versionchanged:: 4.0
Added the ``partial`` argument. The callback argument is now
optional and a `.Future` will be returned if it is omitted.
"""
future = self._set_read_callback(callback)
assert isinstance(num_bytes, numbers.Integral)
self._read_bytes = num_bytes
self._read_partial = partial
self._streaming_callback = stack_context.wrap(streaming_callback)
try:
self._try_inline_read()
except:
if future is not None:
future.add_done_callback(lambda f: f.exception())
raise
return future
def read_until_close(self, callback=None, streaming_callback=None):
"""Asynchronously reads all data from the socket until it is closed.
If a ``streaming_callback`` is given, it will be called with chunks
of data as they become available, and the final result will be empty.
Otherwise, the result is all the data that was read.
If a callback is given, it will be run with the data as an argument;
if not, this method returns a `.Future`.
Note that if a ``streaming_callback`` is used, data will be
read from the socket as quickly as it becomes available; there
is no way to apply backpressure or cancel the reads. If flow
control or cancellation are desired, use a loop with
`read_bytes(partial=True) <.read_bytes>` instead.
.. versionchanged:: 4.0
The callback argument is now optional and a `.Future` will
be returned if it is omitted.
"""
future = self._set_read_callback(callback)
self._streaming_callback = stack_context.wrap(streaming_callback)
if self.closed():
if self._streaming_callback is not None:
self._run_read_callback(self._read_buffer_size, True)
self._run_read_callback(self._read_buffer_size, False)
return future
self._read_until_close = True
try:
self._try_inline_read()
except:
future.add_done_callback(lambda f: f.exception())
raise
return future
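    # Usage sketch (editorial, not from the original docs): the Future-based
    # form of the read_*/write methods composes naturally with coroutines.
    # Inside a @tornado.gen.coroutine one might write, for example,
    #   yield stream.write(b"GET / HTTP/1.0\r\n\r\n")
    #   headers = yield stream.read_until(b"\r\n\r\n")
    #   body = yield stream.read_bytes(content_length)
    # where `stream` and `content_length` are assumed to be defined elsewhere.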
def write(self, data, callback=None):
"""Asynchronously write the given data to this stream.
If ``callback`` is given, we call it when all of the buffered write
data has been successfully written to the stream. If there was
previously buffered write data and an old write callback, that
callback is simply overwritten with this new callback.
If no ``callback`` is given, this method returns a `.Future` that
resolves (with a result of ``None``) when the write has been
completed. If `write` is called again before that `.Future` has
resolved, the previous future will be orphaned and will never resolve.
.. versionchanged:: 4.0
Now returns a `.Future` if no callback is given.
"""
assert isinstance(data, bytes)
self._check_closed()
# We use bool(_write_buffer) as a proxy for write_buffer_size>0,
# so never put empty strings in the buffer.
if data:
if (self.max_write_buffer_size is not None and
self._write_buffer_size + len(data) > self.max_write_buffer_size):
raise StreamBufferFullError("Reached maximum write buffer size")
# Break up large contiguous strings before inserting them in the
# write buffer, so we don't have to recopy the entire thing
# as we slice off pieces to send to the socket.
WRITE_BUFFER_CHUNK_SIZE = 128 * 1024
for i in range(0, len(data), WRITE_BUFFER_CHUNK_SIZE):
self._write_buffer.append(data[i:i + WRITE_BUFFER_CHUNK_SIZE])
self._write_buffer_size += len(data)
if callback is not None:
self._write_callback = stack_context.wrap(callback)
future = None
else:
future = self._write_future = TracebackFuture()
future.add_done_callback(lambda f: f.exception())
if not self._connecting:
self._handle_write()
if self._write_buffer:
self._add_io_state(self.io_loop.WRITE)
self._maybe_add_error_listener()
return future
def set_close_callback(self, callback):
"""Call the given callback when the stream is closed.
This is not necessary for applications that use the `.Future`
interface; all outstanding ``Futures`` will resolve with a
`StreamClosedError` when the stream is closed.
"""
self._close_callback = stack_context.wrap(callback)
self._maybe_add_error_listener()
def close(self, exc_info=False):
"""Close this stream.
If ``exc_info`` is true, set the ``error`` attribute to the current
exception from `sys.exc_info` (or if ``exc_info`` is a tuple,
use that instead of `sys.exc_info`).
"""
if not self.closed():
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
if any(exc_info):
self.error = exc_info[1]
if self._read_until_close:
if (self._streaming_callback is not None and
self._read_buffer_size):
self._run_read_callback(self._read_buffer_size, True)
self._read_until_close = False
self._run_read_callback(self._read_buffer_size, False)
if self._state is not None:
self.io_loop.remove_handler(self.fileno())
self._state = None
self.close_fd()
self._closed = True
self._maybe_run_close_callback()
def _maybe_run_close_callback(self):
# If there are pending callbacks, don't run the close callback
# until they're done (see _maybe_add_error_handler)
if self.closed() and self._pending_callbacks == 0:
futures = []
if self._read_future is not None:
futures.append(self._read_future)
self._read_future = None
if self._write_future is not None:
futures.append(self._write_future)
self._write_future = None
if self._connect_future is not None:
futures.append(self._connect_future)
self._connect_future = None
if self._ssl_connect_future is not None:
futures.append(self._ssl_connect_future)
self._ssl_connect_future = None
for future in futures:
if self._is_connreset(self.error):
# Treat connection resets as closed connections so
# clients only have to catch one kind of exception
# to avoid logging.
future.set_exception(StreamClosedError())
else:
future.set_exception(self.error or StreamClosedError())
if self._close_callback is not None:
cb = self._close_callback
self._close_callback = None
self._run_callback(cb)
# Delete any unfinished callbacks to break up reference cycles.
self._read_callback = self._write_callback = None
# Clear the buffers so they can be cleared immediately even
# if the IOStream object is kept alive by a reference cycle.
# TODO: Clear the read buffer too; it currently breaks some tests.
self._write_buffer = None
def reading(self):
"""Returns true if we are currently reading from the stream."""
return self._read_callback is not None or self._read_future is not None
def writing(self):
"""Returns true if we are currently writing to the stream."""
return bool(self._write_buffer)
def closed(self):
"""Returns true if the stream has been closed."""
return self._closed
def set_nodelay(self, value):
"""Sets the no-delay flag for this stream.
By default, data written to TCP streams may be held for a time
to make the most efficient use of bandwidth (according to
Nagle's algorithm). The no-delay flag requests that data be
written as soon as possible, even if doing so would consume
additional bandwidth.
This flag is currently defined only for TCP-based ``IOStreams``.
.. versionadded:: 3.1
"""
pass
def _handle_events(self, fd, events):
if self.closed():
gen_log.warning("Got events for closed stream %s", fd)
return
try:
if self._connecting:
# Most IOLoops will report a write failed connect
# with the WRITE event, but SelectIOLoop reports a
# READ as well so we must check for connecting before
# either.
self._handle_connect()
if self.closed():
return
if events & self.io_loop.READ:
self._handle_read()
if self.closed():
return
if events & self.io_loop.WRITE:
self._handle_write()
if self.closed():
return
if events & self.io_loop.ERROR:
self.error = self.get_fd_error()
# We may have queued up a user callback in _handle_read or
# _handle_write, so don't close the IOStream until those
# callbacks have had a chance to run.
self.io_loop.add_callback(self.close)
return
state = self.io_loop.ERROR
if self.reading():
state |= self.io_loop.READ
if self.writing():
state |= self.io_loop.WRITE
if state == self.io_loop.ERROR and self._read_buffer_size == 0:
# If the connection is idle, listen for reads too so
# we can tell if the connection is closed. If there is
# data in the read buffer we won't run the close callback
# yet anyway, so we don't need to listen in this case.
state |= self.io_loop.READ
if state != self._state:
assert self._state is not None, \
"shouldn't happen: _handle_events without self._state"
self._state = state
self.io_loop.update_handler(self.fileno(), self._state)
except UnsatisfiableReadError as e:
gen_log.info("Unsatisfiable read, closing connection: %s" % e)
self.close(exc_info=True)
except Exception:
gen_log.error("Uncaught exception, closing connection.",
exc_info=True)
self.close(exc_info=True)
raise
def _run_callback(self, callback, *args):
def wrapper():
self._pending_callbacks -= 1
try:
return callback(*args)
except Exception:
app_log.error("Uncaught exception, closing connection.",
exc_info=True)
# Close the socket on an uncaught exception from a user callback
# (It would eventually get closed when the socket object is
# gc'd, but we don't want to rely on gc happening before we
# run out of file descriptors)
self.close(exc_info=True)
# Re-raise the exception so that IOLoop.handle_callback_exception
# can see it and log the error
raise
finally:
self._maybe_add_error_listener()
# We schedule callbacks to be run on the next IOLoop iteration
# rather than running them directly for several reasons:
# * Prevents unbounded stack growth when a callback calls an
# IOLoop operation that immediately runs another callback
# * Provides a predictable execution context for e.g.
# non-reentrant mutexes
# * Ensures that the try/except in wrapper() is run outside
# of the application's StackContexts
with stack_context.NullContext():
# stack_context was already captured in callback, we don't need to
# capture it again for IOStream's wrapper. This is especially
# important if the callback was pre-wrapped before entry to
# IOStream (as in HTTPConnection._header_callback), as we could
# capture and leak the wrong context here.
self._pending_callbacks += 1
self.io_loop.add_callback(wrapper)
def _read_to_buffer_loop(self):
# This method is called from _handle_read and _try_inline_read.
try:
if self._read_bytes is not None:
target_bytes = self._read_bytes
elif self._read_max_bytes is not None:
target_bytes = self._read_max_bytes
elif self.reading():
# For read_until without max_bytes, or
# read_until_close, read as much as we can before
# scanning for the delimiter.
target_bytes = None
else:
target_bytes = 0
next_find_pos = 0
# Pretend to have a pending callback so that an EOF in
# _read_to_buffer doesn't trigger an immediate close
# callback. At the end of this method we'll either
# establish a real pending callback via
# _read_from_buffer or run the close callback.
#
# We need two try statements here so that
# pending_callbacks is decremented before the `except`
# clause below (which calls `close` and does need to
# trigger the callback)
self._pending_callbacks += 1
while not self.closed():
# Read from the socket until we get EWOULDBLOCK or equivalent.
# SSL sockets do some internal buffering, and if the data is
# sitting in the SSL object's buffer select() and friends
# can't see it; the only way to find out if it's there is to
# try to read it.
if self._read_to_buffer() == 0:
break
self._run_streaming_callback()
# If we've read all the bytes we can use, break out of
# this loop. We can't just call read_from_buffer here
# because of subtle interactions with the
# pending_callback and error_listener mechanisms.
#
# If we've reached target_bytes, we know we're done.
if (target_bytes is not None and
self._read_buffer_size >= target_bytes):
break
# Otherwise, we need to call the more expensive find_read_pos.
# It's inefficient to do this on every read, so instead
# do it on the first read and whenever the read buffer
# size has doubled.
if self._read_buffer_size >= next_find_pos:
pos = self._find_read_pos()
if pos is not None:
return pos
next_find_pos = self._read_buffer_size * 2
return self._find_read_pos()
finally:
self._pending_callbacks -= 1
def _handle_read(self):
try:
pos = self._read_to_buffer_loop()
except UnsatisfiableReadError:
raise
except Exception:
gen_log.warning("error on read", exc_info=True)
self.close(exc_info=True)
return
if pos is not None:
self._read_from_buffer(pos)
return
else:
self._maybe_run_close_callback()
def _set_read_callback(self, callback):
assert self._read_callback is None, "Already reading"
assert self._read_future is None, "Already reading"
if callback is not None:
self._read_callback = stack_context.wrap(callback)
else:
self._read_future = TracebackFuture()
return self._read_future
def _run_read_callback(self, size, streaming):
if streaming:
callback = self._streaming_callback
else:
callback = self._read_callback
self._read_callback = self._streaming_callback = None
if self._read_future is not None:
assert callback is None
future = self._read_future
self._read_future = None
future.set_result(self._consume(size))
if callback is not None:
assert (self._read_future is None) or streaming
self._run_callback(callback, self._consume(size))
else:
# If we scheduled a callback, we will add the error listener
# afterwards. If we didn't, we have to do it now.
self._maybe_add_error_listener()
def _try_inline_read(self):
"""Attempt to complete the current read operation from buffered data.
If the read can be completed without blocking, schedules the
read callback on the next IOLoop iteration; otherwise starts
listening for reads on the socket.
"""
# See if we've already got the data from a previous read
self._run_streaming_callback()
pos = self._find_read_pos()
if pos is not None:
self._read_from_buffer(pos)
return
self._check_closed()
try:
pos = self._read_to_buffer_loop()
except Exception:
            # If there was an error in _read_to_buffer, we called close() already,
# but couldn't run the close callback because of _pending_callbacks.
# Before we escape from this function, run the close callback if
# applicable.
self._maybe_run_close_callback()
raise
if pos is not None:
self._read_from_buffer(pos)
return
# We couldn't satisfy the read inline, so either close the stream
# or listen for new data.
if self.closed():
self._maybe_run_close_callback()
else:
self._add_io_state(ioloop.IOLoop.READ)
def _read_to_buffer(self):
"""Reads from the socket and appends the result to the read buffer.
Returns the number of bytes read. Returns 0 if there is nothing
to read (i.e. the read returns EWOULDBLOCK or equivalent). On
error closes the socket and raises an exception.
"""
try:
chunk = self.read_from_fd()
except (socket.error, IOError, OSError) as e:
# ssl.SSLError is a subclass of socket.error
if self._is_connreset(e):
# Treat ECONNRESET as a connection close rather than
# an error to minimize log spam (the exception will
# be available on self.error for apps that care).
self.close(exc_info=True)
return
self.close(exc_info=True)
raise
if chunk is None:
return 0
self._read_buffer.append(chunk)
self._read_buffer_size += len(chunk)
if self._read_buffer_size > self.max_buffer_size:
gen_log.error("Reached maximum read buffer size")
self.close()
raise StreamBufferFullError("Reached maximum read buffer size")
return len(chunk)
def _run_streaming_callback(self):
if self._streaming_callback is not None and self._read_buffer_size:
bytes_to_consume = self._read_buffer_size
if self._read_bytes is not None:
bytes_to_consume = min(self._read_bytes, bytes_to_consume)
self._read_bytes -= bytes_to_consume
self._run_read_callback(bytes_to_consume, True)
def _read_from_buffer(self, pos):
"""Attempts to complete the currently-pending read from the buffer.
The argument is either a position in the read buffer or None,
as returned by _find_read_pos.
"""
self._read_bytes = self._read_delimiter = self._read_regex = None
self._read_partial = False
self._run_read_callback(pos, False)
def _find_read_pos(self):
"""Attempts to find a position in the read buffer that satisfies
the currently-pending read.
Returns a position in the buffer if the current read can be satisfied,
or None if it cannot.
"""
if (self._read_bytes is not None and
(self._read_buffer_size >= self._read_bytes or
(self._read_partial and self._read_buffer_size > 0))):
num_bytes = min(self._read_bytes, self._read_buffer_size)
return num_bytes
elif self._read_delimiter is not None:
# Multi-byte delimiters (e.g. '\r\n') may straddle two
# chunks in the read buffer, so we can't easily find them
# without collapsing the buffer. However, since protocols
# using delimited reads (as opposed to reads of a known
# length) tend to be "line" oriented, the delimiter is likely
# to be in the first few chunks. Merge the buffer gradually
# since large merges are relatively expensive and get undone in
# _consume().
if self._read_buffer:
while True:
loc = self._read_buffer[0].find(self._read_delimiter)
if loc != -1:
delimiter_len = len(self._read_delimiter)
self._check_max_bytes(self._read_delimiter,
loc + delimiter_len)
return loc + delimiter_len
if len(self._read_buffer) == 1:
break
_double_prefix(self._read_buffer)
self._check_max_bytes(self._read_delimiter,
len(self._read_buffer[0]))
elif self._read_regex is not None:
if self._read_buffer:
while True:
m = self._read_regex.search(self._read_buffer[0])
if m is not None:
self._check_max_bytes(self._read_regex, m.end())
return m.end()
if len(self._read_buffer) == 1:
break
_double_prefix(self._read_buffer)
self._check_max_bytes(self._read_regex,
len(self._read_buffer[0]))
return None
def _check_max_bytes(self, delimiter, size):
if (self._read_max_bytes is not None and
size > self._read_max_bytes):
raise UnsatisfiableReadError(
"delimiter %r not found within %d bytes" % (
delimiter, self._read_max_bytes))
def _handle_write(self):
while self._write_buffer:
try:
if not self._write_buffer_frozen:
# On windows, socket.send blows up if given a
# write buffer that's too large, instead of just
# returning the number of bytes it was able to
# process. Therefore we must not call socket.send
# with more than 128KB at a time.
_merge_prefix(self._write_buffer, 128 * 1024)
num_bytes = self.write_to_fd(self._write_buffer[0])
if num_bytes == 0:
# With OpenSSL, if we couldn't write the entire buffer,
# the very same string object must be used on the
# next call to send. Therefore we suppress
# merging the write buffer after an incomplete send.
# A cleaner solution would be to set
# SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this is
# not yet accessible from python
# (http://bugs.python.org/issue8240)
self._write_buffer_frozen = True
break
self._write_buffer_frozen = False
_merge_prefix(self._write_buffer, num_bytes)
self._write_buffer.popleft()
self._write_buffer_size -= num_bytes
except (socket.error, IOError, OSError) as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
self._write_buffer_frozen = True
break
else:
if not self._is_connreset(e):
# Broken pipe errors are usually caused by connection
# reset, and its better to not log EPIPE errors to
# minimize log spam
gen_log.warning("Write error on %s: %s",
self.fileno(), e)
self.close(exc_info=True)
return
if not self._write_buffer:
if self._write_callback:
callback = self._write_callback
self._write_callback = None
self._run_callback(callback)
if self._write_future:
future = self._write_future
self._write_future = None
future.set_result(None)
def _consume(self, loc):
if loc == 0:
return b""
_merge_prefix(self._read_buffer, loc)
self._read_buffer_size -= loc
return self._read_buffer.popleft()
def _check_closed(self):
if self.closed():
raise StreamClosedError("Stream is closed")
def _maybe_add_error_listener(self):
# This method is part of an optimization: to detect a connection that
# is closed when we're not actively reading or writing, we must listen
# for read events. However, it is inefficient to do this when the
# connection is first established because we are going to read or write
# immediately anyway. Instead, we insert checks at various times to
# see if the connection is idle and add the read listener then.
if self._pending_callbacks != 0:
return
if self._state is None or self._state == ioloop.IOLoop.ERROR:
if self.closed():
self._maybe_run_close_callback()
elif (self._read_buffer_size == 0 and
self._close_callback is not None):
self._add_io_state(ioloop.IOLoop.READ)
def _add_io_state(self, state):
"""Adds `state` (IOLoop.{READ,WRITE} flags) to our event handler.
Implementation notes: Reads and writes have a fast path and a
slow path. The fast path reads synchronously from socket
buffers, while the slow path uses `_add_io_state` to schedule
an IOLoop callback. Note that in both cases, the callback is
run asynchronously with `_run_callback`.
To detect closed connections, we must have called
`_add_io_state` at some point, but we want to delay this as
much as possible so we don't have to set an `IOLoop.ERROR`
listener that will be overwritten by the next slow-path
operation. As long as there are callbacks scheduled for
fast-path ops, those callbacks may do more reads.
If a sequence of fast-path ops do not end in a slow-path op,
(e.g. for an @asynchronous long-poll request), we must add
the error handler. This is done in `_run_callback` and `write`
(since the write callback is optional so we can have a
fast-path write with no `_run_callback`)
"""
if self.closed():
# connection has been closed, so there can be no future events
return
if self._state is None:
self._state = ioloop.IOLoop.ERROR | state
with stack_context.NullContext():
self.io_loop.add_handler(
self.fileno(), self._handle_events, self._state)
elif not self._state & state:
self._state = self._state | state
self.io_loop.update_handler(self.fileno(), self._state)
def _is_connreset(self, exc):
"""Return true if exc is ECONNRESET or equivalent.
May be overridden in subclasses.
"""
return (isinstance(exc, (socket.error, IOError)) and
errno_from_exception(exc) in _ERRNO_CONNRESET)
class IOStream(BaseIOStream):
r"""Socket-based `IOStream` implementation.
This class supports the read and write methods from `BaseIOStream`
plus a `connect` method.
The ``socket`` parameter may either be connected or unconnected.
For server operations the socket is the result of calling
`socket.accept <socket.socket.accept>`. For client operations the
socket is created with `socket.socket`, and may either be
connected before passing it to the `IOStream` or connected with
`IOStream.connect`.
A very simple (and broken) HTTP client using this class:
.. testcode::
import tornado.ioloop
import tornado.iostream
import socket
def send_request():
stream.write(b"GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n")
stream.read_until(b"\r\n\r\n", on_headers)
def on_headers(data):
headers = {}
for line in data.split(b"\r\n"):
parts = line.split(b":")
if len(parts) == 2:
headers[parts[0].strip()] = parts[1].strip()
stream.read_bytes(int(headers[b"Content-Length"]), on_body)
def on_body(data):
print(data)
stream.close()
tornado.ioloop.IOLoop.current().stop()
if __name__ == '__main__':
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
stream = tornado.iostream.IOStream(s)
stream.connect(("friendfeed.com", 80), send_request)
tornado.ioloop.IOLoop.current().start()
.. testoutput::
:hide:
"""
def __init__(self, socket, *args, **kwargs):
self.socket = socket
self.socket.setblocking(False)
super(IOStream, self).__init__(*args, **kwargs)
def fileno(self):
return self.socket
def close_fd(self):
self.socket.close()
self.socket = None
def get_fd_error(self):
errno = self.socket.getsockopt(socket.SOL_SOCKET,
socket.SO_ERROR)
return socket.error(errno, os.strerror(errno))
def read_from_fd(self):
try:
chunk = self.socket.recv(self.read_chunk_size)
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
def write_to_fd(self, data):
return self.socket.send(data)
def connect(self, address, callback=None, server_hostname=None):
"""Connects the socket to a remote address without blocking.
May only be called if the socket passed to the constructor was
not previously connected. The address parameter is in the
same format as for `socket.connect <socket.socket.connect>` for
the type of socket passed to the IOStream constructor,
e.g. an ``(ip, port)`` tuple. Hostnames are accepted here,
but will be resolved synchronously and block the IOLoop.
If you have a hostname instead of an IP address, the `.TCPClient`
class is recommended instead of calling this method directly.
`.TCPClient` will do asynchronous DNS resolution and handle
both IPv4 and IPv6.
If ``callback`` is specified, it will be called with no
arguments when the connection is completed; if not this method
returns a `.Future` (whose result after a successful
connection will be the stream itself).
In SSL mode, the ``server_hostname`` parameter will be used
for certificate validation (unless disabled in the
``ssl_options``) and SNI (if supported; requires Python
2.7.9+).
Note that it is safe to call `IOStream.write
<BaseIOStream.write>` while the connection is pending, in
which case the data will be written as soon as the connection
is ready. Calling `IOStream` read methods before the socket is
connected works on some platforms but is non-portable.
.. versionchanged:: 4.0
If no callback is given, returns a `.Future`.
.. versionchanged:: 4.2
SSL certificates are validated by default; pass
``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a
suitably-configured `ssl.SSLContext` to the
`SSLIOStream` constructor to disable.
"""
self._connecting = True
if callback is not None:
self._connect_callback = stack_context.wrap(callback)
future = None
else:
future = self._connect_future = TracebackFuture()
try:
self.socket.connect(address)
except socket.error as e:
# In non-blocking mode we expect connect() to raise an
# exception with EINPROGRESS or EWOULDBLOCK.
#
# On freebsd, other errors such as ECONNREFUSED may be
# returned immediately when attempting to connect to
# localhost, so handle them the same way as an error
# reported later in _handle_connect.
if (errno_from_exception(e) not in _ERRNO_INPROGRESS and
errno_from_exception(e) not in _ERRNO_WOULDBLOCK):
if future is None:
gen_log.warning("Connect error on fd %s: %s",
self.socket.fileno(), e)
self.close(exc_info=True)
return future
self._add_io_state(self.io_loop.WRITE)
return future
def start_tls(self, server_side, ssl_options=None, server_hostname=None):
"""Convert this `IOStream` to an `SSLIOStream`.
This enables protocols that begin in clear-text mode and
switch to SSL after some initial negotiation (such as the
``STARTTLS`` extension to SMTP and IMAP).
This method cannot be used if there are outstanding reads
or writes on the stream, or if there is any data in the
IOStream's buffer (data in the operating system's socket
buffer is allowed). This means it must generally be used
immediately after reading or writing the last clear-text
data. It can also be used immediately after connecting,
before any reads or writes.
The ``ssl_options`` argument may be either an `ssl.SSLContext`
object or a dictionary of keyword arguments for the
`ssl.wrap_socket` function. The ``server_hostname`` argument
will be used for certificate validation unless disabled
in the ``ssl_options``.
This method returns a `.Future` whose result is the new
`SSLIOStream`. After this method has been called,
any other operation on the original stream is undefined.
If a close callback is defined on this stream, it will be
transferred to the new stream.
.. versionadded:: 4.0
.. versionchanged:: 4.2
SSL certificates are validated by default; pass
``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a
suitably-configured `ssl.SSLContext` to disable.
"""
if (self._read_callback or self._read_future or
self._write_callback or self._write_future or
self._connect_callback or self._connect_future or
self._pending_callbacks or self._closed or
self._read_buffer or self._write_buffer):
raise ValueError("IOStream is not idle; cannot convert to SSL")
if ssl_options is None:
if server_side:
ssl_options = _server_ssl_defaults
else:
ssl_options = _client_ssl_defaults
socket = self.socket
self.io_loop.remove_handler(socket)
self.socket = None
socket = ssl_wrap_socket(socket, ssl_options,
server_hostname=server_hostname,
server_side=server_side,
do_handshake_on_connect=False)
orig_close_callback = self._close_callback
self._close_callback = None
future = TracebackFuture()
ssl_stream = SSLIOStream(socket, ssl_options=ssl_options,
io_loop=self.io_loop)
# Wrap the original close callback so we can fail our Future as well.
# If we had an "unwrap" counterpart to this method we would need
# to restore the original callback after our Future resolves
# so that repeated wrap/unwrap calls don't build up layers.
def close_callback():
if not future.done():
future.set_exception(ssl_stream.error or StreamClosedError())
if orig_close_callback is not None:
orig_close_callback()
ssl_stream.set_close_callback(close_callback)
ssl_stream._ssl_connect_callback = lambda: future.set_result(ssl_stream)
ssl_stream.max_buffer_size = self.max_buffer_size
ssl_stream.read_chunk_size = self.read_chunk_size
return future
def _handle_connect(self):
err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
if err != 0:
self.error = socket.error(err, os.strerror(err))
# IOLoop implementations may vary: some of them return
# an error state before the socket becomes writable, so
# in that case a connection failure would be handled by the
# error path in _handle_events instead of here.
if self._connect_future is None:
gen_log.warning("Connect error on fd %s: %s",
self.socket.fileno(), errno.errorcode[err])
self.close()
return
if self._connect_callback is not None:
callback = self._connect_callback
self._connect_callback = None
self._run_callback(callback)
if self._connect_future is not None:
future = self._connect_future
self._connect_future = None
future.set_result(self)
self._connecting = False
def set_nodelay(self, value):
if (self.socket is not None and
self.socket.family in (socket.AF_INET, socket.AF_INET6)):
try:
self.socket.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY, 1 if value else 0)
except socket.error as e:
# Sometimes setsockopt will fail if the socket is closed
# at the wrong time. This can happen with HTTPServer
# resetting the value to false between requests.
if e.errno != errno.EINVAL and not self._is_connreset(e):
raise
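# Editor's note (illustrative sketch, not part of the original file): the
# connect()/start_tls() flow documented above, written as a coroutine. The
# host, port and "STARTTLS" exchange are hypothetical; all methods used here
# (connect, read_until, write, start_tls) return Futures when no callback is
# given, so they can be yielded from a tornado.gen coroutine.
#
#     @gen.coroutine
#     def starttls_client():
#         stream = IOStream(socket.socket())
#         yield stream.connect(("127.0.0.1", 2525))
#         yield stream.read_until(b"\r\n")        # plain-text greeting
#         yield stream.write(b"STARTTLS\r\n")
#         yield stream.read_until(b"\r\n")        # server acknowledgement
#         tls_stream = yield stream.start_tls(False, server_hostname="example.com")
#         # From here on, only tls_stream may be used; the original stream is dead.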
class SSLIOStream(IOStream):
"""A utility class to write to and read from a non-blocking SSL socket.
If the socket passed to the constructor is already connected,
it should be wrapped with::
ssl.wrap_socket(sock, do_handshake_on_connect=False, **kwargs)
before constructing the `SSLIOStream`. Unconnected sockets will be
wrapped when `IOStream.connect` is finished.
"""
def __init__(self, *args, **kwargs):
"""The ``ssl_options`` keyword argument may either be an
`ssl.SSLContext` object or a dictionary of keywords arguments
for `ssl.wrap_socket`
"""
self._ssl_options = kwargs.pop('ssl_options', _client_ssl_defaults)
super(SSLIOStream, self).__init__(*args, **kwargs)
self._ssl_accepting = True
self._handshake_reading = False
self._handshake_writing = False
        self._ssl_connect_callback = None
        self._server_hostname = None
        self._ssl_connect_future = None
# If the socket is already connected, attempt to start the handshake.
try:
self.socket.getpeername()
except socket.error:
pass
else:
# Indirectly start the handshake, which will run on the next
# IOLoop iteration and then the real IO state will be set in
# _handle_events.
self._add_io_state(self.io_loop.WRITE)
def reading(self):
return self._handshake_reading or super(SSLIOStream, self).reading()
def writing(self):
return self._handshake_writing or super(SSLIOStream, self).writing()
def _do_ssl_handshake(self):
# Based on code from test_ssl.py in the python stdlib
try:
self._handshake_reading = False
self._handshake_writing = False
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] == ssl.SSL_ERROR_WANT_READ:
self._handshake_reading = True
return
elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE:
self._handshake_writing = True
return
elif err.args[0] in (ssl.SSL_ERROR_EOF,
ssl.SSL_ERROR_ZERO_RETURN):
return self.close(exc_info=True)
elif err.args[0] == ssl.SSL_ERROR_SSL:
try:
peer = self.socket.getpeername()
except Exception:
peer = '(not connected)'
gen_log.warning("SSL Error on %s %s: %s",
self.socket.fileno(), peer, err)
return self.close(exc_info=True)
raise
except socket.error as err:
# Some port scans (e.g. nmap in -sT mode) have been known
# to cause do_handshake to raise EBADF, so make that error
# quiet as well.
# https://groups.google.com/forum/?fromgroups#!topic/python-tornado/ApucKJat1_0
if self._is_connreset(err) or err.args[0] == errno.EBADF:
return self.close(exc_info=True)
raise
except AttributeError:
# On Linux, if the connection was reset before the call to
# wrap_socket, do_handshake will fail with an
# AttributeError.
return self.close(exc_info=True)
else:
self._ssl_accepting = False
if not self._verify_cert(self.socket.getpeercert()):
self.close()
return
self._run_ssl_connect_callback()
def _run_ssl_connect_callback(self):
if self._ssl_connect_callback is not None:
callback = self._ssl_connect_callback
self._ssl_connect_callback = None
self._run_callback(callback)
if self._ssl_connect_future is not None:
future = self._ssl_connect_future
self._ssl_connect_future = None
future.set_result(self)
def _verify_cert(self, peercert):
"""Returns True if peercert is valid according to the configured
validation mode and hostname.
The ssl handshake already tested the certificate for a valid
CA signature; the only thing that remains is to check
the hostname.
"""
if isinstance(self._ssl_options, dict):
verify_mode = self._ssl_options.get('cert_reqs', ssl.CERT_NONE)
elif isinstance(self._ssl_options, ssl.SSLContext):
verify_mode = self._ssl_options.verify_mode
assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL)
if verify_mode == ssl.CERT_NONE or self._server_hostname is None:
return True
cert = self.socket.getpeercert()
if cert is None and verify_mode == ssl.CERT_REQUIRED:
gen_log.warning("No SSL certificate given")
return False
try:
ssl_match_hostname(peercert, self._server_hostname)
except SSLCertificateError:
gen_log.warning("Invalid SSL certificate", exc_info=True)
return False
else:
return True
def _handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
super(SSLIOStream, self)._handle_read()
def _handle_write(self):
if self._ssl_accepting:
self._do_ssl_handshake()
return
super(SSLIOStream, self)._handle_write()
def connect(self, address, callback=None, server_hostname=None):
self._server_hostname = server_hostname
# Pass a dummy callback to super.connect(), which is slightly
# more efficient than letting it return a Future we ignore.
super(SSLIOStream, self).connect(address, callback=lambda: None)
return self.wait_for_handshake(callback)
def _handle_connect(self):
# Call the superclass method to check for errors.
super(SSLIOStream, self)._handle_connect()
if self.closed():
return
# When the connection is complete, wrap the socket for SSL
# traffic. Note that we do this by overriding _handle_connect
# instead of by passing a callback to super().connect because
# user callbacks are enqueued asynchronously on the IOLoop,
# but since _handle_events calls _handle_connect immediately
# followed by _handle_write we need this to be synchronous.
#
# The IOLoop will get confused if we swap out self.socket while the
# fd is registered, so remove it now and re-register after
# wrap_socket().
self.io_loop.remove_handler(self.socket)
old_state = self._state
self._state = None
self.socket = ssl_wrap_socket(self.socket, self._ssl_options,
server_hostname=self._server_hostname,
do_handshake_on_connect=False)
self._add_io_state(old_state)
def wait_for_handshake(self, callback=None):
"""Wait for the initial SSL handshake to complete.
If a ``callback`` is given, it will be called with no
arguments once the handshake is complete; otherwise this
method returns a `.Future` which will resolve to the
stream itself after the handshake is complete.
Once the handshake is complete, information such as
the peer's certificate and NPN/ALPN selections may be
accessed on ``self.socket``.
This method is intended for use on server-side streams
or after using `IOStream.start_tls`; it should not be used
with `IOStream.connect` (which already waits for the
handshake to complete). It may only be called once per stream.
.. versionadded:: 4.2
"""
if (self._ssl_connect_callback is not None or
self._ssl_connect_future is not None):
raise RuntimeError("Already waiting")
if callback is not None:
self._ssl_connect_callback = stack_context.wrap(callback)
future = None
else:
future = self._ssl_connect_future = TracebackFuture()
if not self._ssl_accepting:
self._run_ssl_connect_callback()
return future
def write_to_fd(self, data):
try:
return self.socket.send(data)
except ssl.SSLError as e:
if e.args[0] == ssl.SSL_ERROR_WANT_WRITE:
# In Python 3.5+, SSLSocket.send raises a WANT_WRITE error if
# the socket is not writeable; we need to transform this into
# an EWOULDBLOCK socket.error or a zero return value,
# either of which will be recognized by the caller of this
# method. Prior to Python 3.5, an unwriteable socket would
# simply return 0 bytes written.
return 0
raise
def read_from_fd(self):
if self._ssl_accepting:
# If the handshake hasn't finished yet, there can't be anything
# to read (attempting to read may or may not raise an exception
# depending on the SSL version)
return None
try:
# SSLSocket objects have both a read() and recv() method,
# while regular sockets only have recv().
# The recv() method blocks (at least in python 2.6) if it is
# called when there is nothing to read, so we have to use
# read() instead.
chunk = self.socket.read(self.read_chunk_size)
except ssl.SSLError as e:
# SSLError is a subclass of socket.error, so this except
# block must come first.
if e.args[0] == ssl.SSL_ERROR_WANT_READ:
return None
else:
raise
except socket.error as e:
if e.args[0] in _ERRNO_WOULDBLOCK:
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
def _is_connreset(self, e):
if isinstance(e, ssl.SSLError) and e.args[0] == ssl.SSL_ERROR_EOF:
return True
return super(SSLIOStream, self)._is_connreset(e)
class PipeIOStream(BaseIOStream):
"""Pipe-based `IOStream` implementation.
The constructor takes an integer file descriptor (such as one returned
by `os.pipe`) rather than an open file object. Pipes are generally
one-way, so a `PipeIOStream` can be used for reading or writing but not
both.
"""
def __init__(self, fd, *args, **kwargs):
self.fd = fd
_set_nonblocking(fd)
super(PipeIOStream, self).__init__(*args, **kwargs)
def fileno(self):
return self.fd
def close_fd(self):
os.close(self.fd)
def write_to_fd(self, data):
return os.write(self.fd, data)
def read_from_fd(self):
try:
chunk = os.read(self.fd, self.read_chunk_size)
except (IOError, OSError) as e:
if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
return None
elif errno_from_exception(e) == errno.EBADF:
# If the writing half of a pipe is closed, select will
# report it as readable but reads will fail with EBADF.
self.close(exc_info=True)
return None
else:
raise
if not chunk:
self.close()
return None
return chunk
def _double_prefix(deque):
"""Grow by doubling, but don't split the second chunk just because the
first one is small.
"""
new_len = max(len(deque[0]) * 2,
(len(deque[0]) + len(deque[1])))
_merge_prefix(deque, new_len)
def _merge_prefix(deque, size):
"""Replace the first entries in a deque of strings with a single
string of up to size bytes.
>>> d = collections.deque(['abc', 'de', 'fghi', 'j'])
>>> _merge_prefix(d, 5); print(d)
deque(['abcde', 'fghi', 'j'])
Strings will be split as necessary to reach the desired size.
>>> _merge_prefix(d, 7); print(d)
deque(['abcdefg', 'hi', 'j'])
>>> _merge_prefix(d, 3); print(d)
deque(['abc', 'defg', 'hi', 'j'])
>>> _merge_prefix(d, 100); print(d)
deque(['abcdefghij'])
"""
if len(deque) == 1 and len(deque[0]) <= size:
return
prefix = []
remaining = size
while deque and remaining > 0:
chunk = deque.popleft()
if len(chunk) > remaining:
deque.appendleft(chunk[remaining:])
chunk = chunk[:remaining]
prefix.append(chunk)
remaining -= len(chunk)
# This data structure normally just contains byte strings, but
# the unittest gets messy if it doesn't use the default str() type,
# so do the merge based on the type of data that's actually present.
if prefix:
deque.appendleft(type(prefix[0])().join(prefix))
if not deque:
deque.appendleft(b"")
def doctests():
import doctest
return doctest.DocTestSuite()
| apache-2.0 |
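Editor's sketch (not part of the file above): a minimal, hedged example of driving the PipeIOStream class defined above. It connects the two halves of an os.pipe() on one IOLoop and reads a newline-delimited message; only APIs shown in the file (PipeIOStream, BaseIOStream.read_until/write, IOLoop.current) are used, and the message text and variable names are illustrative.

# Minimal usage sketch for PipeIOStream (illustrative; assumes Tornado 4.x).
import os

import tornado.ioloop
import tornado.iostream


def main():
    read_fd, write_fd = os.pipe()
    reader = tornado.iostream.PipeIOStream(read_fd)
    writer = tornado.iostream.PipeIOStream(write_fd)

    def on_line(data):
        # Fires once the b"\n" delimiter arrives on the reading half.
        print("received:", data)
        tornado.ioloop.IOLoop.current().stop()

    reader.read_until(b"\n", on_line)
    writer.write(b"hello through a pipe\n")
    tornado.ioloop.IOLoop.current().start()


if __name__ == "__main__":
    main()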
foufou55/Sick-Beard | sickbeard/frenchFinder.py | 21 | 6074 | # Author: Ludovic SARAKHA
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import datetime
import operator
import sickbeard
from sickbeard import db
from sickbeard import helpers, logger, show_name_helpers
from sickbeard import providers
from sickbeard import search
from sickbeard.common import SNATCHED_FRENCH
from sickbeard.common import showLanguages
import re
resultFilters = ["sub(pack|s|bed)", "nlsub(bed|s)?", "swesub(bed)?",
"(dir|sample|nfo)fix", "sample", "(dvd)?extras"]
class FrenchFinder():
def __init__(self, force=None, show=None):
        # TODO: if not sickbeard.DOWNLOAD_FRENCH:
# return
        if sickbeard.showList is None:
return
logger.log(u"Beginning the search for french episodes older than "+ str(sickbeard.FRENCH_DELAY) +" days")
frenchlist=[]
#get list of english episodes that we want to search in french
myDB = db.DBConnection()
today = datetime.date.today().toordinal()
if show:
frenchsql=myDB.select("SELECT showid, season, episode from tv_episodes where audio_langs='en' and tv_episodes.showid =? and (? - tv_episodes.airdate) > ? order by showid, airdate asc",[show,today,sickbeard.FRENCH_DELAY])
count=myDB.select("SELECT count(*) from tv_episodes where audio_langs='en' and tv_episodes.showid =? and (? - tv_episodes.airdate) > ?",[show,today,sickbeard.FRENCH_DELAY])
else:
frenchsql=myDB.select("SELECT showid, season, episode from tv_episodes, tv_shows where audio_langs='en' and tv_episodes.showid = tv_shows.tvdb_id and tv_shows.frenchsearch = 1 and (? - tv_episodes.airdate) > ? order by showid, airdate asc",[today,sickbeard.FRENCH_DELAY])
count=myDB.select("SELECT count(*) from tv_episodes, tv_shows where audio_langs='en' and tv_episodes.showid = tv_shows.tvdb_id and tv_shows.frenchsearch = 1 and (? - tv_episodes.airdate) > ?",[today,sickbeard.FRENCH_DELAY])
#make the episodes objects
logger.log(u"Searching for "+str(count[0][0]) +" episodes in french")
for episode in frenchsql:
showObj = helpers.findCertainShow(sickbeard.showList, episode[0])
epObj = showObj.getEpisode(episode[1], episode[2])
frenchlist.append(epObj)
#for each episode in frenchlist fire a search in french
delay=[]
rest=count[0][0]
for frepisode in frenchlist:
rest=rest-1
if frepisode.show.tvdbid in delay:
logger.log(u"Previous episode for show "+str(frepisode.show.tvdbid)+" not found in french so skipping this search", logger.DEBUG)
continue
result=[]
for curProvider in providers.sortedProviderList():
if not curProvider.isActive():
continue
logger.log(u"Searching for french episode on "+curProvider.name +" for " +frepisode.show.name +" season "+str(frepisode.season)+" episode "+str(frepisode.episode))
try:
curfrench = curProvider.findFrench(frepisode, manualSearch=True)
except:
logger.log(u"Exception", logger.DEBUG)
pass
test=0
if curfrench:
for x in curfrench:
if not show_name_helpers.filterBadReleases(x.name):
logger.log(u"French "+x.name+" isn't a valid scene release that we want, ignoring it", logger.DEBUG)
test+=1
continue
if sickbeard.IGNORE_WORDS == "":
ignore_words="ztreyfgut"
else:
ignore_words=str(sickbeard.IGNORE_WORDS)
for fil in resultFilters + ignore_words.split(','):
if fil == showLanguages.get(u"fr"):
continue
if re.search('(^|[\W_])'+fil+'($|[\W_])', x.url, re.I) or re.search('(^|[\W_])'+fil+'($|[\W_])', x.name, re.I) :
logger.log(u"Invalid scene release: "+x.url+" contains "+fil+", ignoring it", logger.DEBUG)
test+=1
if test==0:
result.append(x)
best=None
try:
epi={}
epi[1]=frepisode
best = search.pickBestResult(result, episode = epi)
except:
pass
if best:
best.name=best.name + ' snatchedfr'
logger.log(u"Found french episode for " +frepisode.show.name +" season "+str(frepisode.season)+" episode "+str(frepisode.episode))
try:
search.snatchEpisode(best, SNATCHED_FRENCH)
except:
logger.log(u"Exception", logger.DEBUG)
pass
else:
delay.append(frepisode.show.tvdbid)
logger.log(u"No french episode found for " +frepisode.show.name +" season "+str(frepisode.season)+" episode "+str(frepisode.episode))
logger.log(str(rest) + u" episodes left")
| gpl-3.0 |
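Editor's sketch (not part of frenchFinder.py): the release-name filter applied inside the provider loop above, pulled out into a standalone helper so the word-boundary regex is easier to test on its own. The filter list mirrors resultFilters; the sample release names are made up.

import re

RESULT_FILTERS = ["sub(pack|s|bed)", "nlsub(bed|s)?", "swesub(bed)?",
                  "(dir|sample|nfo)fix", "sample", "(dvd)?extras"]


def is_filtered(release_name, ignore_words=""):
    """Return True if the name matches any filter or user ignore word."""
    extra = [w.strip() for w in ignore_words.split(',') if w.strip()]
    for fil in RESULT_FILTERS + extra:
        # Same pattern as in the loop above: the word bounded by
        # non-word characters or the string edges, case-insensitive.
        if re.search(r'(^|[\W_])' + fil + r'($|[\W_])', release_name, re.I):
            return True
    return False


print(is_filtered("Show.S01E01.FRENCH.720p.HDTV"))   # False
print(is_filtered("Show.S01E01.720p.swesubbed"))     # True
print(is_filtered("Show.S01E01.720p", "hdtv,cam"))   # False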
branchard/django-react-scrapy-sample | djangoapp/components/models.py | 1 | 3265 | from django.db import models
class Component(models.Model):
name = models.CharField(max_length=50, unique=True)
photoUrl = models.URLField(null=True, blank=True)
brand = models.ForeignKey(
'Brand',
on_delete=models.CASCADE,
)
class Brand(models.Model):
name = models.CharField(max_length=50)
class Processor(Component):
    frequency = models.FloatField() # GHz
cores = models.IntegerField()
socket = models.ForeignKey(
'Socket',
on_delete=models.CASCADE,
)
class Motherboard(Component):
ramSlots = models.IntegerField() # number of slots
    maxRam = models.IntegerField() # GB
ramtype = models.ForeignKey(
'RamType',
on_delete=models.CASCADE,
)
    ramfrequency = models.ManyToManyField("RamFrequency") # a motherboard is compatible with several RAM frequencies
socket = models.ForeignKey(
'Socket',
on_delete=models.CASCADE,
)
    pcitypes = models.ManyToManyField("PciType") # a motherboard can have several PCI slot types
formfactor = models.ForeignKey(
'MotherBoardFormFactor',
on_delete=models.CASCADE,
)
class Socket(models.Model):
name = models.CharField(max_length=10)
def __str__(self):
return "{{id: {}, name: {}}}".format(self.id, self.name)
class Ram(Component):
    capacity = models.IntegerField() # GB per stick; capacity * quantity = total memory
    quantity = models.IntegerField() # number of sticks
ramtype = models.ForeignKey(
'RamType',
on_delete=models.CASCADE,
)
frequency = models.ForeignKey(
'RamFrequency',
on_delete=models.CASCADE,
)
class RamFrequency(models.Model):
    frequency = models.IntegerField() # MHz
class RamType(models.Model):
typeName = models.CharField(max_length=10) # DDR2, DDR3, DDR4
class GraphicCard(Component):
    memory = models.IntegerField() # MB
pcitype = models.ForeignKey(
'PciType',
on_delete=models.CASCADE,
)
class PciType(models.Model):
name = models.CharField(max_length=50) # PCI-E 3.0, PCI-E 2.0
class Case(Component):
weight = models.FloatField() # in Kg
width = models.IntegerField() # in mm
height = models.IntegerField() # in mm
depth = models.IntegerField() # in mm
    motherBoardFormFactors = models.ManyToManyField("MotherBoardFormFactor") # a case can be compatible with several motherboard form factors
powerSupplyFormFactor = models.ForeignKey(
'PowerSupplyFormFactor',
on_delete=models.CASCADE,
)
class MotherBoardFormFactor(models.Model):
name = models.CharField(max_length=10)
class PowerSupply(Component):
watts = models.IntegerField() # in watt
modular = models.BooleanField()
factorForm = models.ForeignKey(
'PowerSupplyFormFactor',
on_delete=models.CASCADE,
)
class PowerSupplyFormFactor(models.Model):
name = models.CharField(max_length=10)
class HardDrive(Component):
    capacity = models.IntegerField() # GB
hardDriveType = models.ForeignKey(
'HardDriveType',
on_delete=models.CASCADE,
)
class HardDriveType(models.Model):
    name = models.CharField(max_length=10) # SSD or HDD
| gpl-3.0 |
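Editor's sketch (not part of models.py): a few ORM queries over the schema above — motherboards that match a processor's socket, and RAM kits that match a board's type and supported frequencies. The import path assumes the app is installed as "components", as the file path suggests; the function names are illustrative.

from components.models import Motherboard, Ram


def compatible_motherboards(processor):
    # Boards and CPU are compatible when they reference the same Socket row.
    return Motherboard.objects.filter(socket=processor.socket)


def compatible_ram(motherboard):
    # RAM must match the board's RamType and one of its supported frequencies.
    return Ram.objects.filter(
        ramtype=motherboard.ramtype,
        frequency__in=motherboard.ramfrequency.all(),
    )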