Dataset columns:
- repo_name: string (length 5-100)
- path: string (length 4-375)
- copies: string (991 distinct values)
- size: string (length 4-7)
- content: string (length 666-1M)
- license: string (15 distinct values)
repo_name: evaschalde/odoo
path: odoo.py
copies: 257
size: 5618
```python
#!/usr/bin/env python
#----------------------------------------------------------
# odoo cli
#
# To install your odoo development environment type:
#
# wget -O- https://raw.githubusercontent.com/odoo/odoo/8.0/odoo.py | python
#
# The setup_* subcommands used to bootstrap odoo are defined here inline and may
# only depend on the python 2.7 stdlib
#
# The rest of subcommands are defined in odoo/cli or in <module>/cli by
# subclassing the Command object
#
#----------------------------------------------------------
import os
import re
import sys
import subprocess

GIT_HOOKS_PRE_PUSH = """
#!/usr/bin/env python2
import re
import sys
if re.search('github.com[:/]odoo/odoo.git$', sys.argv[2]):
    print "Pushing to /odoo/odoo.git is forbidden, please push to odoo-dev, use --no-verify to override"
    sys.exit(1)
"""

def printf(f, *l):
    print "odoo:" + f % l

def run(*l):
    if isinstance(l[0], list):
        l = l[0]
    printf("running %s", " ".join(l))
    subprocess.check_call(l)

def git_locate():
    # Locate git dir
    # TODO add support for os.environ.get('GIT_DIR')

    # check for an odoo child
    if os.path.isfile('odoo/.git/config'):
        os.chdir('odoo')

    path = os.getcwd()
    while path != os.path.abspath(os.sep):
        gitconfig_path = os.path.join(path, '.git/config')
        if os.path.isfile(gitconfig_path):
            release_py = os.path.join(path, 'openerp/release.py')
            if os.path.isfile(release_py):
                break
        path = os.path.dirname(path)
    if path == os.path.abspath(os.sep):
        path = None
    return path

def cmd_setup_git():
    git_dir = git_locate()
    if git_dir:
        printf('git repo found at %s', git_dir)
    else:
        run("git", "init", "odoo")
        os.chdir('odoo')
        git_dir = os.getcwd()
    if git_dir:
        # push sane config for git < 2.0, and hooks
        #run('git','config','push.default','simple')
        # alias
        run('git','config','alias.st','status')
        # merge bzr style
        run('git','config','merge.commit','no')
        # pull let me choose between merge or rebase only works in git > 2.0, use an alias for 1
        run('git','config','pull.ff','only')
        run('git','config','alias.pl','pull --ff-only')
        pre_push_path = os.path.join(git_dir, '.git/hooks/pre-push')
        open(pre_push_path, 'w').write(GIT_HOOKS_PRE_PUSH.strip())
        os.chmod(pre_push_path, 0755)
        # setup odoo remote
        run('git','config','remote.odoo.url','https://github.com/odoo/odoo.git')
        run('git','config','remote.odoo.pushurl','[email protected]:odoo/odoo.git')
        run('git','config','--add','remote.odoo.fetch','dummy')
        run('git','config','--unset-all','remote.odoo.fetch')
        run('git','config','--add','remote.odoo.fetch','+refs/heads/*:refs/remotes/odoo/*')
        # setup odoo-dev remote
        run('git','config','remote.odoo-dev.url','https://github.com/odoo-dev/odoo.git')
        run('git','config','remote.odoo-dev.pushurl','[email protected]:odoo-dev/odoo.git')
        run('git','remote','update')
        # setup 8.0 branch
        run('git','config','branch.8.0.remote','odoo')
        run('git','config','branch.8.0.merge','refs/heads/8.0')
        run('git','checkout','8.0')
    else:
        printf('no git repo found')

def cmd_setup_git_dev():
    git_dir = git_locate()
    if git_dir:
        # setup odoo-dev remote
        run('git','config','--add','remote.odoo-dev.fetch','dummy')
        run('git','config','--unset-all','remote.odoo-dev.fetch')
        run('git','config','--add','remote.odoo-dev.fetch','+refs/heads/*:refs/remotes/odoo-dev/*')
        run('git','config','--add','remote.odoo-dev.fetch','+refs/pull/*:refs/remotes/odoo-dev/pull/*')
        run('git','remote','update')

def cmd_setup_git_review():
    git_dir = git_locate()
    if git_dir:
        # setup odoo remote
        run('git','config','--add','remote.odoo.fetch','dummy')
        run('git','config','--unset-all','remote.odoo.fetch')
        run('git','config','--add','remote.odoo.fetch','+refs/heads/*:refs/remotes/odoo/*')
        run('git','config','--add','remote.odoo.fetch','+refs/tags/*:refs/remotes/odoo/tags/*')
        run('git','config','--add','remote.odoo.fetch','+refs/pull/*:refs/remotes/odoo/pull/*')

def setup_deps_debian(git_dir):
    debian_control_path = os.path.join(git_dir, 'debian/control')
    debian_control = open(debian_control_path).read()
    debs = re.findall('python-[0-9a-z]+', debian_control)
    debs += ["postgresql"]
    proc = subprocess.Popen(['sudo','apt-get','install'] + debs, stdin=open('/dev/tty'))
    proc.communicate()

def cmd_setup_deps():
    git_dir = git_locate()
    if git_dir:
        if os.path.isfile('/etc/debian_version'):
            setup_deps_debian(git_dir)

def setup_pg_debian(git_dir):
    cmd = ['sudo','su','-','postgres','-c','createuser -s %s' % os.environ['USER']]
    subprocess.call(cmd)

def cmd_setup_pg():
    git_dir = git_locate()
    if git_dir:
        if os.path.isfile('/etc/debian_version'):
            setup_pg_debian(git_dir)

def cmd_setup():
    cmd_setup_git()
    cmd_setup_deps()
    cmd_setup_pg()

def main():
    # registry of commands
    g = globals()
    cmds = dict([(i[4:], g[i]) for i in g if i.startswith('cmd_')])
    # if curl URL | python2 then use command setup
    if len(sys.argv) == 1 and __file__ == '<stdin>':
        cmd_setup()
    elif len(sys.argv) == 2 and sys.argv[1] in cmds:
        cmds[sys.argv[1]]()
    else:
        import openerp
        openerp.cli.main()

if __name__ == "__main__":
    main()
```
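The dispatch in `main()` is the interesting bit: any module-level function named `cmd_*` automatically becomes a CLI subcommand. A minimal standalone sketch of the same pattern (the `cmd_hello` name here is invented for illustration):

```python
# Sketch of the cmd_-prefix dispatch used by main() above.
import sys

def cmd_hello():  # hypothetical subcommand
    print("hello")

cmds = {name[4:]: obj for name, obj in globals().items()
        if name.startswith('cmd_')}
if len(sys.argv) == 2 and sys.argv[1] in cmds:
    cmds[sys.argv[1]]()  # e.g. "python script.py hello"
```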
license: agpl-3.0

repo_name: internetarchive/warctools
path: hanzo/warctools/warc.py
copies: 1
size: 11905
"""An object to represent warc records, using the abstract record in record.py""" import re import hashlib from hanzo.warctools.record import ArchiveRecord, ArchiveParser from hanzo.warctools.archive_detect import register_record_type import uuid bad_lines = 5 # when to give up looking for the version stamp @ArchiveRecord.HEADERS( DATE=b'WARC-Date', TYPE=b'WARC-Type', ID=b'WARC-Record-ID', CONCURRENT_TO=b'WARC-Concurrent-To', REFERS_TO=b'WARC-Refers-To', REFERS_TO_TARGET_URI=b'WARC-Refers-To-Target-URI', REFERS_TO_DATE=b'WARC-Refers-To-Date', CONTENT_LENGTH=b'Content-Length', CONTENT_TYPE=b'Content-Type', URL=b'WARC-Target-URI', BLOCK_DIGEST=b'WARC-Block-Digest', PAYLOAD_DIGEST=b'WARC-Payload-Digest', IP_ADDRESS=b'WARC-IP-Address', FILENAME=b'WARC-Filename', WARCINFO_ID=b'WARC-Warcinfo-ID', PROFILE=b'WARC-Profile' ) class WarcRecord(ArchiveRecord): # Pylint is very bad at decorators, E1101 is the message that says # a member variable does not exist # pylint: disable-msg=E1101 VERSION = b"WARC/1.0" VERSION18 = b"WARC/0.18" VERSION17 = b"WARC/0.17" RESPONSE = b"response" RESOURCE = b"resource" REQUEST = b"request" REVISIT = b"revisit" METADATA = b"metadata" CONVERSION = b"conversion" WARCINFO = b"warcinfo" PROFILE_IDENTICAL_PAYLOAD_DIGEST = b"http://netpreserve.org/warc/1.0/revisit/identical-payload-digest" TRAILER = b'\r\n\r\n' def __init__(self, version=VERSION, headers=None, content=None, errors=None, content_file=None): """ WarcRecord constructor. Either content or content_file must be provided, but not both. If content, which is a tuple (content_type, content_buffer), is provided, when writing the warc record, any Content-Type and Content-Length that appear in the supplied headers are ignored, and the values content[0] and len(content[1]), respectively, are used. When reading, the caller can stream content_file or use content, which is lazily filled using content_file, and after which content_file is unavailable. 
""" ArchiveRecord.__init__(self, headers, content, errors) self.version = version self.content_file = content_file @property def id(self): return self.get_header(self.ID) def _write_to(self, out, nl): """WARC Format: VERSION NL (Key: Value NL)* NL CONTENT NL NL don't write multi line headers """ out.write(self.version) out.write(nl) for k, v in self.headers: if self.content_file is not None or k not in (self.CONTENT_TYPE, self.CONTENT_LENGTH): out.write(k) out.write(b": ") out.write(v) out.write(nl) if self.content_file is not None: out.write(nl) # end of header blank nl while True: buf = self.content_file.read(8192) if buf == b'': break out.write(buf) else: # if content tuple is provided, set Content-Type and # Content-Length based on the values in the tuple content_type, content_buffer = self.content if content_type: out.write(self.CONTENT_TYPE) out.write(b": ") out.write(content_type) out.write(nl) if content_buffer is None: content_buffer = b"" content_length = len(content_buffer) out.write(self.CONTENT_LENGTH) out.write(b": ") out.write(str(content_length).encode('ascii')) out.write(nl) out.write(nl) # end of header blank nl if content_buffer: out.write(content_buffer) # end of record nl nl out.write(nl) out.write(nl) out.flush() def repair(self): pass def validate(self): return self.errors @classmethod def make_parser(self): return WarcParser() def block_digest(self, content_buffer): block_hash = hashlib.sha256() block_hash.update(content_buffer) digest = "sha256:%s" % block_hash.hexdigest() return digest @staticmethod def warc_uuid(text): return "<urn:uuid:{}>".format(uuid.UUID(hashlib.sha1(text).hexdigest()[0:32])).encode('ascii') @staticmethod def random_warc_uuid(): return "<urn:uuid:{}>".format(uuid.uuid4()).encode('ascii') def rx(pat): """Helper to compile regexps with IGNORECASE option set.""" return re.compile(pat, flags=re.IGNORECASE) version_rx = rx(br'^(?P<prefix>.*?)(?P<version>\s*WARC/(?P<number>.*?))' b'(?P<nl>\r\n|\r|\n)\\Z') # a header is key: <ws> value plus any following lines with leading whitespace header_rx = rx(br'^(?P<name>.*?):\s?(?P<value>.*?)' b'(?P<nl>\r\n|\r|\n)\\Z') value_rx = rx(br'^\s+(?P<value>.+?)' b'(?P<nl>\r\n|\r|\n)\\Z') nl_rx = rx(b'^(?P<nl>\r\n|\r|\n\\Z)') length_rx = rx(b'^' + WarcRecord.CONTENT_LENGTH + b'$' ) # pylint: disable-msg=E1101 type_rx = rx(b'^' + WarcRecord.CONTENT_TYPE + b'$') # pylint: disable-msg=E1101 required_headers = set(( WarcRecord.TYPE.lower(), # pylint: disable-msg=E1101 WarcRecord.ID.lower(), # pylint: disable-msg=E1101 WarcRecord.CONTENT_LENGTH.lower(), # pylint: disable-msg=E1101 WarcRecord.DATE.lower(), # pylint: disable-msg=E1101 )) class WarcParser(ArchiveParser): KNOWN_VERSIONS = set((b'1.0', b'0.17', b'0.18')) def parse(self, stream, offset, line=None): """Reads a warc record from the stream, returns a tuple (record, errors). Either records is null or errors is null. 
Any record-specific errors are contained in the record - errors is only used when *nothing* could be parsed""" # pylint: disable-msg=E1101 errors = [] version = None # find WARC/.* if line is None: line = stream.readline() while line: match = version_rx.match(line) if match: version = match.group('version') if offset is not None: offset += len(match.group('prefix')) break else: if offset is not None: offset += len(line) if not nl_rx.match(line): errors.append(('ignored line', line)) if len(errors) > bad_lines: errors.append(('too many errors, giving up hope',)) return (None, errors, offset) line = stream.readline() if not line: if version: errors.append(('warc version but no headers', version)) return (None, errors, offset) if line: content_length = 0 content_type = None record = WarcRecord(errors=errors, version=version) if match.group('nl') != b'\x0d\x0a': record.error('incorrect newline in version', match.group('nl')) if match.group('number') not in self.KNOWN_VERSIONS: record.error('version field is not known (%s)' % (",".join(self.KNOWN_VERSIONS)), match.group('number')) prefix = match.group('prefix') if prefix: record.error('bad prefix on WARC version header', prefix) #Read headers line = stream.readline() while line and not nl_rx.match(line): #print 'header', repr(line) match = header_rx.match(line) if match: if match.group('nl') != b'\x0d\x0a': record.error('incorrect newline in header', match.group('nl')) name = match.group('name').strip() value = [match.group('value').strip()] #print 'match',name, value line = stream.readline() match = value_rx.match(line) while match: #print 'follow', repr(line) if match.group('nl') != b'\x0d\x0a': record.error('incorrect newline in follow header', line, match.group('nl')) value.append(match.group('value').strip()) line = stream.readline() match = value_rx.match(line) value = b" ".join(value) record.headers.append((name, value)) if type_rx.match(name): if value: content_type = value else: record.error('invalid header', name, value) elif length_rx.match(name): try: #print name, value content_length = int(value) #print content_length except ValueError: record.error('invalid header', name, value) # have read blank line following headers record.content_file = stream record.content_file.bytes_to_eoc = content_length # check mandatory headers # WARC-Type WARC-Date WARC-Record-ID Content-Length return (record, (), offset) blank_rx = rx(br'^$') register_record_type(version_rx, WarcRecord) register_record_type(blank_rx, WarcRecord) def make_response(id, date, url, content, request_id): # pylint: disable-msg=E1101 headers = [ (WarcRecord.TYPE, WarcRecord.RESPONSE), (WarcRecord.ID, id), (WarcRecord.DATE, date), (WarcRecord.URL, url), ] if request_id: headers.append((WarcRecord.CONCURRENT_TO, request_id)) record = WarcRecord(headers=headers, content=content) return record def make_request(request_id, date, url, content, response_id): # pylint: disable-msg=E1101 headers = [ (WarcRecord.TYPE, WarcRecord.REQUEST), (WarcRecord.ID, request_id), (WarcRecord.DATE, date), (WarcRecord.URL, url), ] if response_id: headers.append((WarcRecord.CONCURRENT_TO, response_id)) record = WarcRecord(headers=headers, content=content) return record def make_metadata(meta_id, date, content, concurrent_to=None, url=None): # pylint: disable-msg=E1101 headers = [ (WarcRecord.TYPE, WarcRecord.METADATA), (WarcRecord.ID, meta_id), (WarcRecord.DATE, date), ] if concurrent_to: headers.append((WarcRecord.CONCURRENT_TO, concurrent_to)) if url: headers.append((WarcRecord.URL, url)) record = 
WarcRecord(headers=headers, content=content) return record def make_conversion(conv_id, date, content, refers_to=None, url=None): # pylint: disable-msg=E1101 headers = [ (WarcRecord.TYPE, WarcRecord.CONVERSION), (WarcRecord.ID, conv_id), (WarcRecord.DATE, date), ] if refers_to: headers.append((WarcRecord.REFERS_TO, refers_to)) if url: headers.append((WarcRecord.URL, url)) record = WarcRecord(headers=headers, content=content) return record def warc_datetime_str(d): s = d.isoformat() if '.' in s: s = s[:s.find('.')] return (s + 'Z').encode('utf-8')
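For context, a record built with the factory helpers above can be queried through the generated header constants. A small sketch (assumes `hanzo.warctools` is importable; the date and URL values are invented):

```python
# Illustrative use of make_response() and the header accessors above.
rid = WarcRecord.random_warc_uuid()
record = make_response(rid, b'2015-01-01T00:00:00Z', b'http://example.com/',
                       (b'text/html', b'<html></html>'), None)
assert record.id == rid
print(record.get_header(WarcRecord.URL))  # b'http://example.com/'
```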
license: mit

repo_name: an7oine/WinVHS
path: Cygwin/lib/python2.7/encodings/euc_jis_2004.py
copies: 816
size: 1051
```python
#
# euc_jis_2004.py: Python Unicode Codec for EUC_JIS_2004
#
# Written by Hye-Shik Chang <[email protected]>
#

import _codecs_jp, codecs
import _multibytecodec as mbc

codec = _codecs_jp.getcodec('euc_jis_2004')

class Codec(codecs.Codec):
    encode = codec.encode
    decode = codec.decode

class IncrementalEncoder(mbc.MultibyteIncrementalEncoder,
                         codecs.IncrementalEncoder):
    codec = codec

class IncrementalDecoder(mbc.MultibyteIncrementalDecoder,
                         codecs.IncrementalDecoder):
    codec = codec

class StreamReader(Codec, mbc.MultibyteStreamReader, codecs.StreamReader):
    codec = codec

class StreamWriter(Codec, mbc.MultibyteStreamWriter, codecs.StreamWriter):
    codec = codec

def getregentry():
    return codecs.CodecInfo(
        name='euc_jis_2004',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
```
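Since this module only wires the C codec into the `codecs` registry, normal string methods can use it by name once the encoding search finds it (true of the stdlib copy this file mirrors):

```python
# Round-trip through the codec by name, using the standard registry.
text = "\u3042"  # HIRAGANA LETTER A
data = text.encode("euc_jis_2004")
assert data.decode("euc_jis_2004") == text
```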
license: gpl-3.0

repo_name: jgmanzanas/CMNT_004_15
path: project-addons/flask_middleware_connector/__openerp__.py
copies: 1
size: 1575
```python
# -*- coding: utf-8 -*-
##############################################################################
#
#    Copyright (C) 2015 Comunitea All Rights Reserved
#    $Omar Castiñeira Saavedra <[email protected]>$
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published
#    by the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

{
    'name': "Flask middleware connector",
    'version': '1.0',
    'category': 'Connector',
    'description': """Connect to Visiotech flask middleware using Odoo connector""",
    'author': 'Comunitea',
    'website': 'www.comunitea.com',
    "depends": ['base', 'product', 'connector', 'stock', 'custom_partner',
                'crm_claim_rma', 'product_virtual_stock_conservative'],
    "data": ["views/middleware_view.xml",
             "views/product_view.xml",
             'views/res_users.xml',
             "views/product_brand.xml",
             "views/claim_line_view.xml",
             "security/ir.model.access.csv"],
    "installable": True
}
```
license: agpl-3.0

repo_name: pwong-mapr/private-hue
path: desktop/core/ext-py/Django-1.4.5/django/conf/locale/cs/formats.py
copies: 86
size: 1313
```python
# -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. E Y'
TIME_FORMAT = 'G:i:s'
DATETIME_FORMAT = 'j. E Y G:i:s'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i:s'
FIRST_DAY_OF_WEEK = 1  # Monday

# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = (
    '%d.%m.%Y', '%d.%m.%y',        # '25.10.2006', '25.10.06'
    '%Y-%m-%d', '%y-%m-%d',        # '2006-10-25', '06-10-25'
    # '%d. %B %Y', '%d. %b. %Y',   # '25. October 2006', '25. Oct. 2006'
)
TIME_INPUT_FORMATS = (
    '%H:%M:%S',  # '14:30:59'
    '%H:%M',     # '14:30'
)
DATETIME_INPUT_FORMATS = (
    '%d.%m.%Y %H:%M:%S',  # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M',     # '25.10.2006 14:30'
    '%d.%m.%Y',           # '25.10.2006'
    '%Y-%m-%d %H:%M:%S',  # '2006-10-25 14:30:59'
    '%Y-%m-%d %H:%M',     # '2006-10-25 14:30'
    '%Y-%m-%d',           # '2006-10-25'
)
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = u'\xa0'  # non-breaking space
NUMBER_GROUPING = 3
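```
Because the `*_INPUT_FORMATS` entries are plain strftime patterns, they can be checked with the stdlib alone:

```python
# Quick check of the first DATE_INPUT_FORMATS pattern, pure stdlib.
from datetime import datetime
d = datetime.strptime('25.10.2006', '%d.%m.%Y')
assert d.date().isoformat() == '2006-10-25'
```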
license: apache-2.0

repo_name: MauHernandez/cyclope
path: cyclope/apps/contacts/urls.py
copies: 2
size: 1044
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010-2015 Código Sur Sociedad Civil.
# All rights reserved.
#
# This file is part of Cyclope.
#
# Cyclope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Cyclope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from django.conf.urls import patterns, url

from cyclope.views import ContentDeleteView

urlpatterns = patterns(
    '',
    url(r'^(?P<content_type>contact)/(?P<slug>[\w-]+)/delete/$',
        ContentDeleteView.as_view(), {'app': 'contacts'},
        name='contacts-delete'),
)
```
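A view or template would typically build links to this route by name rather than by pattern. A hedged sketch using the `reverse()` helper from the Django 1.x era this code targets (the slug value is invented):

```python
# Hypothetical lookup of the named route above (old-style Django import).
from django.core.urlresolvers import reverse
path = reverse('contacts-delete',
               kwargs={'content_type': 'contact', 'slug': 'some-contact'})
# -> 'contact/some-contact/delete/', relative to where this urlconf is included
```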
license: gpl-3.0

repo_name: nke001/attention-lvcsr
path: libs/blocks-extras/blocks/extras/bricks/__init__.py
copies: 7
size: 1928
```python
from theano import shared, tensor

from blocks.bricks import Feedforward
from blocks.bricks.base import application, lazy
from blocks.extras.initialization import PermutationMatrix
from blocks.extras.utils import check_valid_permutation
from blocks.utils import shared_floatx


class FixedPermutation(Feedforward):
    """Perform a fixed permutation of the input features.

    Parameters
    ----------
    order : ndarray-like
        A 1-dimensional container containing a permutation
        on the integers.
    dot : bool, optional
        Whether or not to perform the permutation by matrix
        multiplication. This may be faster in some circumstances
        but requires allocation of a permutation matrix.

    """
    @lazy(allocation=['order'])
    def __init__(self, order, dot=True, **kwargs):
        self.order = order
        self._dot = dot
        super(FixedPermutation, self).__init__(**kwargs)

    def _allocate(self):
        self.order = check_valid_permutation(self.order)
        if self.input_dim != len(self.order):
            raise ValueError("input_dim does not match length of order "
                             "vector")
        # No roles assigned here, since these are not learnable parameters.
        if self._dot:
            shape = (self.order.shape[0], self.order.shape[0])
            self._matrix = shared_floatx(
                PermutationMatrix(self.order).generate(None, shape))
        else:
            order = self.order.astype('int32')
            assert order.min() == 0  # Catch highly unlikely downcast issue.
            self._permutation = shared(order)

    @property
    def input_dim(self):
        return len(self.order)

    @application(inputs=['input_'], outputs=['output_'])
    def apply(self, input_):
        if self._dot:
            return tensor.dot(input_, self._matrix)
        else:
            return tensor.take(input_, self._permutation, axis=1)
```
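A usage sketch for the brick, under the assumption that `blocks` and `theano` are installed and that the standard brick lifecycle (`initialize()` triggering allocation) applies; the data is made up:

```python
# Sketch: permute the feature columns of a minibatch.
import numpy
from theano import function

x = tensor.matrix('x')
brick = FixedPermutation(order=numpy.array([2, 0, 1]))
brick.initialize()  # assumed to trigger _allocate() as for other bricks
f = function([x], brick.apply(x))
# Input columns are reordered according to `order`.
print(f(numpy.arange(6, dtype='float32').reshape(2, 3)))
```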
license: mit

repo_name: jn2840/bitcoin
path: qa/rpc-tests/mempool_packages.py
copies: 1
size: 7342
```python
#!/usr/bin/env python2
# Copyright (c) 2014-2015 The Beardcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.

# Test descendant package tracking code

from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *

def satoshi_round(amount):
    return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)

class MempoolPackagesTest(BitcoinTestFramework):

    def setup_network(self):
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-relaypriority=0", "-debug"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-maxorphantx=1000", "-relaypriority=0", "-limitancestorcount=5", "-debug"]))
        connect_nodes(self.nodes[0], 1)
        self.is_network_split = False
        self.sync_all()

    # Build a transaction that spends parent_txid:vout
    # Return amount sent
    def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs):
        send_value = satoshi_round((value - fee)/num_outputs)
        inputs = [ {'txid' : parent_txid, 'vout' : vout} ]
        outputs = {}
        for i in xrange(num_outputs):
            outputs[node.getnewaddress()] = send_value
        rawtx = node.createrawtransaction(inputs, outputs)
        signedtx = node.signrawtransaction(rawtx)
        txid = node.sendrawtransaction(signedtx['hex'])
        fulltx = node.getrawtransaction(txid, 1)
        assert(len(fulltx['vout']) == num_outputs) # make sure we didn't generate a change output
        return (txid, send_value)

    def run_test(self):
        ''' Mine some blocks and have them mature. '''
        self.nodes[0].generate(101)
        utxo = self.nodes[0].listunspent(10)
        txid = utxo[0]['txid']
        vout = utxo[0]['vout']
        value = utxo[0]['amount']

        fee = Decimal("0.0001")
        # 100 transactions off a confirmed tx should be fine
        chain = []
        for i in xrange(100):
            (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1)
            value = sent_value
            chain.append(txid)

        # Check mempool has 100 transactions in it, and descendant
        # count and fees should look correct
        mempool = self.nodes[0].getrawmempool(True)
        assert_equal(len(mempool), 100)
        descendant_count = 1
        descendant_fees = 0
        descendant_size = 0
        SATOSHIS = 100000000

        for x in reversed(chain):
            assert_equal(mempool[x]['descendantcount'], descendant_count)
            descendant_fees += mempool[x]['fee']
            assert_equal(mempool[x]['descendantfees'], SATOSHIS*descendant_fees)
            descendant_size += mempool[x]['size']
            assert_equal(mempool[x]['descendantsize'], descendant_size)
            descendant_count += 1

        # Adding one more transaction on to the chain should fail.
        try:
            self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
        except JSONRPCException as e:
            print "too-long-ancestor-chain successfully rejected"

        # TODO: check that node1's mempool is as expected

        # TODO: test ancestor size limits

        # Now test descendant chain limits
        txid = utxo[1]['txid']
        value = utxo[1]['amount']
        vout = utxo[1]['vout']

        transaction_package = []
        # First create one parent tx with 10 children
        (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 10)
        parent_transaction = txid
        for i in xrange(10):
            transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value})

        for i in xrange(1000):
            utxo = transaction_package.pop(0)
            try:
                (txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
                for j in xrange(10):
                    transaction_package.append({'txid': txid, 'vout': j, 'amount': sent_value})
                if i == 998:
                    mempool = self.nodes[0].getrawmempool(True)
                    assert_equal(mempool[parent_transaction]['descendantcount'], 1000)
            except JSONRPCException as e:
                print e.error['message']
                assert_equal(i, 999)
                print "tx that would create too large descendant package successfully rejected"

        # TODO: check that node1's mempool is as expected

        # TODO: test descendant size limits

        # Test reorg handling
        # First, the basics:
        self.nodes[0].generate(1)
        sync_blocks(self.nodes)
        self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash())
        self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash())

        # Now test the case where node1 has a transaction T in its mempool that
        # depends on transactions A and B which are in a mined block, and the
        # block containing A and B is disconnected, AND B is not accepted back
        # into node1's mempool because its ancestor count is too high.

        # Create 8 transactions, like so:
        # Tx0 -> Tx1 (vout0)
        #   \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7
        #
        # Mine them in the next block, then generate a new tx8 that spends
        # Tx1 and Tx7, and add to node1's mempool, then disconnect the
        # last block.

        # Create tx0 with 2 outputs
        utxo = self.nodes[0].listunspent()
        txid = utxo[0]['txid']
        value = utxo[0]['amount']
        vout = utxo[0]['vout']

        send_value = satoshi_round((value - fee)/2)
        inputs = [ {'txid' : txid, 'vout' : vout} ]
        outputs = {}
        for i in xrange(2):
            outputs[self.nodes[0].getnewaddress()] = send_value
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        signedtx = self.nodes[0].signrawtransaction(rawtx)
        txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
        tx0_id = txid
        value = send_value

        # Create tx1
        (tx1_id, tx1_value) = self.chain_transaction(self.nodes[0], tx0_id, 0, value, fee, 1)

        # Create tx2-7
        vout = 1
        txid = tx0_id
        for i in xrange(6):
            (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
            vout = 0
            value = sent_value

        # Mine these in a block
        self.nodes[0].generate(1)
        self.sync_all()

        # Now generate tx8, with a big fee
        inputs = [ {'txid' : tx1_id, 'vout': 0}, {'txid' : txid, 'vout': 0} ]
        outputs = { self.nodes[0].getnewaddress() : send_value + value - 4*fee }
        rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
        signedtx = self.nodes[0].signrawtransaction(rawtx)
        txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
        sync_mempools(self.nodes)

        # Now try to disconnect the tip on each node...
        self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        sync_blocks(self.nodes)

if __name__ == '__main__':
    MempoolPackagesTest().main()
```
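The `satoshi_round` helper simply truncates to eight decimal places (one satoshi); it can be exercised standalone with the stdlib:

```python
# Standalone check of the rounding helper, pure stdlib.
from decimal import Decimal, ROUND_DOWN

def satoshi_round(amount):
    return Decimal(amount).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN)

assert str(satoshi_round('0.123456789')) == '0.12345678'
```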
license: mit

repo_name: alex-march/micropython
path: esp8266/modules/flashbdev.py
copies: 7
size: 2025
```python
import esp

class FlashBdev:

    SEC_SIZE = 4096
    START_SEC = esp.flash_user_start() // SEC_SIZE
    NUM_BLK = 0x6b

    def __init__(self, blocks=NUM_BLK):
        self.blocks = blocks

    def readblocks(self, n, buf):
        #print("readblocks(%s, %x(%d))" % (n, id(buf), len(buf)))
        esp.flash_read((n + self.START_SEC) * self.SEC_SIZE, buf)

    def writeblocks(self, n, buf):
        #print("writeblocks(%s, %x(%d))" % (n, id(buf), len(buf)))
        #assert len(buf) <= self.SEC_SIZE, len(buf)
        esp.flash_erase(n + self.START_SEC)
        esp.flash_write((n + self.START_SEC) * self.SEC_SIZE, buf)

    def ioctl(self, op, arg):
        #print("ioctl(%d, %r)" % (op, arg))
        if op == 4:  # BP_IOCTL_SEC_COUNT
            return self.blocks
        if op == 5:  # BP_IOCTL_SEC_SIZE
            return self.SEC_SIZE

def set_bl_flash_size(real_size):
    if real_size == 256*1024:
        code = 1
    elif real_size == 512*1024:
        code = 0
    elif real_size == 1024*1024:
        code = 2
    elif real_size == 2048*1024:
        code = 3
    elif real_size == 4096*1024:
        code = 4
    else:
        code = 2
    buf = bytearray(4096)
    esp.flash_read(0, buf)
    buf[3] = (buf[3] & 0xf) | (code << 4)
    esp.flash_erase(0)
    esp.flash_write(0, buf)

# If bootloader size ID doesn't correspond to real Flash size,
# fix bootloader value and reboot.
size = esp.flash_id() >> 16
# Check that it looks like realistic power of 2 for flash sizes
# commonly used with esp8266
if 22 >= size >= 18:
    size = 1 << size
    if size != esp.flash_size():
        import machine
        import time
        print("Bootloader Flash size appear to have been set incorrectly, trying to fix")
        set_bl_flash_size(size)
        machine.reset()
        while 1:
            time.sleep(1)

size = esp.flash_size()
if size < 1024*1024:
    bdev = None
else:
    # 20K at the flash end is reserved for SDK params storage
    bdev = FlashBdev((size - 20480) // FlashBdev.SEC_SIZE - FlashBdev.START_SEC)
```
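`FlashBdev` implements MicroPython's simple block-device protocol (`readblocks`/`writeblocks`/`ioctl`). On a board with enough flash, the module-level `bdev` can be poked at directly on-device:

```python
# On-device sketch: query geometry and read the first sector through the
# protocol methods defined above.
if bdev:
    print(bdev.ioctl(4, 0), "sectors of", bdev.ioctl(5, 0), "bytes")
    buf = bytearray(FlashBdev.SEC_SIZE)
    bdev.readblocks(0, buf)
```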
license: mit

repo_name: SayCV/tools-OpenOCD
path: tools/xsvf_tools/svf2xsvf.py
copies: 101
size: 26710
```python
#!/usr/bin/python3.0

# Copyright 2008, SoftPLC Corporation  http://softplc.com
# Dick Hollenbeck [email protected]

# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, you may find one here:
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# or you may search the http://www.gnu.org website for the version 2 license,
# or you may write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA

# A python program to convert an SVF file to an XSVF file. There is an
# option to include comments containing the source file line number from the origin
# SVF file before each outputted XSVF statement.
#
# We deviate from the XSVF spec in that we introduce a new command called
# XWAITSTATE which directly flows from the SVF RUNTEST command. Unfortunately
# XRUNSTATE was ill conceived and is not used here. We also add support for the
# three Lattice extensions to SVF: LCOUNT, LDELAY, and LSDR. The xsvf file
# generated from this program is suitable for use with the xsvf player in
# OpenOCD with my modifications to xsvf.c.
#
# This program is written for python 3.0, and it is not easy to change this
# back to 2.x. You may find it easier to use python 3.x even if that means
# building it.

import re
import sys
import struct

# There are both ---<Lexer>--- and ---<Parser>--- sections to this program

if len( sys.argv ) < 3:
    print("usage %s <svf_filename> <xsvf_filename>" % sys.argv[0])
    exit(1)

inputFilename = sys.argv[1]
outputFilename = sys.argv[2]

doCOMMENTs = True       # Save XCOMMENTs in the output xsvf file
#doCOMMENTs = False     # Save XCOMMENTs in the output xsvf file

# pick your file encoding
file_encoding = 'ISO-8859-1'
#file_encoding = 'utf-8'

xrepeat = 0     # argument to XREPEAT, gives retry count for masked compares

#-----< Lexer >---------------------------------------------------------------

StateBin = (RESET,IDLE,
    DRSELECT,DRCAPTURE,DRSHIFT,DREXIT1,DRPAUSE,DREXIT2,DRUPDATE,
    IRSELECT,IRCAPTURE,IRSHIFT,IREXIT1,IRPAUSE,IREXIT2,IRUPDATE) = range(16)

# Any integer index into this tuple will be equal to its corresponding StateBin value
StateTxt = ("RESET","IDLE",
    "DRSELECT","DRCAPTURE","DRSHIFT","DREXIT1","DRPAUSE","DREXIT2","DRUPDATE",
    "IRSELECT","IRCAPTURE","IRSHIFT","IREXIT1","IRPAUSE","IREXIT2","IRUPDATE")

(XCOMPLETE,XTDOMASK,XSIR,XSDR,XRUNTEST,hole0,hole1,XREPEAT,XSDRSIZE,XSDRTDO,
    XSETSDRMASKS,XSDRINC,XSDRB,XSDRC,XSDRE,XSDRTDOB,XSDRTDOC,
    XSDRTDOE,XSTATE,XENDIR,XENDDR,XSIR2,XCOMMENT,XWAIT,XWAITSTATE,
    LCOUNT,LDELAY,LSDR,XTRST) = range(29)

#Note: LCOUNT, LDELAY, and LSDR are Lattice extensions to SVF and provide a way to loop back
# and check a completion status, essentially waiting on a part until it signals that it is done.
# For example below: loop 25 times, each time through the loop do a LDELAY (same as a true RUNTEST)
# and exit loop when LSDR compares match.
"""
LCOUNT 25;
! Step to DRPAUSE give 5 clocks and wait for 1.00e+000 SEC.
LDELAY DRPAUSE 5 TCK 1.00E-003 SEC;
! Test for the completed status. Match means pass.
! Loop back to LDELAY line if not match and loop count less than 25.
LSDR 1 TDI (0) TDO (1);
"""

#XTRST is an opcode Xilinx seemed to have missed and it comes from the SVF TRST statement.

LineNumber = 1

def s_ident(scanner, token): return ("ident", token.upper(), LineNumber)

def s_hex(scanner, token):
    global LineNumber
    LineNumber = LineNumber + token.count('\n')
    token = ''.join(token.split())
    return ("hex", token[1:-1], LineNumber)

def s_int(scanner, token): return ("int", int(token), LineNumber)
def s_float(scanner, token): return ("float", float(token), LineNumber)
#def s_comment(scanner, token): return ("comment", token, LineNumber)
def s_semicolon(scanner, token): return ("semi", token, LineNumber)

def s_nl(scanner,token):
    global LineNumber
    LineNumber = LineNumber + 1
    #print( 'LineNumber=', LineNumber, file=sys.stderr )
    return None

#2.00E-002

scanner = re.Scanner([
    (r"[a-zA-Z]\w*", s_ident),
#    (r"[-+]?[0-9]+[.]?[0-9]*([eE][-+]?[0-9]+)?", s_float),
    (r"[-+]?[0-9]+(([.][0-9eE+-]*)|([eE]+[-+]?[0-9]+))", s_float),
    (r"\d+", s_int),
    (r"\(([0-9a-fA-F]|\s)*\)", s_hex),
    (r"(!|//).*$", None),
    (r";", s_semicolon),
    (r"\n",s_nl),
    (r"\s*", None),
    ],
    re.MULTILINE
    )

# open the file using the given encoding
file = open( sys.argv[1], encoding=file_encoding )

# read all svf file input into string "input"
input = file.read()
file.close()

# Lexer:
# create a list of tuples containing (tokenType, tokenValue, LineNumber)
tokens = scanner.scan( input )[0]

input = None    # allow gc to reclaim memory holding file

#for tokenType, tokenValue, ln in tokens: print( "line %d: %s" % (ln, tokenType), tokenValue )

#-----<parser>-----------------------------------------------------------------

tokVal = tokType = tokLn = None

tup = iter( tokens )

def nextTok():
    """
    Function to read the next token from tup into tokType, tokVal, tokLn
    (linenumber) which are globals.
    """
    global tokType, tokVal, tokLn, tup
    tokType, tokVal, tokLn = tup.__next__()


class ParseError(Exception):
    """A class to hold a parsing error message"""
    def __init__(self, linenumber, token, message):
        self.linenumber = linenumber
        self.token = token
        self.message = message
    def __str__(self):
        global inputFilename
        return "Error in file \'%s\' at line %d near token %s\n %s" % (
                   inputFilename, self.linenumber, repr(self.token), self.message)


class MASKSET(object):
    """
    Class MASKSET holds a set of bit vectors, all of which are related, will
    all have the same length, and are associated with one of the seven
    shiftOps: HIR, HDR, TIR, TDR, SIR, SDR, LSDR. One of these holds a mask,
    smask, tdi, tdo, and a size.
    """
    def __init__(self, name):
        self.empty()
        self.name = name

    def empty(self):
        self.mask = bytearray()
        self.smask = bytearray()
        self.tdi = bytearray()
        self.tdo = bytearray()
        self.size = 0

    def syncLengths( self, sawTDI, sawTDO, sawMASK, sawSMASK, newSize ):
        """
        Set all the lengths equal in the event some of the masks were not
        seen as part of the last change set.
        """
        if self.size == newSize:
            return

        if newSize == 0:
            self.empty()
            return

        # If an SIR was given without a MASK(), then use a mask of all zeros.
        # this is not consistent with the SVF spec, but it makes sense because
        # it would be odd to be testing an instruction register read out of a
        # tap without giving a mask for it. Also, lattice seems to agree and is
        # generating SVF files that comply with this philosophy.
        if self.name == 'SIR' and not sawMASK:
            self.mask = bytearray( newSize )

        if newSize != len(self.mask):
            self.mask = bytearray( newSize )
            if self.name == 'SDR':  # leave mask for HIR,HDR,TIR,TDR,SIR zeros
                for i in range( newSize ):
                    self.mask[i] = 1

        if newSize != len(self.tdo):
            self.tdo = bytearray( newSize )

        if newSize != len(self.tdi):
            self.tdi = bytearray( newSize )

        if newSize != len(self.smask):
            self.smask = bytearray( newSize )

        self.size = newSize
#-----</MASKSET>-----

def makeBitArray( hexString, bitCount ):
    """
    Converts a packed sequence of hex ascii characters into a bytearray where
    each element in the array holds exactly one bit. Only "bitCount" bits are
    scanned and these must be the least significant bits in the hex number.
    That is, it is legal to have some unused bits in the most significant hex
    nibble of the input "hexString". The string is scanned starting from the
    backend, then just before returning we reverse the array. This way the
    append() method can be used, which I assume is faster than an insert.
    """
    global tokLn
    a = bytearray()
    length = bitCount
    hexString = list(hexString)
    hexString.reverse()
    #print(hexString)
    for c in hexString:
        if length <= 0:
            break;
        c = int(c, 16)
        for mask in [1,2,4,8]:
            if length <= 0:
                break;
            length = length - 1
            a.append( (c & mask) != 0 )
    if length > 0:
        raise ParseError( tokLn, hexString, "Insufficient hex characters for given length of %d" % bitCount )
    a.reverse()
    #print(a)
    return a

def makeXSVFbytes( bitarray ):
    """
    Make a bytearray which is contains the XSVF bits which will be written
    directly to disk. The number of bytes needed is calculated from the size
    of the argument bitarray.
    """
    bitCount = len(bitarray)
    byteCount = (bitCount+7)//8
    ba = bytearray( byteCount )
    firstBit = (bitCount % 8) - 1
    if firstBit == -1:
        firstBit = 7
    bitNdx = 0
    for byteNdx in range(byteCount):
        mask = 1<<firstBit
        byte = 0
        while mask:
            if bitarray[bitNdx]:
                byte |= mask;
            mask = mask >> 1
            bitNdx = bitNdx + 1
        ba[byteNdx] = byte
        firstBit = 7
    return ba

def writeComment( outputFile, shiftOp_linenum, shiftOp ):
    """
    Write an XCOMMENT record to outputFile
    """
    comment = "%s @%d\0" % (shiftOp, shiftOp_linenum)   # \0 is terminating nul
    ba = bytearray(1)
    ba[0] = XCOMMENT
    ba += comment.encode()
    outputFile.write( ba )

def combineBitVectors( trailer, meat, header ):
    """
    Combine the 3 bit vectors comprising a transmission. Since the least
    significant bits are sent first, the header is put onto the list last so
    they are sent first from that least significant position.
    """
    ret = bytearray()
    ret.extend( trailer )
    ret.extend( meat )
    ret.extend( header )
    return ret


def writeRUNTEST( outputFile, run_state, end_state, run_count, min_time, tokenTxt ):
    """
    Write the output for the SVF RUNTEST command.
    run_count - the number of clocks
    min_time - the number of seconds
    tokenTxt - either RUNTEST or LDELAY
    """
    # convert from secs to usecs
    min_time = int( min_time * 1000000)

    # the SVF RUNTEST command does NOT map to the XSVF XRUNTEST command. Check the SVF spec, then
    # read the XSVF command. They are not the same. Use an XSVF XWAITSTATE to
    # implement the required behavior of the SVF RUNTEST command.
    if doCOMMENTs:
        writeComment( output, tokLn, tokenTxt )

    if tokenTxt == 'RUNTEST':
        obuf = bytearray(11)
        obuf[0] = XWAITSTATE
        obuf[1] = run_state
        obuf[2] = end_state
        struct.pack_into(">i", obuf, 3, run_count )  # big endian 4 byte int to obuf
        struct.pack_into(">i", obuf, 7, min_time )   # big endian 4 byte int to obuf
        outputFile.write( obuf )
    else:   # == 'LDELAY'
        obuf = bytearray(10)
        obuf[0] = LDELAY
        obuf[1] = run_state
        # LDELAY has no end_state
        struct.pack_into(">i", obuf, 2, run_count )  # big endian 4 byte int to obuf
        struct.pack_into(">i", obuf, 6, min_time )   # big endian 4 byte int to obuf
        outputFile.write( obuf )


output = open( outputFilename, mode='wb' )

hir = MASKSET('HIR')
hdr = MASKSET('HDR')
tir = MASKSET('TIR')
tdr = MASKSET('TDR')
sir = MASKSET('SIR')
sdr = MASKSET('SDR')


expecting_eof = True


# one of the commands that take the shiftParts after the length, the parse
# template for all of these commands is identical
shiftOps = ('SDR', 'SIR', 'LSDR', 'HDR', 'HIR', 'TDR', 'TIR')

# the order must correspond to shiftOps, this holds the MASKSETS. 'LSDR' shares sdr with 'SDR'
shiftSets = (sdr, sir, sdr, hdr, hir, tdr, tir )

# what to expect as parameters to a shiftOp, i.e. after a SDR length or SIR length
shiftParts = ('TDI', 'TDO', 'MASK', 'SMASK')

# the set of legal states which can trail the RUNTEST command
run_state_allowed = ('IRPAUSE', 'DRPAUSE', 'RESET', 'IDLE')

enddr_state_allowed = ('DRPAUSE', 'IDLE')
endir_state_allowed = ('IRPAUSE', 'IDLE')

trst_mode_allowed = ('ON', 'OFF', 'Z', 'ABSENT')

enddr_state = IDLE
endir_state = IDLE

frequency = 1.00e+006  # HZ;

# change detection for xsdrsize and xtdomask
xsdrsize = -1           # the last one sent, send only on change
xtdomask = bytearray()  # the last one sent, send only on change


# we use a number of single byte writes for the XSVF command below
cmdbuf = bytearray(1)


# Save the XREPEAT setting into the file as first thing.
obuf = bytearray(2)
obuf[0] = XREPEAT
obuf[1] = xrepeat
output.write( obuf )


try:
    while 1:
        expecting_eof = True
        nextTok()
        expecting_eof = False
        # print( tokType, tokVal, tokLn )

        if tokVal in shiftOps:
            shiftOp_linenum = tokLn
            shiftOp = tokVal

            set = shiftSets[shiftOps.index(shiftOp)]

            # set flags false, if we see one later, set that one true later
            sawTDI = sawTDO = sawMASK = sawSMASK = False

            nextTok()
            if tokType != 'int':
                raise ParseError( tokLn, tokVal, "Expecting 'int' giving %s length, got '%s'" % (shiftOp, tokType) )
            length = tokVal

            nextTok()

            while tokVal != ';':
                if tokVal not in shiftParts:
                    raise ParseError( tokLn, tokVal, "Expecting TDI, TDO, MASK, SMASK, or ';'")
                shiftPart = tokVal

                nextTok()

                if tokType != 'hex':
                    raise ParseError( tokLn, tokVal, "Expecting hex bits" )
                bits = makeBitArray( tokVal, length )

                if shiftPart == 'TDI':
                    sawTDI = True
                    set.tdi = bits

                elif shiftPart == 'TDO':
                    sawTDO = True
                    set.tdo = bits

                elif shiftPart == 'MASK':
                    sawMASK = True
                    set.mask = bits

                elif shiftPart == 'SMASK':
                    sawSMASK = True
                    set.smask = bits

                nextTok()

            set.syncLengths( sawTDI, sawTDO, sawMASK, sawSMASK, length )

            # process all the gathered parameters and generate outputs here
            if shiftOp == 'SIR':
                if doCOMMENTs:
                    writeComment( output, shiftOp_linenum, 'SIR' )

                tdi = combineBitVectors( tir.tdi, sir.tdi, hir.tdi )
                if len(tdi) > 255:
                    obuf = bytearray(3)
                    obuf[0] = XSIR2
                    struct.pack_into( ">h", obuf, 1, len(tdi) )
                else:
                    obuf = bytearray(2)
                    obuf[0] = XSIR
                    obuf[1] = len(tdi)
                output.write( obuf )
                obuf = makeXSVFbytes( tdi )
                output.write( obuf )

            elif shiftOp == 'SDR':
                if doCOMMENTs:
                    writeComment( output, shiftOp_linenum, shiftOp )

                if not sawTDO:
                    # pass a zero filled bit vector for the sdr.mask
                    mask = combineBitVectors( tdr.mask, bytearray(sdr.size), hdr.mask )
                    tdi = combineBitVectors( tdr.tdi, sdr.tdi, hdr.tdi )

                    if xsdrsize != len(tdi):
                        xsdrsize = len(tdi)
                        cmdbuf[0] = XSDRSIZE
                        output.write( cmdbuf )
                        obuf = bytearray(4)
                        struct.pack_into( ">i", obuf, 0, xsdrsize )  # big endian 4 byte int to obuf
                        output.write( obuf )

                    if xtdomask != mask:
                        xtdomask = mask
                        cmdbuf[0] = XTDOMASK
                        output.write( cmdbuf )
                        obuf = makeXSVFbytes( mask )
                        output.write( obuf )

                    cmdbuf[0] = XSDR
                    output.write( cmdbuf )
                    obuf = makeXSVFbytes( tdi )
                    output.write( obuf )

                else:
                    mask = combineBitVectors( tdr.mask, sdr.mask, hdr.mask )
                    tdi = combineBitVectors( tdr.tdi, sdr.tdi, hdr.tdi )
                    tdo = combineBitVectors( tdr.tdo, sdr.tdo, hdr.tdo )

                    if xsdrsize != len(tdi):
                        xsdrsize = len(tdi)
                        cmdbuf[0] = XSDRSIZE
                        output.write( cmdbuf )
                        obuf = bytearray(4)
                        struct.pack_into(">i", obuf, 0, xsdrsize )  # big endian 4 byte int to obuf
                        output.write( obuf )

                    if xtdomask != mask:
                        xtdomask = mask
                        cmdbuf[0] = XTDOMASK
                        output.write( cmdbuf )
                        obuf = makeXSVFbytes( mask )
                        output.write( obuf )

                    cmdbuf[0] = XSDRTDO
                    output.write( cmdbuf )
                    obuf = makeXSVFbytes( tdi )
                    output.write( obuf )
                    obuf = makeXSVFbytes( tdo )
                    output.write( obuf )
                    #print( "len(tdo)=", len(tdo), "len(tdr.tdo)=", len(tdr.tdo), "len(sdr.tdo)=", len(sdr.tdo), "len(hdr.tdo)=", len(hdr.tdo) )

            elif shiftOp == 'LSDR':
                if doCOMMENTs:
                    writeComment( output, shiftOp_linenum, shiftOp )

                mask = combineBitVectors( tdr.mask, sdr.mask, hdr.mask )
                tdi = combineBitVectors( tdr.tdi, sdr.tdi, hdr.tdi )
                tdo = combineBitVectors( tdr.tdo, sdr.tdo, hdr.tdo )

                if xsdrsize != len(tdi):
                    xsdrsize = len(tdi)
                    cmdbuf[0] = XSDRSIZE
                    output.write( cmdbuf )
                    obuf = bytearray(4)
                    struct.pack_into(">i", obuf, 0, xsdrsize )  # big endian 4 byte int to obuf
                    output.write( obuf )

                if xtdomask != mask:
                    xtdomask = mask
                    cmdbuf[0] = XTDOMASK
                    output.write( cmdbuf )
                    obuf = makeXSVFbytes( mask )
                    output.write( obuf )

                cmdbuf[0] = LSDR
                output.write( cmdbuf )
                obuf = makeXSVFbytes( tdi )
                output.write( obuf )
                obuf = makeXSVFbytes( tdo )
                output.write( obuf )
                #print( "len(tdo)=", len(tdo), "len(tdr.tdo)=", len(tdr.tdo), "len(sdr.tdo)=", len(sdr.tdo), "len(hdr.tdo)=", len(hdr.tdo) )

        elif tokVal == 'RUNTEST' or tokVal == 'LDELAY':
            # e.g. from lattice tools:
            # "RUNTEST IDLE 5 TCK 1.00E-003 SEC;"
            saveTok = tokVal
            nextTok()
            min_time = 0
            run_count = 0
            max_time = 600  # ten minutes
            if tokVal in run_state_allowed:
                run_state = StateTxt.index(tokVal)
                end_state = run_state  # bottom of page 17 of SVF spec
                nextTok()
            if tokType != 'int' and tokType != 'float':
                raise ParseError( tokLn, tokVal, "Expecting 'int' or 'float' after RUNTEST [run_state]")
            timeval = tokVal;
            nextTok()
            if tokVal != 'TCK' and tokVal != 'SEC' and tokVal != 'SCK':
                raise ParseError( tokLn, tokVal, "Expecting 'TCK' or 'SEC' or 'SCK' after RUNTEST [run_state] (run_count|min_time)")
            if tokVal == 'TCK' or tokVal == 'SCK':
                run_count = int( timeval )
            else:
                min_time = timeval
            nextTok()
            if tokType == 'int' or tokType == 'float':
                min_time = tokVal
                nextTok()
                if tokVal != 'SEC':
                    raise ParseError( tokLn, tokVal, "Expecting 'SEC' after RUNTEST [run_state] run_count min_time")
                nextTok()
            if tokVal == 'MAXIMUM':
                nextTok()
                if tokType != 'int' and tokType != 'float':
                    raise ParseError( tokLn, tokVal, "Expecting 'max_time' after RUNTEST [run_state] min_time SEC MAXIMUM")
                max_time = tokVal
                nextTok()
                if tokVal != 'SEC':
                    raise ParseError( tokLn, tokVal, "Expecting 'max_time' after RUNTEST [run_state] min_time SEC MAXIMUM max_time")
                nextTok()
            if tokVal == 'ENDSTATE':
                nextTok()
                if tokVal not in run_state_allowed:
                    raise ParseError( tokLn, tokVal, "Expecting 'run_state' after RUNTEST .... ENDSTATE")
                end_state = StateTxt.index(tokVal)
                nextTok()
            if tokVal != ';':
                raise ParseError( tokLn, tokVal, "Expecting ';' after RUNTEST ....")
            # print( "run_count=", run_count, "min_time=", min_time,
            #    "max_time=", max_time, "run_state=", State[run_state], "end_state=", State[end_state] )
            writeRUNTEST( output, run_state, end_state, run_count, min_time, saveTok )

        elif tokVal == 'LCOUNT':
            nextTok()
            if tokType != 'int':
                raise ParseError( tokLn, tokVal, "Expecting integer 'count' after LCOUNT")
            loopCount = tokVal
            nextTok()
            if tokVal != ';':
                raise ParseError( tokLn, tokVal, "Expecting ';' after LCOUNT count")
            if doCOMMENTs:
                writeComment( output, tokLn, 'LCOUNT' )
            obuf = bytearray(5)
            obuf[0] = LCOUNT
            struct.pack_into(">i", obuf, 1, loopCount )  # big endian 4 byte int to obuf
            output.write( obuf )

        elif tokVal == 'ENDDR':
            nextTok()
            if tokVal not in enddr_state_allowed:
                raise ParseError( tokLn, tokVal, "Expecting 'stable_state' after ENDDR. (one of: DRPAUSE, IDLE)")
            enddr_state = StateTxt.index(tokVal)
            nextTok()
            if tokVal != ';':
                raise ParseError( tokLn, tokVal, "Expecting ';' after ENDDR stable_state")
            if doCOMMENTs:
                writeComment( output, tokLn, 'ENDDR' )
            obuf = bytearray(2)
            obuf[0] = XENDDR
            # Page 10 of the March 1999 SVF spec shows that RESET is also allowed here.
            # Yet the XSVF spec has no provision for that, and uses a non-standard, i.e.
            # boolean argument to XENDDR which only handles two of the 3 intended states.
            obuf[1] = 1 if enddr_state == DRPAUSE else 0
            output.write( obuf )

        elif tokVal == 'ENDIR':
            nextTok()
            if tokVal not in endir_state_allowed:
                raise ParseError( tokLn, tokVal, "Expecting 'stable_state' after ENDIR. (one of: IRPAUSE, IDLE)")
            endir_state = StateTxt.index(tokVal)
            nextTok()
            if tokVal != ';':
                raise ParseError( tokLn, tokVal, "Expecting ';' after ENDIR stable_state")
            if doCOMMENTs:
                writeComment( output, tokLn, 'ENDIR' )
            obuf = bytearray(2)
            obuf[0] = XENDIR
            # Page 10 of the March 1999 SVF spec shows that RESET is also allowed here.
            # Yet the XSVF spec has no provision for that, and uses a non-standard, i.e.
            # boolean argument to XENDDR which only handles two of the 3 intended states.
            obuf[1] = 1 if endir_state == IRPAUSE else 0
            output.write( obuf )

        elif tokVal == 'STATE':
            nextTok()
            ln = tokLn
            while tokVal != ';':
                if tokVal not in StateTxt:
                    raise ParseError( tokLn, tokVal, "Expecting 'stable_state' after STATE")
                stable_state = StateTxt.index( tokVal )

                if doCOMMENTs and ln != -1:
                    writeComment( output, ln, 'STATE' )
                    ln = -1     # save comment only once

                obuf = bytearray(2)
                obuf[0] = XSTATE
                obuf[1] = stable_state
                output.write( obuf )

                nextTok()

        elif tokVal == 'FREQUENCY':
            nextTok()
            if tokVal != ';':
                if tokType != 'int' and tokType != 'float':
                    raise ParseError( tokLn, tokVal, "Expecting 'cycles HZ' after FREQUENCY")
                frequency = tokVal
                nextTok()
                if tokVal != 'HZ':
                    raise ParseError( tokLn, tokVal, "Expecting 'HZ' after FREQUENCY cycles")
                nextTok()
                if tokVal != ';':
                    raise ParseError( tokLn, tokVal, "Expecting ';' after FREQUENCY cycles HZ")

        elif tokVal == 'TRST':
            nextTok()
            if tokVal not in trst_mode_allowed:
                raise ParseError( tokLn, tokVal, "Expecting 'ON|OFF|Z|ABSENT' after TRST")
            trst_mode = tokVal
            nextTok()
            if tokVal != ';':
                raise ParseError( tokLn, tokVal, "Expecting ';' after TRST trst_mode")
            if doCOMMENTs:
                writeComment( output, tokLn, 'TRST %s' % trst_mode )
            obuf = bytearray( 2 )
            obuf[0] = XTRST
            obuf[1] = trst_mode_allowed.index( trst_mode )  # use the index as the binary argument to XTRST opcode
            output.write( obuf )

        else:
            raise ParseError( tokLn, tokVal, "Unknown token '%s'" % tokVal)

except StopIteration:
    if not expecting_eof:
        print( "Unexpected End of File at line ", tokLn )

except ParseError as pe:
    print( "\n", pe )

finally:
    # print( "closing file" )
    cmdbuf[0] = XCOMPLETE
    output.write( cmdbuf )
    output.close()
```
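The two bit-packing helpers are the heart of the converter and are easy to sanity-check in isolation (values chosen by hand):

```python
# 0x1A taken 5 bits wide is 0b11010, most significant bit first.
bits = makeBitArray("1A", 5)
assert list(bits) == [1, 1, 0, 1, 0]
assert bytes(makeXSVFbytes(bits)) == b'\x1a'
```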
license: gpl-2.0

repo_name: lexus24/w16b_test
path: static/Brython3.1.1-20150328-091302/Lib/xml/etree/ElementTree.py
copies: 730
size: 61800
# # ElementTree # $Id: ElementTree.py 3440 2008-07-18 14:45:01Z fredrik $ # # light-weight XML support for Python 2.3 and later. # # history (since 1.2.6): # 2005-11-12 fl added tostringlist/fromstringlist helpers # 2006-07-05 fl merged in selected changes from the 1.3 sandbox # 2006-07-05 fl removed support for 2.1 and earlier # 2007-06-21 fl added deprecation/future warnings # 2007-08-25 fl added doctype hook, added parser version attribute etc # 2007-08-26 fl added new serializer code (better namespace handling, etc) # 2007-08-27 fl warn for broken /tag searches on tree level # 2007-09-02 fl added html/text methods to serializer (experimental) # 2007-09-05 fl added method argument to tostring/tostringlist # 2007-09-06 fl improved error handling # 2007-09-13 fl added itertext, iterfind; assorted cleanups # 2007-12-15 fl added C14N hooks, copy method (experimental) # # Copyright (c) 1999-2008 by Fredrik Lundh. All rights reserved. # # [email protected] # http://www.pythonware.com # # -------------------------------------------------------------------- # The ElementTree toolkit is # # Copyright (c) 1999-2008 by Fredrik Lundh # # By obtaining, using, and/or copying this software and/or its # associated documentation, you agree that you have read, understood, # and will comply with the following terms and conditions: # # Permission to use, copy, modify, and distribute this software and # its associated documentation for any purpose and without fee is # hereby granted, provided that the above copyright notice appears in # all copies, and that both that copyright notice and this permission # notice appear in supporting documentation, and that the name of # Secret Labs AB or the author not be used in advertising or publicity # pertaining to distribution of the software without specific, written # prior permission. # # SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD # TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- # ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR # BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY # DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, # WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS # ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE # OF THIS SOFTWARE. # -------------------------------------------------------------------- # Licensed to PSF under a Contributor Agreement. # See http://www.python.org/psf/license for licensing details. __all__ = [ # public symbols "Comment", "dump", "Element", "ElementTree", "fromstring", "fromstringlist", "iselement", "iterparse", "parse", "ParseError", "PI", "ProcessingInstruction", "QName", "SubElement", "tostring", "tostringlist", "TreeBuilder", "VERSION", "XML", "XMLID", "XMLParser", "XMLTreeBuilder", "register_namespace", ] VERSION = "1.3.0" ## # The <b>Element</b> type is a flexible container object, designed to # store hierarchical data structures in memory. The type can be # described as a cross between a list and a dictionary. # <p> # Each element has a number of properties associated with it: # <ul> # <li>a <i>tag</i>. 
This is a string identifying what kind of data # this element represents (the element type, in other words).</li> # <li>a number of <i>attributes</i>, stored in a Python dictionary.</li> # <li>a <i>text</i> string.</li> # <li>an optional <i>tail</i> string.</li> # <li>a number of <i>child elements</i>, stored in a Python sequence</li> # </ul> # # To create an element instance, use the {@link #Element} constructor # or the {@link #SubElement} factory function. # <p> # The {@link #ElementTree} class can be used to wrap an element # structure, and convert it from and to XML. ## import sys import re import warnings import io import contextlib from . import ElementPath ## # Parser error. This is a subclass of <b>SyntaxError</b>. # <p> # In addition to the exception value, an exception instance contains a # specific exception code in the <b>code</b> attribute, and the line and # column of the error in the <b>position</b> attribute. class ParseError(SyntaxError): pass # -------------------------------------------------------------------- ## # Checks if an object appears to be a valid element object. # # @param An element instance. # @return A true value if this is an element object. # @defreturn flag def iselement(element): # FIXME: not sure about this; # isinstance(element, Element) or look for tag/attrib/text attributes return hasattr(element, 'tag') ## # Element class. This class defines the Element interface, and # provides a reference implementation of this interface. # <p> # The element name, attribute names, and attribute values can be # either ASCII strings (ordinary Python strings containing only 7-bit # ASCII characters) or Unicode strings. # # @param tag The element name. # @param attrib An optional dictionary, containing element attributes. # @param **extra Additional attributes, given as keyword arguments. # @see Element # @see SubElement # @see Comment # @see ProcessingInstruction class Element: # <tag attrib>text<child/>...</tag>tail ## # (Attribute) Element tag. tag = None ## # (Attribute) Element attribute dictionary. Where possible, use # {@link #Element.get}, # {@link #Element.set}, # {@link #Element.keys}, and # {@link #Element.items} to access # element attributes. attrib = None ## # (Attribute) Text before first subelement. This is either a # string or the value None. Note that if there was no text, this # attribute may be either None or an empty string, depending on # the parser. text = None ## # (Attribute) Text after this element's end tag, but before the # next sibling element's start tag. This is either a string or # the value None. Note that if there was no text, this attribute # may be either None or an empty string, depending on the parser. tail = None # text after end tag, if any # constructor def __init__(self, tag, attrib={}, **extra): if not isinstance(attrib, dict): raise TypeError("attrib must be dict, not %s" % ( attrib.__class__.__name__,)) attrib = attrib.copy() attrib.update(extra) self.tag = tag self.attrib = attrib self._children = [] def __repr__(self): return "<Element %s at 0x%x>" % (repr(self.tag), id(self)) ## # Creates a new element object of the same type as this element. # # @param tag Element tag. # @param attrib Element attributes, given as a dictionary. # @return A new element instance. def makeelement(self, tag, attrib): return self.__class__(tag, attrib) ## # (Experimental) Copies the current element. This creates a # shallow copy; subelements will be shared with the original tree. # # @return A new element instance. 
def copy(self): elem = self.makeelement(self.tag, self.attrib) elem.text = self.text elem.tail = self.tail elem[:] = self return elem ## # Returns the number of subelements. Note that this only counts # full elements; to check if there's any content in an element, you # have to check both the length and the <b>text</b> attribute. # # @return The number of subelements. def __len__(self): return len(self._children) def __bool__(self): warnings.warn( "The behavior of this method will change in future versions. " "Use specific 'len(elem)' or 'elem is not None' test instead.", FutureWarning, stacklevel=2 ) return len(self._children) != 0 # emulate old behaviour, for now ## # Returns the given subelement, by index. # # @param index What subelement to return. # @return The given subelement. # @exception IndexError If the given element does not exist. def __getitem__(self, index): return self._children[index] ## # Replaces the given subelement, by index. # # @param index What subelement to replace. # @param element The new element value. # @exception IndexError If the given element does not exist. def __setitem__(self, index, element): # if isinstance(index, slice): # for elt in element: # assert iselement(elt) # else: # assert iselement(element) self._children[index] = element ## # Deletes the given subelement, by index. # # @param index What subelement to delete. # @exception IndexError If the given element does not exist. def __delitem__(self, index): del self._children[index] ## # Adds a subelement to the end of this element. In document order, # the new element will appear after the last existing subelement (or # directly after the text, if it's the first subelement), but before # the end tag for this element. # # @param element The element to add. def append(self, element): self._assert_is_element(element) self._children.append(element) ## # Appends subelements from a sequence. # # @param elements A sequence object with zero or more elements. # @since 1.3 def extend(self, elements): for element in elements: self._assert_is_element(element) self._children.extend(elements) ## # Inserts a subelement at the given position in this element. # # @param index Where to insert the new subelement. def insert(self, index, element): self._assert_is_element(element) self._children.insert(index, element) def _assert_is_element(self, e): # Need to refer to the actual Python implementation, not the # shadowing C implementation. if not isinstance(e, _Element): raise TypeError('expected an Element, not %s' % type(e).__name__) ## # Removes a matching subelement. Unlike the <b>find</b> methods, # this method compares elements based on identity, not on tag # value or contents. To remove subelements by other means, the # easiest way is often to use a list comprehension to select what # elements to keep, and use slice assignment to update the parent # element. # # @param element What element to remove. # @exception ValueError If a matching element could not be found. def remove(self, element): # assert iselement(element) self._children.remove(element) ## # (Deprecated) Returns all subelements. The elements are returned # in document order. # # @return A list of subelements. # @defreturn list of Element instances def getchildren(self): warnings.warn( "This method will be removed in future versions. " "Use 'list(elem)' or iteration over elem instead.", DeprecationWarning, stacklevel=2 ) return self._children ## # Finds the first matching subelement, by tag name or path. # # @param path What element to look for. 
    # @keyparam namespaces Optional namespace prefix map.
    # @return The first matching element, or None if no element was found.
    # @defreturn Element or None

    def find(self, path, namespaces=None):
        return ElementPath.find(self, path, namespaces)

    ##
    # Finds text for the first matching subelement, by tag name or path.
    #
    # @param path What element to look for.
    # @param default What to return if the element was not found.
    # @keyparam namespaces Optional namespace prefix map.
    # @return The text content of the first matching element, or the
    #     default value if no element was found. Note that if the element
    #     is found, but has no text content, this method returns an
    #     empty string.
    # @defreturn string

    def findtext(self, path, default=None, namespaces=None):
        return ElementPath.findtext(self, path, default, namespaces)

    ##
    # Finds all matching subelements, by tag name or path.
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return A list or other sequence containing all matching elements,
    #     in document order.
    # @defreturn list of Element instances

    def findall(self, path, namespaces=None):
        return ElementPath.findall(self, path, namespaces)

    ##
    # Finds all matching subelements, by tag name or path.
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return An iterator or sequence containing all matching elements,
    #     in document order.
    # @defreturn a generated sequence of Element instances

    def iterfind(self, path, namespaces=None):
        return ElementPath.iterfind(self, path, namespaces)

    ##
    # Resets an element. This function removes all subelements, clears
    # all attributes, and sets the <b>text</b> and <b>tail</b> attributes
    # to None.

    def clear(self):
        self.attrib.clear()
        self._children = []
        self.text = self.tail = None

    ##
    # Gets an element attribute. Equivalent to <b>attrib.get</b>, but
    # some implementations may handle this a bit more efficiently.
    #
    # @param key What attribute to look for.
    # @param default What to return if the attribute was not found.
    # @return The attribute value, or the default value, if the
    #     attribute was not found.
    # @defreturn string or None

    def get(self, key, default=None):
        return self.attrib.get(key, default)

    ##
    # Sets an element attribute. Equivalent to <b>attrib[key] = value</b>,
    # but some implementations may handle this a bit more efficiently.
    #
    # @param key What attribute to set.
    # @param value The attribute value.

    def set(self, key, value):
        self.attrib[key] = value

    ##
    # Gets a list of attribute names. The names are returned in an
    # arbitrary order (just like for an ordinary Python dictionary).
    # Equivalent to <b>attrib.keys()</b>.
    #
    # @return A list of element attribute names.
    # @defreturn list of strings

    def keys(self):
        return self.attrib.keys()

    ##
    # Gets element attributes, as a sequence. The attributes are
    # returned in an arbitrary order. Equivalent to <b>attrib.items()</b>.
    #
    # @return A list of (name, value) tuples for all attributes.
    # @defreturn list of (string, string) tuples

    def items(self):
        return self.attrib.items()

    ##
    # Creates a tree iterator. The iterator loops over this element
    # and all subelements, in document order, and returns all elements
    # with a matching tag.
    # <p>
    # If the tree structure is modified during iteration, new or removed
    # elements may or may not be included. To get a stable set, use the
    # list() function on the iterator, and loop over the resulting list.
    #
    # @param tag What tags to look for (default is to return all elements).
# @return An iterator containing all the matching elements. # @defreturn iterator def iter(self, tag=None): if tag == "*": tag = None if tag is None or self.tag == tag: yield self for e in self._children: for e in e.iter(tag): yield e # compatibility def getiterator(self, tag=None): # Change for a DeprecationWarning in 1.4 warnings.warn( "This method will be removed in future versions. " "Use 'elem.iter()' or 'list(elem.iter())' instead.", PendingDeprecationWarning, stacklevel=2 ) return list(self.iter(tag)) ## # Creates a text iterator. The iterator loops over this element # and all subelements, in document order, and returns all inner # text. # # @return An iterator containing all inner text. # @defreturn iterator def itertext(self): tag = self.tag if not isinstance(tag, str) and tag is not None: return if self.text: yield self.text for e in self: for s in e.itertext(): yield s if e.tail: yield e.tail # compatibility _Element = _ElementInterface = Element ## # Subelement factory. This function creates an element instance, and # appends it to an existing element. # <p> # The element name, attribute names, and attribute values can be # either 8-bit ASCII strings or Unicode strings. # # @param parent The parent element. # @param tag The subelement name. # @param attrib An optional dictionary, containing element attributes. # @param **extra Additional attributes, given as keyword arguments. # @return An element instance. # @defreturn Element def SubElement(parent, tag, attrib={}, **extra): attrib = attrib.copy() attrib.update(extra) element = parent.makeelement(tag, attrib) parent.append(element) return element ## # Comment element factory. This factory function creates a special # element that will be serialized as an XML comment by the standard # serializer. # <p> # The comment string can be either an 8-bit ASCII string or a Unicode # string. # # @param text A string containing the comment string. # @return An element instance, representing a comment. # @defreturn Element def Comment(text=None): element = Element(Comment) element.text = text return element ## # PI element factory. This factory function creates a special element # that will be serialized as an XML processing instruction by the standard # serializer. # # @param target A string containing the PI target. # @param text A string containing the PI contents, if any. # @return An element instance, representing a PI. # @defreturn Element def ProcessingInstruction(target, text=None): element = Element(ProcessingInstruction) element.text = target if text: element.text = element.text + " " + text return element PI = ProcessingInstruction ## # QName wrapper. This can be used to wrap a QName attribute value, in # order to get proper namespace handling on output. # # @param text A string containing the QName value, in the form {uri}local, # or, if the tag argument is given, the URI part of a QName. # @param tag Optional tag. If given, the first argument is interpreted as # an URI, and this argument is interpreted as a local name. # @return An opaque object, representing the QName. 
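#
# A short illustrative sketch (the URI and tag are assumptions, not part
# of the original docs):
#
#     q = QName("http://example.org/ns", "item")
#     str(q)               # '{http://example.org/ns}item'
#     elem = Element(q)    # the serializer emits a prefixed name for the URI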
class QName: def __init__(self, text_or_uri, tag=None): if tag: text_or_uri = "{%s}%s" % (text_or_uri, tag) self.text = text_or_uri def __str__(self): return self.text def __repr__(self): return '<QName %r>' % (self.text,) def __hash__(self): return hash(self.text) def __le__(self, other): if isinstance(other, QName): return self.text <= other.text return self.text <= other def __lt__(self, other): if isinstance(other, QName): return self.text < other.text return self.text < other def __ge__(self, other): if isinstance(other, QName): return self.text >= other.text return self.text >= other def __gt__(self, other): if isinstance(other, QName): return self.text > other.text return self.text > other def __eq__(self, other): if isinstance(other, QName): return self.text == other.text return self.text == other def __ne__(self, other): if isinstance(other, QName): return self.text != other.text return self.text != other # -------------------------------------------------------------------- ## # ElementTree wrapper class. This class represents an entire element # hierarchy, and adds some extra support for serialization to and from # standard XML. # # @param element Optional root element. # @keyparam file Optional file handle or file name. If given, the # tree is initialized with the contents of this XML file. class ElementTree: def __init__(self, element=None, file=None): # assert element is None or iselement(element) self._root = element # first node if file: self.parse(file) ## # Gets the root element for this tree. # # @return An element instance. # @defreturn Element def getroot(self): return self._root ## # Replaces the root element for this tree. This discards the # current contents of the tree, and replaces it with the given # element. Use with care. # # @param element An element instance. def _setroot(self, element): # assert iselement(element) self._root = element ## # Loads an external XML document into this element tree. # # @param source A file name or file object. If a file object is # given, it only has to implement a <b>read(n)</b> method. # @keyparam parser An optional parser instance. If not given, the # standard {@link XMLParser} parser is used. # @return The document root element. # @defreturn Element # @exception ParseError If the parser fails to parse the document. def parse(self, source, parser=None): close_source = False if not hasattr(source, "read"): source = open(source, "rb") close_source = True try: if not parser: parser = XMLParser(target=TreeBuilder()) while 1: data = source.read(65536) if not data: break parser.feed(data) self._root = parser.close() return self._root finally: if close_source: source.close() ## # Creates a tree iterator for the root element. The iterator loops # over all elements in this tree, in document order. # # @param tag What tags to look for (default is to return all elements) # @return An iterator. # @defreturn iterator def iter(self, tag=None): # assert self._root is not None return self._root.iter(tag) # compatibility def getiterator(self, tag=None): # Change for a DeprecationWarning in 1.4 warnings.warn( "This method will be removed in future versions. " "Use 'tree.iter()' or 'list(tree.iter())' instead.", PendingDeprecationWarning, stacklevel=2 ) return list(self.iter(tag)) ## # Same as getroot().find(path), starting at the root of the tree. # # @param path What element to look for. # @keyparam namespaces Optional namespace prefix map. # @return The first matching element, or None if no element was found. 
    # @defreturn Element or None

    def find(self, path, namespaces=None):
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.find(path, namespaces)

    ##
    # Same as getroot().findtext(path), starting at the root of the tree.
    #
    # @param path What element to look for.
    # @param default What to return if the element was not found.
    # @keyparam namespaces Optional namespace prefix map.
    # @return The text content of the first matching element, or the
    #     default value if no element was found. Note that if the element
    #     is found, but has no text content, this method returns an
    #     empty string.
    # @defreturn string

    def findtext(self, path, default=None, namespaces=None):
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.findtext(path, default, namespaces)

    ##
    # Same as getroot().findall(path), starting at the root of the tree.
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return A list or iterator containing all matching elements,
    #     in document order.
    # @defreturn list of Element instances

    def findall(self, path, namespaces=None):
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.findall(path, namespaces)

    ##
    # Finds all matching subelements, by tag name or path.
    # Same as getroot().iterfind(path).
    #
    # @param path What element to look for.
    # @keyparam namespaces Optional namespace prefix map.
    # @return An iterator or sequence containing all matching elements,
    #     in document order.
    # @defreturn a generated sequence of Element instances

    def iterfind(self, path, namespaces=None):
        # assert self._root is not None
        if path[:1] == "/":
            path = "." + path
            warnings.warn(
                "This search is broken in 1.3 and earlier, and will be "
                "fixed in a future version. If you rely on the current "
                "behaviour, change it to %r" % path,
                FutureWarning, stacklevel=2
                )
        return self._root.iterfind(path, namespaces)

    ##
    # Writes the element tree to a file, as XML.
    #
    # @def write(file, **options)
    # @param file A file name, or a file object opened for writing.
    # @param **options Options, given as keyword arguments.
    # @keyparam encoding Optional output encoding (default is US-ASCII).
    #     Use "unicode" to return a Unicode string.
    # @keyparam xml_declaration Controls if an XML declaration should
    #     be added to the file. Use False for never, True for always,
    #     None for only if not US-ASCII or UTF-8 or Unicode. None is default.
    # @keyparam default_namespace Sets the default XML namespace (for "xmlns").
    # @keyparam method Optional output method ("xml", "html", "text" or
    #     "c14n"; default is "xml").
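    #
    # A minimal usage sketch (the file name and option values are
    # illustrative, not from the original docs):
    #
    #     tree = ElementTree(Element("root"))
    #     tree.write("out.xml", encoding="utf-8", xml_declaration=True)
    #     # -> <?xml version='1.0' encoding='utf-8'?><root />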
    def write(self, file_or_filename,
              encoding=None,
              xml_declaration=None,
              default_namespace=None,
              method=None):
        if not method:
            method = "xml"
        elif method not in _serialize:
            raise ValueError("unknown method %r" % method)
        if not encoding:
            if method == "c14n":
                encoding = "utf-8"
            else:
                encoding = "us-ascii"
        else:
            encoding = encoding.lower()
        with _get_writer(file_or_filename, encoding) as write:
            if method == "xml" and (xml_declaration or
                    (xml_declaration is None and
                     encoding not in ("utf-8", "us-ascii", "unicode"))):
                declared_encoding = encoding
                if encoding == "unicode":
                    # Retrieve the default encoding for the xml declaration
                    import locale
                    declared_encoding = locale.getpreferredencoding()
                write("<?xml version='1.0' encoding='%s'?>\n" % (
                    declared_encoding,))
            if method == "text":
                _serialize_text(write, self._root)
            else:
                qnames, namespaces = _namespaces(self._root, default_namespace)
                serialize = _serialize[method]
                serialize(write, self._root, qnames, namespaces)

    def write_c14n(self, file):
        # lxml.etree compatibility. use output method instead
        return self.write(file, method="c14n")

# --------------------------------------------------------------------
# serialization support

@contextlib.contextmanager
def _get_writer(file_or_filename, encoding):
    # returns a text write method and releases all resources after use
    try:
        write = file_or_filename.write
    except AttributeError:
        # file_or_filename is a file name
        if encoding == "unicode":
            file = open(file_or_filename, "w")
        else:
            file = open(file_or_filename, "w",
                        encoding=encoding,
                        errors="xmlcharrefreplace")
        with file:
            yield file.write
    else:
        # file_or_filename is a file-like object
        # encoding determines if it is a text or binary writer
        if encoding == "unicode":
            # use a text writer as is
            yield write
        else:
            # wrap a binary writer with TextIOWrapper
            with contextlib.ExitStack() as stack:
                if isinstance(file_or_filename, io.BufferedIOBase):
                    file = file_or_filename
                elif isinstance(file_or_filename, io.RawIOBase):
                    file = io.BufferedWriter(file_or_filename)
                    # Keep the original file open when the BufferedWriter is
                    # destroyed
                    stack.callback(file.detach)
                else:
                    # This is to handle passed objects that aren't in the
                    # IOBase hierarchy, but just have a write method
                    file = io.BufferedIOBase()
                    file.writable = lambda: True
                    file.write = write
                    try:
                        # TextIOWrapper uses these methods to determine
                        # if BOM (for UTF-16, etc) should be added
                        file.seekable = file_or_filename.seekable
                        file.tell = file_or_filename.tell
                    except AttributeError:
                        pass
                file = io.TextIOWrapper(file,
                                        encoding=encoding,
                                        errors="xmlcharrefreplace",
                                        newline="\n")
                # Keep the original file open when the TextIOWrapper is
                # destroyed
                stack.callback(file.detach)
                yield file.write

def _namespaces(elem, default_namespace=None):
    # identify namespaces used in this tree

    # maps qnames to *encoded* prefix:local names
    qnames = {None: None}

    # maps uri:s to prefixes
    namespaces = {}
    if default_namespace:
        namespaces[default_namespace] = ""

    def add_qname(qname):
        # calculate serialized qname representation
        try:
            if qname[:1] == "{":
                uri, tag = qname[1:].rsplit("}", 1)
                prefix = namespaces.get(uri)
                if prefix is None:
                    prefix = _namespace_map.get(uri)
                    if prefix is None:
                        prefix = "ns%d" % len(namespaces)
                    if prefix != "xml":
                        namespaces[uri] = prefix
                if prefix:
                    qnames[qname] = "%s:%s" % (prefix, tag)
                else:
                    qnames[qname] = tag # default element
            else:
                if default_namespace:
                    # FIXME: can this be handled in XML 1.0?
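                    # once a default namespace is declared, an unqualified
                    # name in the output would silently move into that
                    # namespace when re-parsed, so refuse to serialize it
                    # rather than guess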
raise ValueError( "cannot use non-qualified names with " "default_namespace option" ) qnames[qname] = qname except TypeError: _raise_serialization_error(qname) # populate qname and namespaces table for elem in elem.iter(): tag = elem.tag if isinstance(tag, QName): if tag.text not in qnames: add_qname(tag.text) elif isinstance(tag, str): if tag not in qnames: add_qname(tag) elif tag is not None and tag is not Comment and tag is not PI: _raise_serialization_error(tag) for key, value in elem.items(): if isinstance(key, QName): key = key.text if key not in qnames: add_qname(key) if isinstance(value, QName) and value.text not in qnames: add_qname(value.text) text = elem.text if isinstance(text, QName) and text.text not in qnames: add_qname(text.text) return qnames, namespaces def _serialize_xml(write, elem, qnames, namespaces): tag = elem.tag text = elem.text if tag is Comment: write("<!--%s-->" % text) elif tag is ProcessingInstruction: write("<?%s?>" % text) else: tag = qnames[tag] if tag is None: if text: write(_escape_cdata(text)) for e in elem: _serialize_xml(write, e, qnames, None) else: write("<" + tag) items = list(elem.items()) if items or namespaces: if namespaces: for v, k in sorted(namespaces.items(), key=lambda x: x[1]): # sort on prefix if k: k = ":" + k write(" xmlns%s=\"%s\"" % ( k, _escape_attrib(v) )) for k, v in sorted(items): # lexical order if isinstance(k, QName): k = k.text if isinstance(v, QName): v = qnames[v.text] else: v = _escape_attrib(v) write(" %s=\"%s\"" % (qnames[k], v)) if text or len(elem): write(">") if text: write(_escape_cdata(text)) for e in elem: _serialize_xml(write, e, qnames, None) write("</" + tag + ">") else: write(" />") if elem.tail: write(_escape_cdata(elem.tail)) HTML_EMPTY = ("area", "base", "basefont", "br", "col", "frame", "hr", "img", "input", "isindex", "link", "meta", "param") try: HTML_EMPTY = set(HTML_EMPTY) except NameError: pass def _serialize_html(write, elem, qnames, namespaces): tag = elem.tag text = elem.text if tag is Comment: write("<!--%s-->" % _escape_cdata(text)) elif tag is ProcessingInstruction: write("<?%s?>" % _escape_cdata(text)) else: tag = qnames[tag] if tag is None: if text: write(_escape_cdata(text)) for e in elem: _serialize_html(write, e, qnames, None) else: write("<" + tag) items = list(elem.items()) if items or namespaces: if namespaces: for v, k in sorted(namespaces.items(), key=lambda x: x[1]): # sort on prefix if k: k = ":" + k write(" xmlns%s=\"%s\"" % ( k, _escape_attrib(v) )) for k, v in sorted(items): # lexical order if isinstance(k, QName): k = k.text if isinstance(v, QName): v = qnames[v.text] else: v = _escape_attrib_html(v) # FIXME: handle boolean attributes write(" %s=\"%s\"" % (qnames[k], v)) write(">") tag = tag.lower() if text: if tag == "script" or tag == "style": write(text) else: write(_escape_cdata(text)) for e in elem: _serialize_html(write, e, qnames, None) if tag not in HTML_EMPTY: write("</" + tag + ">") if elem.tail: write(_escape_cdata(elem.tail)) def _serialize_text(write, elem): for part in elem.itertext(): write(part) if elem.tail: write(elem.tail) _serialize = { "xml": _serialize_xml, "html": _serialize_html, "text": _serialize_text, # this optional method is imported at the end of the module # "c14n": _serialize_c14n, } ## # Registers a namespace prefix. The registry is global, and any # existing mapping for either the given prefix or the namespace URI # will be removed. # # @param prefix Namespace prefix. # @param uri Namespace uri. 
Tags and attributes in this namespace
# will be serialized with the given prefix, if at all possible.
# @exception ValueError If the prefix is reserved, or is otherwise
#     invalid.

def register_namespace(prefix, uri):
    if re.match(r"ns\d+$", prefix):
        raise ValueError("Prefix format reserved for internal use")
    for k, v in list(_namespace_map.items()):
        if k == uri or v == prefix:
            del _namespace_map[k]
    _namespace_map[uri] = prefix

_namespace_map = {
    # "well-known" namespace prefixes
    "http://www.w3.org/XML/1998/namespace": "xml",
    "http://www.w3.org/1999/xhtml": "html",
    "http://www.w3.org/1999/02/22-rdf-syntax-ns#": "rdf",
    "http://schemas.xmlsoap.org/wsdl/": "wsdl",
    # xml schema
    "http://www.w3.org/2001/XMLSchema": "xs",
    "http://www.w3.org/2001/XMLSchema-instance": "xsi",
    # dublin core
    "http://purl.org/dc/elements/1.1/": "dc",
}
# For tests and troubleshooting
register_namespace._namespace_map = _namespace_map

def _raise_serialization_error(text):
    raise TypeError(
        "cannot serialize %r (type %s)" % (text, type(text).__name__)
        )

def _escape_cdata(text):
    # escape character data
    try:
        # it's worth avoiding do-nothing calls for strings that are
        # shorter than 500 characters, or so. Assume that's, by far,
        # the most common case in most applications.
        if "&" in text:
            text = text.replace("&", "&amp;")
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)

def _escape_attrib(text):
    # escape attribute value
    try:
        if "&" in text:
            text = text.replace("&", "&amp;")
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        if "\"" in text:
            text = text.replace("\"", "&quot;")
        if "\n" in text:
            text = text.replace("\n", "&#10;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)

def _escape_attrib_html(text):
    # escape attribute value
    try:
        if "&" in text:
            text = text.replace("&", "&amp;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        if "\"" in text:
            text = text.replace("\"", "&quot;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)

# --------------------------------------------------------------------

##
# Generates a string representation of an XML element, including all
# subelements. If encoding is "unicode", the return type is a string;
# otherwise it is a bytes array.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
#     Use "unicode" to return a Unicode string.
# @keyparam method Optional output method ("xml", "html", "text" or
#     "c14n"; default is "xml").
# @return An (optionally) encoded string containing the XML data.
# @defreturn string

def tostring(element, encoding=None, method=None):
    stream = io.StringIO() if encoding == 'unicode' else io.BytesIO()
    ElementTree(element).write(stream, encoding, method=method)
    return stream.getvalue()

##
# Generates a string representation of an XML element, including all
# subelements.
#
# @param element An Element instance.
# @keyparam encoding Optional output encoding (default is US-ASCII).
#     Use "unicode" to return a Unicode string.
# @keyparam method Optional output method ("xml", "html", "text" or
#     "c14n"; default is "xml").
# @return A sequence object containing the XML data.
# @defreturn sequence # @since 1.3 class _ListDataStream(io.BufferedIOBase): """ An auxiliary stream accumulating into a list reference """ def __init__(self, lst): self.lst = lst def writable(self): return True def seekable(self): return True def write(self, b): self.lst.append(b) def tell(self): return len(self.lst) def tostringlist(element, encoding=None, method=None): lst = [] stream = _ListDataStream(lst) ElementTree(element).write(stream, encoding, method=method) return lst ## # Writes an element tree or element structure to sys.stdout. This # function should be used for debugging only. # <p> # The exact output format is implementation dependent. In this # version, it's written as an ordinary XML file. # # @param elem An element tree or an individual element. def dump(elem): # debugging if not isinstance(elem, ElementTree): elem = ElementTree(elem) elem.write(sys.stdout, encoding="unicode") tail = elem.getroot().tail if not tail or tail[-1] != "\n": sys.stdout.write("\n") # -------------------------------------------------------------------- # parsing ## # Parses an XML document into an element tree. # # @param source A filename or file object containing XML data. # @param parser An optional parser instance. If not given, the # standard {@link XMLParser} parser is used. # @return An ElementTree instance def parse(source, parser=None): tree = ElementTree() tree.parse(source, parser) return tree ## # Parses an XML document into an element tree incrementally, and reports # what's going on to the user. # # @param source A filename or file object containing XML data. # @param events A list of events to report back. If omitted, only "end" # events are reported. # @param parser An optional parser instance. If not given, the # standard {@link XMLParser} parser is used. # @return A (event, elem) iterator. 
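#
# A short usage sketch (the file name and tag are illustrative); clearing
# elements once their "end" event has been seen keeps memory bounded on
# large documents:
#
#     for event, elem in iterparse("data.xml", events=("start", "end")):
#         if event == "end" and elem.tag == "record":
#             elem.clear()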
def iterparse(source, events=None, parser=None): close_source = False if not hasattr(source, "read"): source = open(source, "rb") close_source = True if not parser: parser = XMLParser(target=TreeBuilder()) return _IterParseIterator(source, events, parser, close_source) class _IterParseIterator: def __init__(self, source, events, parser, close_source=False): self._file = source self._close_file = close_source self._events = [] self._index = 0 self._error = None self.root = self._root = None self._parser = parser # wire up the parser for event reporting parser = self._parser._parser append = self._events.append if events is None: events = ["end"] for event in events: if event == "start": try: parser.ordered_attributes = 1 parser.specified_attributes = 1 def handler(tag, attrib_in, event=event, append=append, start=self._parser._start_list): append((event, start(tag, attrib_in))) parser.StartElementHandler = handler except AttributeError: def handler(tag, attrib_in, event=event, append=append, start=self._parser._start): append((event, start(tag, attrib_in))) parser.StartElementHandler = handler elif event == "end": def handler(tag, event=event, append=append, end=self._parser._end): append((event, end(tag))) parser.EndElementHandler = handler elif event == "start-ns": def handler(prefix, uri, event=event, append=append): append((event, (prefix or "", uri or ""))) parser.StartNamespaceDeclHandler = handler elif event == "end-ns": def handler(prefix, event=event, append=append): append((event, None)) parser.EndNamespaceDeclHandler = handler else: raise ValueError("unknown event %r" % event) def __next__(self): while 1: try: item = self._events[self._index] self._index += 1 return item except IndexError: pass if self._error: e = self._error self._error = None raise e if self._parser is None: self.root = self._root if self._close_file: self._file.close() raise StopIteration # load event buffer del self._events[:] self._index = 0 data = self._file.read(16384) if data: try: self._parser.feed(data) except SyntaxError as exc: self._error = exc else: self._root = self._parser.close() self._parser = None def __iter__(self): return self ## # Parses an XML document from a string constant. This function can # be used to embed "XML literals" in Python code. # # @param source A string containing XML data. # @param parser An optional parser instance. If not given, the # standard {@link XMLParser} parser is used. # @return An Element instance. # @defreturn Element def XML(text, parser=None): if not parser: parser = XMLParser(target=TreeBuilder()) parser.feed(text) return parser.close() ## # Parses an XML document from a string constant, and also returns # a dictionary which maps from element id:s to elements. # # @param source A string containing XML data. # @param parser An optional parser instance. If not given, the # standard {@link XMLParser} parser is used. # @return A tuple containing an Element instance and a dictionary. # @defreturn (Element, dictionary) def XMLID(text, parser=None): if not parser: parser = XMLParser(target=TreeBuilder()) parser.feed(text) tree = parser.close() ids = {} for elem in tree.iter(): id = elem.get("id") if id: ids[id] = elem return tree, ids ## # Parses an XML document from a string constant. Same as {@link #XML}. # # @def fromstring(text) # @param source A string containing XML data. # @return An Element instance. # @defreturn Element fromstring = XML ## # Parses an XML document from a sequence of string fragments. 
# # @param sequence A list or other sequence containing XML data fragments. # @param parser An optional parser instance. If not given, the # standard {@link XMLParser} parser is used. # @return An Element instance. # @defreturn Element # @since 1.3 def fromstringlist(sequence, parser=None): if not parser: parser = XMLParser(target=TreeBuilder()) for text in sequence: parser.feed(text) return parser.close() # -------------------------------------------------------------------- ## # Generic element structure builder. This builder converts a sequence # of {@link #TreeBuilder.start}, {@link #TreeBuilder.data}, and {@link # #TreeBuilder.end} method calls to a well-formed element structure. # <p> # You can use this class to build an element structure using a custom XML # parser, or a parser for some other XML-like format. # # @param element_factory Optional element factory. This factory # is called to create new Element instances, as necessary. class TreeBuilder: def __init__(self, element_factory=None): self._data = [] # data collector self._elem = [] # element stack self._last = None # last element self._tail = None # true if we're after an end tag if element_factory is None: element_factory = Element self._factory = element_factory ## # Flushes the builder buffers, and returns the toplevel document # element. # # @return An Element instance. # @defreturn Element def close(self): assert len(self._elem) == 0, "missing end tags" assert self._last is not None, "missing toplevel element" return self._last def _flush(self): if self._data: if self._last is not None: text = "".join(self._data) if self._tail: assert self._last.tail is None, "internal error (tail)" self._last.tail = text else: assert self._last.text is None, "internal error (text)" self._last.text = text self._data = [] ## # Adds text to the current element. # # @param data A string. This should be either an 8-bit string # containing ASCII text, or a Unicode string. def data(self, data): self._data.append(data) ## # Opens a new element. # # @param tag The element name. # @param attrib A dictionary containing element attributes. # @return The opened element. # @defreturn Element def start(self, tag, attrs): self._flush() self._last = elem = self._factory(tag, attrs) if self._elem: self._elem[-1].append(elem) self._elem.append(elem) self._tail = 0 return elem ## # Closes the current element. # # @param tag The element name. # @return The closed element. # @defreturn Element def end(self, tag): self._flush() self._last = self._elem.pop() assert self._last.tag == tag,\ "end tag mismatch (expected %s, got %s)" % ( self._last.tag, tag) self._tail = 1 return self._last ## # Element structure builder for XML source data, based on the # <b>expat</b> parser. # # @keyparam target Target object. If omitted, the builder uses an # instance of the standard {@link #TreeBuilder} class. # @keyparam html Predefine HTML entities. This flag is not supported # by the current implementation. # @keyparam encoding Optional encoding. If given, the value overrides # the encoding specified in the XML file. 
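#
# A brief usage sketch (the XML literal is illustrative): feed data in one
# or more chunks, then call close() to obtain the root element built by
# the target TreeBuilder:
#
#     parser = XMLParser(target=TreeBuilder())
#     parser.feed("<root><child/></root>")
#     root = parser.close()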
# @see #ElementTree # @see #TreeBuilder class XMLParser: def __init__(self, html=0, target=None, encoding=None): try: from xml.parsers import expat except ImportError: try: import pyexpat as expat except ImportError: raise ImportError( "No module named expat; use SimpleXMLTreeBuilder instead" ) parser = expat.ParserCreate(encoding, "}") if target is None: target = TreeBuilder() # underscored names are provided for compatibility only self.parser = self._parser = parser self.target = self._target = target self._error = expat.error self._names = {} # name memo cache # main callbacks parser.DefaultHandlerExpand = self._default if hasattr(target, 'start'): parser.StartElementHandler = self._start if hasattr(target, 'end'): parser.EndElementHandler = self._end if hasattr(target, 'data'): parser.CharacterDataHandler = target.data # miscellaneous callbacks if hasattr(target, 'comment'): parser.CommentHandler = target.comment if hasattr(target, 'pi'): parser.ProcessingInstructionHandler = target.pi # let expat do the buffering, if supported try: parser.buffer_text = 1 except AttributeError: pass # use new-style attribute handling, if supported try: parser.ordered_attributes = 1 parser.specified_attributes = 1 if hasattr(target, 'start'): parser.StartElementHandler = self._start_list except AttributeError: pass self._doctype = None self.entity = {} try: self.version = "Expat %d.%d.%d" % expat.version_info except AttributeError: pass # unknown def _raiseerror(self, value): err = ParseError(value) err.code = value.code err.position = value.lineno, value.offset raise err def _fixname(self, key): # expand qname, and convert name string to ascii, if possible try: name = self._names[key] except KeyError: name = key if "}" in name: name = "{" + name self._names[key] = name return name def _start(self, tag, attrib_in): fixname = self._fixname tag = fixname(tag) attrib = {} for key, value in attrib_in.items(): attrib[fixname(key)] = value return self.target.start(tag, attrib) def _start_list(self, tag, attrib_in): fixname = self._fixname tag = fixname(tag) attrib = {} if attrib_in: for i in range(0, len(attrib_in), 2): attrib[fixname(attrib_in[i])] = attrib_in[i+1] return self.target.start(tag, attrib) def _end(self, tag): return self.target.end(self._fixname(tag)) def _default(self, text): prefix = text[:1] if prefix == "&": # deal with undefined entities try: data_handler = self.target.data except AttributeError: return try: data_handler(self.entity[text[1:-1]]) except KeyError: from xml.parsers import expat err = expat.error( "undefined entity %s: line %d, column %d" % (text, self.parser.ErrorLineNumber, self.parser.ErrorColumnNumber) ) err.code = 11 # XML_ERROR_UNDEFINED_ENTITY err.lineno = self.parser.ErrorLineNumber err.offset = self.parser.ErrorColumnNumber raise err elif prefix == "<" and text[:9] == "<!DOCTYPE": self._doctype = [] # inside a doctype declaration elif self._doctype is not None: # parse doctype contents if prefix == ">": self._doctype = None return text = text.strip() if not text: return self._doctype.append(text) n = len(self._doctype) if n > 2: type = self._doctype[1] if type == "PUBLIC" and n == 4: name, type, pubid, system = self._doctype if pubid: pubid = pubid[1:-1] elif type == "SYSTEM" and n == 3: name, type, system = self._doctype pubid = None else: return if hasattr(self.target, "doctype"): self.target.doctype(name, pubid, system[1:-1]) elif self.doctype != self._XMLParser__doctype: # warn about deprecated call self._XMLParser__doctype(name, pubid, system[1:-1]) 
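                    # the call above went through the base implementation so
                    # the DeprecationWarning fires; the next call dispatches
                    # to the subclass's own doctype() override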
self.doctype(name, pubid, system[1:-1]) self._doctype = None ## # (Deprecated) Handles a doctype declaration. # # @param name Doctype name. # @param pubid Public identifier. # @param system System identifier. def doctype(self, name, pubid, system): """This method of XMLParser is deprecated.""" warnings.warn( "This method of XMLParser is deprecated. Define doctype() " "method on the TreeBuilder target.", DeprecationWarning, ) # sentinel, if doctype is redefined in a subclass __doctype = doctype ## # Feeds data to the parser. # # @param data Encoded data. def feed(self, data): try: self.parser.Parse(data, 0) except self._error as v: self._raiseerror(v) ## # Finishes feeding data to the parser. # # @return An element structure. # @defreturn Element def close(self): try: self.parser.Parse("", 1) # end of data except self._error as v: self._raiseerror(v) try: close_handler = self.target.close except AttributeError: pass else: return close_handler() finally: # get rid of circular references del self.parser, self._parser del self.target, self._target # Import the C accelerators try: # Element, SubElement, ParseError, TreeBuilder, XMLParser from _elementtree import * except ImportError: pass else: # Overwrite 'ElementTree.parse' and 'iterparse' to use the C XMLParser class ElementTree(ElementTree): def parse(self, source, parser=None): close_source = False if not hasattr(source, 'read'): source = open(source, 'rb') close_source = True try: if parser is not None: while True: data = source.read(65536) if not data: break parser.feed(data) self._root = parser.close() else: parser = XMLParser() self._root = parser._parse(source) return self._root finally: if close_source: source.close() class iterparse: """Parses an XML section into an element tree incrementally. Reports what’s going on to the user. 'source' is a filename or file object containing XML data. 'events' is a list of events to report back. The supported events are the strings "start", "end", "start-ns" and "end-ns" (the "ns" events are used to get detailed namespace information). If 'events' is omitted, only "end" events are reported. 'parser' is an optional parser instance. If not given, the standard XMLParser parser is used. Returns an iterator providing (event, elem) pairs. """ root = None def __init__(self, file, events=None, parser=None): self._close_file = False if not hasattr(file, 'read'): file = open(file, 'rb') self._close_file = True self._file = file self._events = [] self._index = 0 self._error = None self.root = self._root = None if parser is None: parser = XMLParser(target=TreeBuilder()) self._parser = parser self._parser._setevents(self._events, events) def __next__(self): while True: try: item = self._events[self._index] self._index += 1 return item except IndexError: pass if self._error: e = self._error self._error = None raise e if self._parser is None: self.root = self._root if self._close_file: self._file.close() raise StopIteration # load event buffer del self._events[:] self._index = 0 data = self._file.read(16384) if data: try: self._parser.feed(data) except SyntaxError as exc: self._error = exc else: self._root = self._parser.close() self._parser = None def __iter__(self): return self # compatibility XMLTreeBuilder = XMLParser # workaround circular import. try: from ElementC14N import _serialize_c14n _serialize["c14n"] = _serialize_c14n except ImportError: pass
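##
# A minimal end-to-end sketch of this module's main entry points (the
# XML literal is illustrative, not from the original docs):
#
#     root = fromstring("<doc><item id='1'>hi</item></doc>")
#     root.find("item").get("id")         # '1'
#     tostring(root, encoding="unicode")  # serializes back to a string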
agpl-3.0
unclev/vk.unclev.ru
library/xmpp/protocol.py
1
54681
## protocol.py
##
## Copyright (C) 2003-2005 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.

# $Id: protocol.py, v1.64 2014/01/10 alkorgun Exp $

"""
Protocol module contains tools that are needed for processing of
XMPP-related data structures.
"""

import time

from .simplexml import Node, XML_ls, XMLescape, ustr

NS_ACTIVITY = "http://jabber.org/protocol/activity" # XEP-0108
NS_ADDRESS = "http://jabber.org/protocol/address" # XEP-0033
NS_ADMIN = "http://jabber.org/protocol/admin" # XEP-0133
NS_ADMIN_ADD_USER = NS_ADMIN + "#add-user" # XEP-0133
NS_ADMIN_DELETE_USER = NS_ADMIN + "#delete-user" # XEP-0133
NS_ADMIN_DISABLE_USER = NS_ADMIN + "#disable-user" # XEP-0133
NS_ADMIN_REENABLE_USER = NS_ADMIN + "#reenable-user" # XEP-0133
NS_ADMIN_END_USER_SESSION = NS_ADMIN + "#end-user-session" # XEP-0133
NS_ADMIN_GET_USER_PASSWORD = NS_ADMIN + "#get-user-password" # XEP-0133
NS_ADMIN_CHANGE_USER_PASSWORD = NS_ADMIN + "#change-user-password" # XEP-0133
NS_ADMIN_GET_USER_ROSTER = NS_ADMIN + "#get-user-roster" # XEP-0133
NS_ADMIN_GET_USER_LASTLOGIN = NS_ADMIN + "#get-user-lastlogin" # XEP-0133
NS_ADMIN_USER_STATS = NS_ADMIN + "#user-stats" # XEP-0133
NS_ADMIN_EDIT_BLACKLIST = NS_ADMIN + "#edit-blacklist" # XEP-0133
NS_ADMIN_EDIT_WHITELIST = NS_ADMIN + "#edit-whitelist" # XEP-0133
NS_ADMIN_REGISTERED_USERS_NUM = NS_ADMIN + "#get-registered-users-num" # XEP-0133
NS_ADMIN_DISABLED_USERS_NUM = NS_ADMIN + "#get-disabled-users-num" # XEP-0133
NS_ADMIN_ONLINE_USERS_NUM = NS_ADMIN + "#get-online-users-num" # XEP-0133
NS_ADMIN_ACTIVE_USERS_NUM = NS_ADMIN + "#get-active-users-num" # XEP-0133
NS_ADMIN_IDLE_USERS_NUM = NS_ADMIN + "#get-idle-users-num" # XEP-0133
NS_ADMIN_REGISTERED_USERS_LIST = NS_ADMIN + "#get-registered-users-list" # XEP-0133
NS_ADMIN_DISABLED_USERS_LIST = NS_ADMIN + "#get-disabled-users-list" # XEP-0133
NS_ADMIN_ONLINE_USERS_LIST = NS_ADMIN + "#get-online-users-list" # XEP-0133
NS_ADMIN_ACTIVE_USERS_LIST = NS_ADMIN + "#get-active-users-list" # XEP-0133
NS_ADMIN_IDLE_USERS_LIST = NS_ADMIN + "#get-idle-users-list" # XEP-0133
NS_ADMIN_ANNOUNCE = NS_ADMIN + "#announce" # XEP-0133
NS_ADMIN_SET_MOTD = NS_ADMIN + "#set-motd" # XEP-0133
NS_ADMIN_EDIT_MOTD = NS_ADMIN + "#edit-motd" # XEP-0133
NS_ADMIN_DELETE_MOTD = NS_ADMIN + "#delete-motd" # XEP-0133
NS_ADMIN_SET_WELCOME = NS_ADMIN + "#set-welcome" # XEP-0133
NS_ADMIN_DELETE_WELCOME = NS_ADMIN + "#delete-welcome" # XEP-0133
NS_ADMIN_EDIT_ADMIN = NS_ADMIN + "#edit-admin" # XEP-0133
NS_ADMIN_RESTART = NS_ADMIN + "#restart" # XEP-0133
NS_ADMIN_SHUTDOWN = NS_ADMIN + "#shutdown" # XEP-0133
NS_AGENTS = "jabber:iq:agents" # XEP-0094 (historical)
NS_AMP = "http://jabber.org/protocol/amp" # XEP-0079
NS_AMP_ERRORS = NS_AMP + "#errors" # XEP-0079
NS_AUTH = "jabber:iq:auth" # XEP-0078
NS_AVATAR = "jabber:iq:avatar" # XEP-0008 (historical)
NS_BIND = "urn:ietf:params:xml:ns:xmpp-bind" # RFC 3920
NS_BROWSE = "jabber:iq:browse" # XEP-0011 (historical)
NS_BYTESTREAM = "http://jabber.org/protocol/bytestreams" # XEP-0065
NS_CAPS = "http://jabber.org/protocol/caps" # XEP-0115
NS_CAPTCHA = "urn:xmpp:captcha" # XEP-0158
NS_CHATSTATES = "http://jabber.org/protocol/chatstates" # XEP-0085 NS_CLIENT = "jabber:client" # RFC 3921 NS_COMMANDS = "http://jabber.org/protocol/commands" # XEP-0050 NS_COMPONENT_ACCEPT = "jabber:component:accept" # XEP-0114 NS_COMPONENT_1 = "http://jabberd.jabberstudio.org/ns/component/1.0" # Jabberd2 NS_COMPRESS = "http://jabber.org/protocol/compress" # XEP-0138 NS_DATA = "jabber:x:data" # XEP-0004 NS_DATA_LAYOUT = "http://jabber.org/protocol/xdata-layout" # XEP-0141 NS_DATA_VALIDATE = "http://jabber.org/protocol/xdata-validate" # XEP-0122 NS_DELAY = "jabber:x:delay" # XEP-0091 (deprecated) NS_DIALBACK = "jabber:server:dialback" # RFC 3921 NS_DISCO = "http://jabber.org/protocol/disco" # XEP-0030 NS_DISCO_INFO = NS_DISCO + "#info" # XEP-0030 NS_DISCO_ITEMS = NS_DISCO + "#items" # XEP-0030 NS_ENCRYPTED = "jabber:x:encrypted" # XEP-0027 NS_EVENT = "jabber:x:event" # XEP-0022 (deprecated) NS_FEATURE = "http://jabber.org/protocol/feature-neg" # XEP-0020 NS_FILE = "http://jabber.org/protocol/si/profile/file-transfer" # XEP-0096 NS_GATEWAY = "jabber:iq:gateway" # XEP-0100 NS_GEOLOC = "http://jabber.org/protocol/geoloc" # XEP-0080 NS_GROUPCHAT = "gc-1.0" # XEP-0045 NS_HTTP_BIND = "http://jabber.org/protocol/httpbind" # XEP-0124 NS_IBB = "http://jabber.org/protocol/ibb" # XEP-0047 NS_INVISIBLE = "presence-invisible" # Jabberd2 NS_IQ = "iq" # Jabberd2 NS_LAST = "jabber:iq:last" # XEP-0012 NS_MEDIA = "urn:xmpp:media-element" # XEP-0158 NS_MESSAGE = "message" # Jabberd2 NS_MOOD = "http://jabber.org/protocol/mood" # XEP-0107 NS_MUC = "http://jabber.org/protocol/muc" # XEP-0045 NS_MUC_ADMIN = NS_MUC + "#admin" # XEP-0045 NS_MUC_OWNER = NS_MUC + "#owner" # XEP-0045 NS_MUC_UNIQUE = NS_MUC + "#unique" # XEP-0045 NS_MUC_USER = NS_MUC + "#user" # XEP-0045 NS_MUC_REGISTER = NS_MUC + "#register" # XEP-0045 NS_MUC_REQUEST = NS_MUC + "#request" # XEP-0045 NS_MUC_ROOMCONFIG = NS_MUC + "#roomconfig" # XEP-0045 NS_MUC_ROOMINFO = NS_MUC + "#roominfo" # XEP-0045 NS_MUC_ROOMS = NS_MUC + "#rooms" # XEP-0045 NS_MUC_TRAFIC = NS_MUC + "#traffic" # XEP-0045 NS_NICK = "http://jabber.org/protocol/nick" # XEP-0172 NS_OFFLINE = "http://jabber.org/protocol/offline" # XEP-0013 NS_OOB = "jabber:x:oob" # XEP-0066 NS_PHYSLOC = "http://jabber.org/protocol/physloc" # XEP-0112 NS_PRESENCE = "presence" # Jabberd2 NS_PRIVACY = "jabber:iq:privacy" # RFC 3921 NS_PRIVATE = "jabber:iq:private" # XEP-0049 NS_PUBSUB = "http://jabber.org/protocol/pubsub" # XEP-0060 NS_RC = "http://jabber.org/protocol/rc" # XEP-0146 NS_REGISTER = "jabber:iq:register" # XEP-0077 NS_RECEIPTS = "urn:xmpp:receipts" # XEP-0184 NS_ROSTER = "jabber:iq:roster" # RFC 3921 NS_ROSTERX = "http://jabber.org/protocol/rosterx" # XEP-0144 NS_RPC = "jabber:iq:rpc" # XEP-0009 NS_SASL = "urn:ietf:params:xml:ns:xmpp-sasl" # RFC 3920 NS_SEARCH = "jabber:iq:search" # XEP-0055 NS_SERVER = "jabber:server" # RFC 3921 NS_SESSION = "urn:ietf:params:xml:ns:xmpp-session" # RFC 3921 NS_SI = "http://jabber.org/protocol/si" # XEP-0096 NS_SI_PUB = "http://jabber.org/protocol/sipub" # XEP-0137 NS_SIGNED = "jabber:x:signed" # XEP-0027 NS_SOFTWAREINFO = "urn:xmpp:dataforms:softwareinfo" # XEP-0155 NS_STANZAS = "urn:ietf:params:xml:ns:xmpp-stanzas" # RFC 3920 NS_STATS = "http://jabber.org/protocol/stats" # XEP-0039 NS_STREAMS = "http://etherx.jabber.org/streams" # RFC 3920 NS_TIME = "jabber:iq:time" # XEP-0090 (deprecated) NS_TLS = "urn:ietf:params:xml:ns:xmpp-tls" # RFC 3920 NS_URN_ATTENTION = "urn:xmpp:attention:0" # XEP-0224 NS_URN_OOB = "urn:xmpp:bob" # XEP-0158 NS_URN_TIME = 
"urn:xmpp:time" # XEP-0202 NS_VACATION = "http://jabber.org/protocol/vacation" # XEP-0109 NS_VCARD = "vcard-temp" # XEP-0054 NS_VCARD_UPDATE = "vcard-temp:x:update" # XEP-0153 NS_VERSION = "jabber:iq:version" # XEP-0092 NS_WAITINGLIST = "http://jabber.org/protocol/waitinglist" # XEP-0130 NS_XHTML_IM = "http://jabber.org/protocol/xhtml-im" # XEP-0071 NS_XMPP_STREAMS = "urn:ietf:params:xml:ns:xmpp-streams" # RFC 3920 NS_PING = "urn:xmpp:ping" # XEP-0199 NS_MUC_FILTER = "http://jabber.ru/muc-filter" STREAM_NOT_AUTHORIZED = NS_XMPP_STREAMS + " not-authorized" STREAM_REMOTE_CONNECTION_FAILED = NS_XMPP_STREAMS + " remote-connection-failed" SASL_MECHANISM_TOO_WEAK = NS_SASL + " mechanism-too-weak" STREAM_XML_NOT_WELL_FORMED = NS_XMPP_STREAMS + " xml-not-well-formed" ERR_JID_MALFORMED = NS_STANZAS + " jid-malformed" STREAM_SEE_OTHER_HOST = NS_XMPP_STREAMS + " see-other-host" STREAM_BAD_NAMESPACE_PREFIX = NS_XMPP_STREAMS + " bad-namespace-prefix" ERR_SERVICE_UNAVAILABLE = NS_STANZAS + " service-unavailable" STREAM_CONNECTION_TIMEOUT = NS_XMPP_STREAMS + " connection-timeout" STREAM_UNSUPPORTED_VERSION = NS_XMPP_STREAMS + " unsupported-version" STREAM_IMPROPER_ADDRESSING = NS_XMPP_STREAMS + " improper-addressing" STREAM_UNDEFINED_CONDITION = NS_XMPP_STREAMS + " undefined-condition" SASL_NOT_AUTHORIZED = NS_SASL + " not-authorized" ERR_GONE = NS_STANZAS + " gone" SASL_TEMPORARY_AUTH_FAILURE = NS_SASL + " temporary-auth-failure" ERR_REMOTE_SERVER_NOT_FOUND = NS_STANZAS + " remote-server-not-found" ERR_UNEXPECTED_REQUEST = NS_STANZAS + " unexpected-request" ERR_RECIPIENT_UNAVAILABLE = NS_STANZAS + " recipient-unavailable" ERR_CONFLICT = NS_STANZAS + " conflict" STREAM_SYSTEM_SHUTDOWN = NS_XMPP_STREAMS + " system-shutdown" STREAM_BAD_FORMAT = NS_XMPP_STREAMS + " bad-format" ERR_SUBSCRIPTION_REQUIRED = NS_STANZAS + " subscription-required" STREAM_INTERNAL_SERVER_ERROR = NS_XMPP_STREAMS + " internal-server-error" ERR_NOT_AUTHORIZED = NS_STANZAS + " not-authorized" SASL_ABORTED = NS_SASL + " aborted" ERR_REGISTRATION_REQUIRED = NS_STANZAS + " registration-required" ERR_INTERNAL_SERVER_ERROR = NS_STANZAS + " internal-server-error" SASL_INCORRECT_ENCODING = NS_SASL + " incorrect-encoding" STREAM_HOST_GONE = NS_XMPP_STREAMS + " host-gone" STREAM_POLICY_VIOLATION = NS_XMPP_STREAMS + " policy-violation" STREAM_INVALID_XML = NS_XMPP_STREAMS + " invalid-xml" STREAM_CONFLICT = NS_XMPP_STREAMS + " conflict" STREAM_RESOURCE_CONSTRAINT = NS_XMPP_STREAMS + " resource-constraint" STREAM_UNSUPPORTED_ENCODING = NS_XMPP_STREAMS + " unsupported-encoding" ERR_NOT_ALLOWED = NS_STANZAS + " not-allowed" ERR_ITEM_NOT_FOUND = NS_STANZAS + " item-not-found" ERR_NOT_ACCEPTABLE = NS_STANZAS + " not-acceptable" STREAM_INVALID_FROM = NS_XMPP_STREAMS + " invalid-from" ERR_FEATURE_NOT_IMPLEMENTED = NS_STANZAS + " feature-not-implemented" ERR_BAD_REQUEST = NS_STANZAS + " bad-request" STREAM_INVALID_ID = NS_XMPP_STREAMS + " invalid-id" STREAM_HOST_UNKNOWN = NS_XMPP_STREAMS + " host-unknown" ERR_UNDEFINED_CONDITION = NS_STANZAS + " undefined-condition" SASL_INVALID_MECHANISM = NS_SASL + " invalid-mechanism" STREAM_RESTRICTED_XML = NS_XMPP_STREAMS + " restricted-xml" ERR_RESOURCE_CONSTRAINT = NS_STANZAS + " resource-constraint" ERR_REMOTE_SERVER_TIMEOUT = NS_STANZAS + " remote-server-timeout" SASL_INVALID_AUTHZID = NS_SASL + " invalid-authzid" ERR_PAYMENT_REQUIRED = NS_STANZAS + " payment-required" STREAM_INVALID_NAMESPACE = NS_XMPP_STREAMS + " invalid-namespace" ERR_REDIRECT = NS_STANZAS + " redirect" STREAM_UNSUPPORTED_STANZA_TYPE = 
NS_XMPP_STREAMS + " unsupported-stanza-type" ERR_FORBIDDEN = NS_STANZAS + " forbidden" ERRORS = { "urn:ietf:params:xml:ns:xmpp-sasl not-authorized": ["", "", "The authentication failed because the initiating entity did not provide valid credentials (this includes but is not limited to the case of an unknown username); sent in reply to a <response/> element or an <auth/> element with initial response data."], "urn:ietf:params:xml:ns:xmpp-stanzas payment-required": ["402", "auth", "The requesting entity is not authorized to access the requested service because payment is required."], "urn:ietf:params:xml:ns:xmpp-sasl mechanism-too-weak": ["", "", "The mechanism requested by the initiating entity is weaker than server policy permits for that initiating entity; sent in reply to a <response/> element or an <auth/> element with initial response data."], "urn:ietf:params:xml:ns:xmpp-streams unsupported-encoding": ["", "", "The initiating entity has encoded the stream in an encoding that is not supported by the server."], "urn:ietf:params:xml:ns:xmpp-stanzas remote-server-timeout": ["504", "wait", "A remote server or service specified as part or all of the JID of the intended recipient could not be contacted within a reasonable amount of time."], "urn:ietf:params:xml:ns:xmpp-streams remote-connection-failed": ["", "", "The server is unable to properly connect to a remote resource that is required for authentication or authorization."], "urn:ietf:params:xml:ns:xmpp-streams restricted-xml": ["", "", "The entity has attempted to send restricted XML features such as a comment, processing instruction, DTD, entity reference, or unescaped character."], "urn:ietf:params:xml:ns:xmpp-streams see-other-host": ["", "", "The server will not provide service to the initiating entity but is redirecting traffic to another host."], "urn:ietf:params:xml:ns:xmpp-streams xml-not-well-formed": ["", "", "The initiating entity has sent XML that is not well-formed."], "urn:ietf:params:xml:ns:xmpp-stanzas subscription-required": ["407", "auth", "The requesting entity is not authorized to access the requested service because a subscription is required."], "urn:ietf:params:xml:ns:xmpp-streams internal-server-error": ["", "", "The server has experienced a misconfiguration or an otherwise-undefined internal error that prevents it from servicing the stream."], "urn:ietf:params:xml:ns:xmpp-sasl invalid-mechanism": ["", "", "The initiating entity did not provide a mechanism or requested a mechanism that is not supported by the receiving entity; sent in reply to an <auth/> element."], "urn:ietf:params:xml:ns:xmpp-streams policy-violation": ["", "", "The entity has violated some local service policy."], "urn:ietf:params:xml:ns:xmpp-stanzas conflict": ["409", "cancel", "Access cannot be granted because an existing resource or session exists with the same name or address."], "urn:ietf:params:xml:ns:xmpp-streams unsupported-stanza-type": ["", "", "The initiating entity has sent a first-level child of the stream that is not supported by the server."], "urn:ietf:params:xml:ns:xmpp-sasl incorrect-encoding": ["", "", "The data provided by the initiating entity could not be processed because the [BASE64]Josefsson, S., The Base16, Base32, and Base64 Data Encodings, July 2003. 
encoding is incorrect (e.g., because the encoding does not adhere to the definition in Section 3 of [BASE64]Josefsson, S., The Base16, Base32, and Base64 Data Encodings, July 2003.); sent in reply to a <response/> element or an <auth/> element with initial response data."],
	"urn:ietf:params:xml:ns:xmpp-stanzas registration-required": ["407", "auth", "The requesting entity is not authorized to access the requested service because registration is required."],
	"urn:ietf:params:xml:ns:xmpp-streams invalid-id": ["", "", "The stream ID or dialback ID is invalid or does not match an ID previously provided."],
	"urn:ietf:params:xml:ns:xmpp-sasl invalid-authzid": ["", "", "The authzid provided by the initiating entity is invalid, either because it is incorrectly formatted or because the initiating entity does not have permissions to authorize that ID; sent in reply to a <response/> element or an <auth/> element with initial response data."],
	"urn:ietf:params:xml:ns:xmpp-stanzas bad-request": ["400", "modify", "The sender has sent XML that is malformed or that cannot be processed."],
	"urn:ietf:params:xml:ns:xmpp-streams not-authorized": ["", "", "The entity has attempted to send data before the stream has been authenticated, or otherwise is not authorized to perform an action related to stream negotiation."],
	"urn:ietf:params:xml:ns:xmpp-stanzas forbidden": ["403", "auth", "The requesting entity does not possess the required permissions to perform the action."],
	"urn:ietf:params:xml:ns:xmpp-sasl temporary-auth-failure": ["", "", "The authentication failed because of a temporary error condition within the receiving entity; sent in reply to an <auth/> element or <response/> element."],
	"urn:ietf:params:xml:ns:xmpp-streams invalid-namespace": ["", "", "The streams namespace name is something other than \"http://etherx.jabber.org/streams\" or the dialback namespace name is something other than \"jabber:server:dialback\"."],
	"urn:ietf:params:xml:ns:xmpp-stanzas feature-not-implemented": ["501", "cancel", "The feature requested is not implemented by the recipient or server and therefore cannot be processed."],
	"urn:ietf:params:xml:ns:xmpp-streams invalid-xml": ["", "", "The entity has sent invalid XML over the stream to a server that performs validation."],
	"urn:ietf:params:xml:ns:xmpp-stanzas item-not-found": ["404", "cancel", "The addressed JID or item requested cannot be found."],
	"urn:ietf:params:xml:ns:xmpp-streams host-gone": ["", "", "The value of the \"to\" attribute provided by the initiating entity in the stream header corresponds to a hostname that is no longer hosted by the server."],
	"urn:ietf:params:xml:ns:xmpp-stanzas recipient-unavailable": ["404", "wait", "The intended recipient is temporarily unavailable."],
	"urn:ietf:params:xml:ns:xmpp-stanzas not-acceptable": ["406", "cancel", "The recipient or server understands the request but is refusing to process it because it does not meet criteria defined by the recipient or server."],
	"urn:ietf:params:xml:ns:xmpp-streams invalid-from": ["cancel", "", "The JID or hostname provided in a \"from\" address does not match an authorized JID or validated domain negotiated between servers via SASL or dialback, or between a client and a server via authentication and resource authorization."],
	"urn:ietf:params:xml:ns:xmpp-streams bad-format": ["", "", "The entity has sent XML that cannot be processed."],
	"urn:ietf:params:xml:ns:xmpp-streams resource-constraint": ["", "", "The server lacks the system resources necessary to service the stream."],
"urn:ietf:params:xml:ns:xmpp-stanzas undefined-condition": ["500", "", "The condition is undefined."], "urn:ietf:params:xml:ns:xmpp-stanzas redirect": ["302", "modify", "The recipient or server is redirecting requests for this information to another entity."], "urn:ietf:params:xml:ns:xmpp-streams bad-namespace-prefix": ["", "", "The entity has sent a namespace prefix that is unsupported, or has sent no namespace prefix on an element that requires such a prefix."], "urn:ietf:params:xml:ns:xmpp-streams system-shutdown": ["", "", "The server is being shut down and all active streams are being closed."], "urn:ietf:params:xml:ns:xmpp-streams conflict": ["", "", "The server is closing the active stream for this entity because a new stream has been initiated that conflicts with the existing stream."], "urn:ietf:params:xml:ns:xmpp-streams connection-timeout": ["", "", "The entity has not generated any traffic over the stream for some period of time."], "urn:ietf:params:xml:ns:xmpp-stanzas jid-malformed": ["400", "modify", "The value of the \"to\" attribute in the sender's stanza does not adhere to the syntax defined in Addressing Scheme."], "urn:ietf:params:xml:ns:xmpp-stanzas resource-constraint": ["500", "wait", "The server or recipient lacks the system resources necessary to service the request."], "urn:ietf:params:xml:ns:xmpp-stanzas remote-server-not-found": ["404", "cancel", "A remote server or service specified as part or all of the JID of the intended recipient does not exist."], "urn:ietf:params:xml:ns:xmpp-streams unsupported-version": ["", "", "The value of the \"version\" attribute provided by the initiating entity in the stream header specifies a version of XMPP that is not supported by the server."], "urn:ietf:params:xml:ns:xmpp-streams host-unknown": ["", "", "The value of the \"to\" attribute provided by the initiating entity in the stream header does not correspond to a hostname that is hosted by the server."], "urn:ietf:params:xml:ns:xmpp-stanzas unexpected-request": ["400", "wait", "The recipient or server understood the request but was not expecting it at this time (e.g., the request was out of order)."], "urn:ietf:params:xml:ns:xmpp-streams improper-addressing": ["", "", "A stanza sent between two servers lacks a \"to\" or \"from\" attribute (or the attribute has no value)."], "urn:ietf:params:xml:ns:xmpp-stanzas not-allowed": ["405", "cancel", "The recipient or server does not allow any entity to perform the action."], "urn:ietf:params:xml:ns:xmpp-stanzas internal-server-error": ["500", "wait", "The server could not process the stanza because of a misconfiguration or an otherwise-undefined internal server error."], "urn:ietf:params:xml:ns:xmpp-stanzas gone": ["302", "modify", "The recipient or server can no longer be contacted at this address."], "urn:ietf:params:xml:ns:xmpp-streams undefined-condition": ["", "", "The error condition is not one of those defined by the other conditions in this list."], "urn:ietf:params:xml:ns:xmpp-stanzas service-unavailable": ["503", "cancel", "The server or recipient does not currently provide the requested service."], "urn:ietf:params:xml:ns:xmpp-stanzas not-authorized": ["401", "auth", "The sender must provide proper credentials before being allowed to perform the action, or has provided improper credentials."], "urn:ietf:params:xml:ns:xmpp-sasl aborted": ["", "", "The receiving entity acknowledges an <abort/> element sent by the initiating entity; sent in reply to the <abort/> element."] } _errorcodes = { "302": "redirect", "400": 
"unexpected-request", "401": "not-authorized", "402": "payment-required", "403": "forbidden", "404": "remote-server-not-found", "405": "not-allowed", "406": "not-acceptable", "407": "subscription-required", "409": "conflict", "500": "undefined-condition", "501": "feature-not-implemented", "503": "service-unavailable", "504": "remote-server-timeout" } def isResultNode(node): """ Returns true if the node is a positive reply. """ return (node and node.getType() == "result") def isGetNode(node): """ Returns true if the node is a positive reply. """ return (node and node.getType() == "get") def isSetNode(node): """ Returns true if the node is a positive reply. """ return (node and node.getType() == "set") def isErrorNode(node): """ Returns true if the node is a negative reply. """ return (node and node.getType() == "error") class NodeProcessed(Exception): """ Exception that should be raised by handler when the handling should be stopped. """ class StreamError(Exception): """ Base exception class for stream errors. """ class BadFormat(StreamError): pass class BadNamespacePrefix(StreamError): pass class Conflict(StreamError): pass class ConnectionTimeout(StreamError): pass class HostGone(StreamError): pass class HostUnknown(StreamError): pass class ImproperAddressing(StreamError): pass class InternalServerError(StreamError): pass class InvalidFrom(StreamError): pass class InvalidID(StreamError): pass class InvalidNamespace(StreamError): pass class InvalidXML(StreamError): pass class NotAuthorized(StreamError): pass class PolicyViolation(StreamError): pass class RemoteConnectionFailed(StreamError): pass class ResourceConstraint(StreamError): pass class RestrictedXML(StreamError): pass class SeeOtherHost(StreamError): pass class SystemShutdown(StreamError): pass class UndefinedCondition(StreamError): pass class UnsupportedEncoding(StreamError): pass class UnsupportedStanzaType(StreamError): pass class UnsupportedVersion(StreamError): pass class XMLNotWellFormed(StreamError): pass stream_exceptions = { "bad-format": BadFormat, "bad-namespace-prefix": BadNamespacePrefix, "conflict": Conflict, "connection-timeout": ConnectionTimeout, "host-gone": HostGone, "host-unknown": HostUnknown, "improper-addressing": ImproperAddressing, "internal-server-error": InternalServerError, "invalid-from": InvalidFrom, "invalid-id": InvalidID, "invalid-namespace": InvalidNamespace, "invalid-xml": InvalidXML, "not-authorized": NotAuthorized, "policy-violation": PolicyViolation, "remote-connection-failed": RemoteConnectionFailed, "resource-constraint": ResourceConstraint, "restricted-xml": RestrictedXML, "see-other-host": SeeOtherHost, "system-shutdown": SystemShutdown, "undefined-condition": UndefinedCondition, "unsupported-encoding": UnsupportedEncoding, "unsupported-stanza-type": UnsupportedStanzaType, "unsupported-version": UnsupportedVersion, "xml-not-well-formed": XMLNotWellFormed } class JID: """ JID object. JID can be built from string, modified, compared, serialized into string. """ def __init__(self, jid=None, node="", domain="", resource=""): """ Constructor. JID can be specified as string (jid argument) or as separate parts. 
Examples: JID("node@domain/resource") JID(node="node", domain="domain.org") """ if not jid and not domain: raise ValueError("JID must contain at least domain name") elif isinstance(jid, self.__class__): self.node, self.domain, self.resource = jid.node, jid.domain, jid.resource elif domain: self.node, self.domain, self.resource = node, domain, resource else: if jid.find("@") + 1: self.node, jid = jid.split("@", 1) else: self.node = "" if jid.find("/") + 1: self.domain, self.resource = jid.split("/", 1) else: self.domain, self.resource = jid, "" def getNode(self): """ Return the node part of the JID. """ return self.node def setNode(self, node): """ Set the node part of the JID to new value. Specify None to remove the node part. """ self.node = node.lower() def getDomain(self): """ Return the domain part of the JID. """ return self.domain def setDomain(self, domain): """ Set the domain part of the JID to new value. """ self.domain = domain.lower() def getResource(self): """ Return the resource part of the JID. """ return self.resource def setResource(self, resource): """ Set the resource part of the JID to new value. Specify None to remove the resource part. """ self.resource = resource def getStripped(self): """ Return the bare representation of JID. I.e. string value w/o resource. """ return self.__str__(0) def __eq__(self, other): """ Compare the JID to another instance or to string for equality. """ try: other = JID(other) except ValueError: return False return self.resource == other.resource and self.__str__(0) == other.__str__(0) def __ne__(self, other): """ Compare the JID to another instance or to string for non-equality. """ return not self.__eq__(other) def bareMatch(self, other): """ Compare the node and domain parts of the JID's for equality. """ return self.__str__(0) == JID(other).__str__(0) def __str__(self, wresource=1): """ Serialize JID into string. """ jid = "@".join((self.node, self.domain)) if self.node else self.domain if wresource and self.resource: jid = "/".join((jid, self.resource)) return jid def __hash__(self): """ Produce hash of the JID, Allows to use JID objects as keys of the dictionary. """ return hash(self.__str__()) class Protocol(Node): """ A "stanza" object class. Contains methods that are common for presences, iqs and messages. """ def __init__(self, name=None, to=None, typ=None, frm=None, attrs={}, payload=[], timestamp=None, xmlns=None, node=None): """ Constructor, name is the name of the stanza i.e. "message" or "presence" or "iq". to is the value of "to" attribure, "typ" - "type" attribute frn - from attribure, attrs - other attributes mapping, payload - same meaning as for simplexml payload definition timestamp - the time value that needs to be stamped over stanza xmlns - namespace of top stanza node node - parsed or unparsed stana to be taken as prototype. 
""" if not attrs: attrs = {} if to: attrs["to"] = to if frm: attrs["from"] = frm if typ: attrs["type"] = typ Node.__init__(self, tag=name, attrs=attrs, payload=payload, node=node) if not node and xmlns: self.setNamespace(xmlns) if self["to"]: self.setTo(self["to"]) if self["from"]: self.setFrom(self["from"]) if node and isinstance(node, self.__class__) and self.__class__ == node.__class__ and "id" in self.attrs: del self.attrs["id"] self.timestamp = None for x in self.getTags("x", namespace=NS_DELAY): try: if not self.getTimestamp() or x.getAttr("stamp") < self.getTimestamp(): self.setTimestamp(x.getAttr("stamp")) except Exception: pass if timestamp is not None: self.setTimestamp(timestamp) # To auto-timestamp stanza just pass timestamp="" def getTo(self): """ Return value of the "to" attribute. """ try: to = self["to"] except Exception: to = None return to def getFrom(self): """ Return value of the "from" attribute. """ try: frm = self["from"] except Exception: frm = None return frm def getTimestamp(self): """ Return the timestamp in the "yyyymmddThhmmss" format. """ return self.timestamp def getID(self): """ Return the value of the "id" attribute. """ return self.getAttr("id") def setTo(self, val): """ Set the value of the "to" attribute. """ self.setAttr("to", JID(val)) def getType(self): """ Return the value of the "type" attribute. """ return self.getAttr("type") def setFrom(self, val): """ Set the value of the "from" attribute. """ self.setAttr("from", JID(val)) def setType(self, val): """ Set the value of the "type" attribute. """ self.setAttr("type", val) def setID(self, val): """ Set the value of the "id" attribute. """ self.setAttr("id", val) def getError(self): """ Return the error-condition (if present) or the textual description of the error (otherwise). """ errtag = self.getTag("error") if errtag: for tag in errtag.getChildren(): if tag.getName() != "text": return tag.getName() return errtag.getData() def getErrorCode(self): """ Return the error code. Obsolette. """ return self.getTagAttr("error", "code") def setError(self, error, code=None): """ Set the error code. Obsolette. Use error-conditions instead. """ if code: if str(code) in _errorcodes.keys(): error = ErrorNode(_errorcodes[str(code)], text=error) else: error = ErrorNode(ERR_UNDEFINED_CONDITION, code=code, typ="cancel", text=error) elif isinstance(error, basestring): error = ErrorNode(error) self.setType("error") self.addChild(node=error) def setTimestamp(self, val=None): """ Set the timestamp. timestamp should be the yyyymmddThhmmss string. """ if not val: val = time.strftime("%Y%m%dT%H:%M:%S", time.gmtime()) self.timestamp = val self.setTag("x", {"stamp": self.timestamp}, namespace=NS_DELAY) def getProperties(self): """ Return the list of namespaces to which belongs the direct childs of element. """ props = [] for child in self.getChildren(): prop = child.getNamespace() if prop not in props: props.append(prop) return props def __setitem__(self, item, val): """ Set the item "item" to the value "val". """ if item in ["to", "from"]: val = JID(val) return self.setAttr(item, val) class Message(Protocol): """ XMPP Message stanza - "push" mechanism. """ def __init__(self, to=None, body=None, typ=None, subject=None, attrs={}, frm=None, payload=[], timestamp=None, xmlns=NS_CLIENT, node=None): """ Create message object. You can specify recipient, text of message, type of message any additional attributes, sender of the message, any additional payload (f.e. jabber:x:delay element) and namespace in one go. 
Alternatively you can pass in the other XML object as the "node" parameter to replicate it as message. """ Protocol.__init__(self, "message", to=to, typ=typ, attrs=attrs, frm=frm, payload=payload, timestamp=timestamp, xmlns=xmlns, node=node) if body: self.setBody(body) if subject: self.setSubject(subject) def getBody(self): """ Returns text of the message. """ return self.getTagData("body") def getSubject(self): """ Returns subject of the message. """ return self.getTagData("subject") def getThread(self): """ Returns thread of the message. """ return self.getTagData("thread") def setBody(self, val): """ Sets the text of the message. """ self.setTagData("body", val) def setSubject(self, val): """ Sets the subject of the message. """ self.setTagData("subject", val) def setThread(self, val): """ Sets the thread of the message. """ self.setTagData("thread", val) def buildReply(self, text=None): """ Builds and returns another message object with the specified text. The to, from and thread properties of the new message are pre-set as a reply to this message. """ msg = Message(to=self.getFrom(), frm=self.getTo(), body=text) thr = self.getThread() if thr: msg.setThread(thr) return msg class Presence(Protocol): """ XMPP Presence object. """ def __init__(self, to=None, typ=None, priority=None, show=None, status=None, attrs={}, frm=None, timestamp=None, payload=[], xmlns=NS_CLIENT, node=None): """ Create presence object. You can specify recipient, type of presence, priority, show and status values, any additional attributes, sender of the presence, timestamp, any additional payload (f.e. jabber:x:delay element) and namespace in one go. Alternatively you can pass in the other XML object as the "node" parameter to replicate it as presence. """ Protocol.__init__(self, "presence", to=to, typ=typ, attrs=attrs, frm=frm, payload=payload, timestamp=timestamp, xmlns=xmlns, node=node) if priority: self.setPriority(priority) if show: self.setShow(show) if status: self.setStatus(status) def getPriority(self): """ Returns the priority of the presence. """ return self.getTagData("priority") def getShow(self): """ Returns the show value of the presence. """ return self.getTagData("show") def getStatus(self): """ Returns the status string of the presence. """ return self.getTagData("status") def setPriority(self, val): """ Sets the priority of the presence. """ self.setTagData("priority", val) def setShow(self, val): """ Sets the show value of the presence. """ self.setTagData("show", val) def setStatus(self, val): """ Sets the status string of the presence. """ self.setTagData("status", val) def _muc_getItemAttr(self, tag, attr): for xtag in self.getTags("x", namespace=NS_MUC_USER): for child in xtag.getTags(tag): return child.getAttr(attr) def _muc_getSubTagDataAttr(self, tag, attr): for xtag in self.getTags("x", namespace=NS_MUC_USER): for child in xtag.getTags("item"): for cchild in child.getTags(tag): return cchild.getData(), cchild.getAttr(attr) return None, None def getRole(self): """ Returns the presence role (for groupchat). """ return self._muc_getItemAttr("item", "role") def getAffiliation(self): """Returns the presence affiliation (for groupchat). """ return self._muc_getItemAttr("item", "affiliation") def getNick(self): """ Returns the nick value (for nick change in groupchat). """ return self._muc_getItemAttr("item", "nick") def getJid(self): """ Returns the presence jid (for groupchat). """ return self._muc_getItemAttr("item", "jid") def getReason(self): """ Returns the reason of the presence (for groupchat).
""" return self._muc_getSubTagDataAttr("reason", "")[0] def getActor(self): """ Returns the reason of the presence (for groupchat). """ return self._muc_getSubTagDataAttr("actor", "jid")[1] def getStatusCode(self): """ Returns the status code of the presence (for groupchat). """ return self._muc_getItemAttr("status", "code") class Iq(Protocol): """ XMPP Iq object - get/set dialog mechanism. """ def __init__(self, typ=None, queryNS=None, attrs={}, to=None, frm=None, payload=[], xmlns=NS_CLIENT, node=None): """ Create Iq object. You can specify type, query namespace any additional attributes, recipient of the iq, sender of the iq, any additional payload (f.e. jabber:x:data node) and namespace in one go. Alternatively you can pass in the other XML object as the "node" parameted to replicate it as an iq. """ Protocol.__init__(self, "iq", to=to, typ=typ, attrs=attrs, frm=frm, xmlns=xmlns, node=node) if payload: self.setQueryPayload(payload) if queryNS: self.setQueryNS(queryNS) def getQuery(self): """ Returns the query node. """ return self.getTag("query") def getQueryNS(self): """ Returns the namespace of the "query" child element. """ tag = self.getTag("query") if tag: return tag.getNamespace() def getQuerynode(self): """ Returns the "node" attribute value of the "query" child element. """ return self.getTagAttr("query", "node") def getQueryPayload(self): """ Returns the "query" child element payload. """ tag = self.getTag("query") if tag: return tag.getPayload() def getQueryChildren(self): """ Returns the "query" child element child nodes. """ tag = self.getTag("query") if tag: return tag.getChildren() def setQuery(self, name=None): """ Changes the name of the query node, creates it if needed. Keep the existing name if none is given (use "query" if it's a creation). Returns the query node. """ query = self.getQuery() if query is None: query = self.addChild("query") if name is not None: query.setName(name) return query def setQueryNS(self, namespace): """ Set the namespace of the "query" child element. """ self.setTag("query").setNamespace(namespace) def setQueryPayload(self, payload): """ Set the "query" child element payload. """ self.setTag("query").setPayload(payload) def setQuerynode(self, node): """ Set the "node" attribute value of the "query" child element. """ self.setTagAttr("query", "node", node) def buildReply(self, typ): """ Builds and returns another Iq object of specified type. The to, from and query child node of new Iq are pre-set as reply to this Iq. """ iq = Iq(typ, to=self.getFrom(), frm=self.getTo(), attrs={"id": self.getID()}) if self.getTag("query"): iq.setQueryNS(self.getQueryNS()) if self.getTagAttr("query", "node"): iq.setQuerynode(self.getQuerynode()) return iq class ErrorNode(Node): """ XMPP-style error element. In the case of stanza error should be attached to XMPP stanza. In the case of stream-level errors should be used separately. """ def __init__(self, name, code=None, typ=None, text=None): """ Create new error node object. Mandatory parameter: name - name of error condition. Optional parameters: code, typ, text. Used for backwards compartibility with older jabber protocol. 
""" if name in ERRORS: cod, type, txt = ERRORS[name] ns = name.split()[0] else: cod, ns, type, txt = "500", NS_STANZAS, "cancel", "" if typ: type = typ if code: cod = code if text: txt = text Node.__init__(self, "error", {}, [Node(name)]) if type: self.setAttr("type", type) if not cod: self.setName("stream:error") if txt: self.addChild(node=Node(ns + " text", {}, [txt])) if cod: self.setAttr("code", cod) class Error(Protocol): """ Used to quickly transform received stanza into error reply. """ def __init__(self, node, error, reply=1): """ Create error reply basing on the received "node" stanza and the "error" error condition. If the "node" is not the received stanza but locally created ("to" and "from" fields needs not swapping) specify the "reply" argument as false. """ if reply: Protocol.__init__(self, to=node.getFrom(), frm=node.getTo(), node=node) else: Protocol.__init__(self, node=node) self.setError(error) if node.getType() == "error": self.__str__ = self.__dupstr__ def __dupstr__(self, dup1=None, dup2=None): """ Dummy function used as preventor of creating error node in reply to error node. I.e. you will not be able to serialize "double" error into string. """ return "" class DataField(Node): """ This class is used in the DataForm class to describe the single data item. If you are working with jabber:x:data (XEP-0004, XEP-0068, XEP-0122) then you will need to work with instances of this class. """ def __init__(self, name=None, value=None, typ=None, required=0, label=None, desc=None, options=[], node=None): """ Create new data field of specified name,value and type. Also "required", "desc" and "options" fields can be set. Alternatively other XML object can be passed in as the "node" parameted to replicate it as a new datafiled. """ Node.__init__(self, "field", node=node) if name: self.setVar(name) if isinstance(value, (list, tuple)): self.setValues(value) elif value: self.setValue(value) if typ: self.setType(typ) # elif not typ and not node: # self.setType("text-single") if required: self.setRequired(required) if label: self.setLabel(label) if desc: self.setDesc(desc) if options: self.setOptions(options) def setRequired(self, req=1): """ Change the state of the "required" flag. """ if req: self.setTag("required") else: try: self.delChild("required") except ValueError: return None def isRequired(self): """ Returns in this field a required one. """ return self.getTag("required") def setLabel(self, label): """ Set the label of this field. """ self.setAttr("label", label) def getLabel(self): """ Return the label of this field. """ return self.getAttr("label") def setDesc(self, desc): """ Set the description of this field. """ self.setTagData("desc", desc) def getDesc(self): """ Return the description of this field. """ return self.getTagData("desc") def setValue(self, val): """ Set the value of this field. """ self.setTagData("value", val) def getValue(self): return self.getTagData("value") def setValues(self, ls): """ Set the values of this field as values-list. Replaces all previous filed values! If you need to just add a value - use addValue method. """ while self.getTag("value"): self.delChild("value") for val in ls: self.addValue(val) def addValue(self, val): """ Add one more value to this field. Used in "get" iq's or such. """ self.addChild("value", {}, [val]) def getValues(self): """ Return the list of values associated with this field. 
""" ret = [] for tag in self.getTags("value"): ret.append(tag.getData()) return ret def getOptions(self): """ Return label-option pairs list associated with this field. """ ret = [] for tag in self.getTags("option"): ret.append([tag.getAttr("label"), tag.getTagData("value")]) return ret def setOptions(self, ls): """ Set label-option pairs list associated with this field. """ while self.getTag("option"): self.delChild("option") for opt in ls: self.addOption(opt) def addOption(self, opt): """ Add one more label-option pair to this field. """ if isinstance(opt, basestring): self.addChild("option").setTagData("value", opt) else: self.addChild("option", {"label": opt[0]}).setTagData("value", opt[1]) def getType(self): """ Get type of this field. """ return self.getAttr("type") def setType(self, val): """ Set type of this field. """ return self.setAttr("type", val) def getVar(self): """ Get "var" attribute value of this field. """ return self.getAttr("var") def setVar(self, val): """ Set "var" attribute value of this field. """ return self.setAttr("var", val) class DataReported(Node): """ This class is used in the DataForm class to describe the "reported data field" data items which are used in "multiple item form results" (as described in XEP-0004). Represents the fields that will be returned from a search. This information is useful when you try to use the jabber:iq:search namespace to return dynamic form information. """ def __init__(self, node=None): """ Create new empty "reported data" field. However, note that, according XEP-0004: * It MUST contain one or more DataFields. * Contained DataFields SHOULD possess a "type" and "label" attribute in addition to "var" attribute * Contained DataFields SHOULD NOT contain a <value/> element. Alternatively other XML object can be passed in as the "node" parameted to replicate it as a new dataitem. """ Node.__init__(self, "reported", node=node) if node: newkids = [] for n in self.getChildren(): if n.getName() == "field": newkids.append(DataField(node=n)) else: newkids.append(n) self.kids = newkids def getField(self, name): """ Return the datafield object with name "name" (if exists). """ return self.getTag("field", attrs={"var": name}) def setField(self, name, typ=None, label=None, desc=None, options=[]): """ Create if nessessary or get the existing datafield object with name "name" and return it. If created, attributes "type" and "label" are applied to new datafield. """ field = self.getField(name) if not field: field = self.addChild(node=DataField(name, None, typ, 0, label, desc=desc, options=options)) return field def asDict(self): """ Represent dataitem as simple dictionary mapping of datafield names to their values. """ ret = {} for field in self.getTags("field"): name = field.getAttr("var") typ = field.getType() if isinstance(typ, basestring) and typ.endswith("-multi"): val = [] for i in field.getTags("value"): val.append(i.getData()) else: val = field.getTagData("value") ret[name] = val if self.getTag("instructions"): ret["instructions"] = self.getInstructions() return ret def __getitem__(self, name): """ Simple dictionary interface for getting datafields values by their names. """ item = self.getField(name) if item: return item.getValue() raise IndexError("No such field") def __setitem__(self, name, val): """ Simple dictionary interface for setting datafields values by their names. 
""" return self.setField(name).setValue(val) class DataItem(Node): """ This class is used in the DataForm class to describe data items which are used in "multiple item form results" (as described in XEP-0004). """ def __init__(self, node=None): """ Create new empty data item. However, note that, according XEP-0004, DataItem MUST contain ALL DataFields described in DataReported. Alternatively other XML object can be passed in as the "node" parameted to replicate it as a new dataitem. """ Node.__init__(self, "item", node=node) if node: newkids = [] for n in self.getChildren(): if n.getName() == "field": newkids.append(DataField(node=n)) else: newkids.append(n) self.kids = newkids def getField(self, name): """ Return the datafield object with name "name" (if exists). """ return self.getTag("field", attrs={"var": name}) def setField(self, name, value=None, typ=None, desc=None, options=[]): """ Create if nessessary or get the existing datafield object with name "name" and return it. """ field = self.getField(name) if not field: field = self.addChild(node=DataField(name, value, typ, desc=desc, options=options)) return field def asDict(self): """ Represent dataitem as simple dictionary mapping of datafield names to their values. """ ret = {} for field in self.getTags("field"): name = field.getAttr("var") typ = field.getType() if isinstance(typ, basestring) and typ.endswith("-multi"): val = [] for i in field.getTags("value"): val.append(i.getData()) else: val = field.getTagData("value") ret[name] = val if self.getTag("instructions"): ret["instructions"] = self.getInstructions() return ret def __getitem__(self, name): """ Simple dictionary interface for getting datafields values by their names. """ item = self.getField(name) if item: return item.getValue() raise IndexError("No such field") def __setitem__(self, name, val): """ Simple dictionary interface for setting datafields values by their names. """ return self.setField(name).setValue(val) class DataForm(Node): """ DataForm class. Used for manipulating dataforms in XMPP. Relevant XEPs: 0004, 0068, 0122. Can be used in disco, pub-sub and many other applications. """ def __init__(self, typ=None, data=[], title=None, node=None): """ Create new dataform of type "typ"; "data" is the list of DataReported, DataItem and DataField instances that this dataform contains; "title" is the title string. You can specify the "node" argument as the other node to be used as base for constructing this dataform. Title and instructions is optional and SHOULD NOT contain newlines. Several instructions MAY be present. "typ" can be one of ("form" | "submit" | "cancel" | "result" ) "typ" of reply iq can be ( "result" | "set" | "set" | "result" ) respectively. "cancel" form can not contain any fields. All other forms contains AT LEAST one field. "title" MAY be included in forms of type "form" and "result". 
""" Node.__init__(self, "x", node=node) if node: newkids = [] for n in self.getChildren(): if n.getName() == "field": newkids.append(DataField(node=n)) elif n.getName() == "item": newkids.append(DataItem(node=n)) elif n.getName() == "reported": newkids.append(DataReported(node=n)) else: newkids.append(n) self.kids = newkids if typ: self.setType(typ) self.setNamespace(NS_DATA) if title: self.setTitle(title) if isinstance(data, dict): newdata = [] for name in data.keys(): newdata.append(DataField(name, data[name])) data = newdata for child in data: if isinstance(child, basestring): self.addInstructions(child) elif isinstance(child, DataField): self.kids.append(child) elif isinstance(child, DataItem): self.kids.append(child) elif isinstance(child, DataReported): self.kids.append(child) else: self.kids.append(DataField(node=child)) def getType(self): """ Return the type of dataform. """ return self.getAttr("type") def setType(self, typ): """ Set the type of dataform. """ self.setAttr("type", typ) def getTitle(self): """ Return the title of dataform. """ return self.getTagData("title") def setTitle(self, text): """ Set the title of dataform. """ self.setTagData("title", text) def getInstructions(self): """ Return the instructions of dataform. """ return self.getTagData("instructions") def setInstructions(self, text): """ Set the instructions of dataform. """ self.setTagData("instructions", text) def addInstructions(self, text): """ Add one more instruction to the dataform. """ self.addChild("instructions", {}, [text]) def getField(self, name): """ Return the datafield object with name "name" (if exists). """ return self.getTag("field", attrs={"var": name}) def setField(self, name, value=None, typ=None, desc=None, options=[]): """ Create if nessessary or get the existing datafield object with name "name" and return it. """ field = self.getField(name) if not field: field = self.addChild(node=DataField(name, value, typ, desc=desc, options=options)) return field def asDict(self): """ Represent dataform as simple dictionary mapping of datafield names to their values. """ ret = {} for field in self.getTags("field"): name = field.getAttr("var") typ = field.getType() if isinstance(typ, basestring) and typ.endswith("-multi"): val = [] for i in field.getTags("value"): val.append(i.getData()) else: val = field.getTagData("value") ret[name] = val if self.getTag("instructions"): ret["instructions"] = self.getInstructions() return ret def __getitem__(self, name): """ Simple dictionary interface for getting datafields values by their names. """ item = self.getField(name) if item: return item.getValue() raise IndexError("No such field") def __setitem__(self, name, val): """ Simple dictionary interface for setting datafields values by their names. """ return self.setField(name).setValue(val)
mit
Zen-CODE/kivy
kivy/uix/bubble.py
42
12590
''' Bubble ====== .. versionadded:: 1.1.0 .. image:: images/bubble.jpg :align: right The Bubble widget is a form of menu or a small popup where the menu options are stacked either vertically or horizontally. The :class:`Bubble` contains an arrow pointing in the direction you choose. Simple example -------------- .. include:: ../../examples/widgets/bubble_test.py :literal: Customize the Bubble -------------------- You can choose the direction in which the arrow points:: Bubble(arrow_pos='top_mid') The widgets added to the Bubble are ordered horizontally by default, like a Boxlayout. You can change that by:: orientation = 'vertical' To add items to the bubble:: bubble = Bubble(orientation = 'vertical') bubble.add_widget(your_widget_instance) To remove items:: bubble.remove_widget(widget) or bubble.clear_widgets() To access the list of children, use content.children:: bubble.content.children .. warning:: This is important! Do not use bubble.children To change the appearance of the bubble:: bubble.background_color = (1, 0, 0, .5) #50% translucent red bubble.border = [0, 0, 0, 0] background_image = 'path/to/background/image' arrow_image = 'path/to/arrow/image' ''' __all__ = ('Bubble', 'BubbleButton', 'BubbleContent') from kivy.uix.image import Image from kivy.uix.widget import Widget from kivy.uix.scatter import Scatter from kivy.uix.gridlayout import GridLayout from kivy.uix.boxlayout import BoxLayout from kivy.uix.button import Button from kivy.properties import ObjectProperty, StringProperty, OptionProperty, \ ListProperty, BooleanProperty from kivy.clock import Clock from kivy.base import EventLoop from kivy.metrics import dp class BubbleButton(Button): '''A button intended for use in a Bubble widget. You can use a "normal" button class, but it will not look good unless the background is changed. Rather use this BubbleButton widget that is already defined and provides a suitable background for you. ''' pass class BubbleContent(GridLayout): pass class Bubble(GridLayout): '''Bubble class. See module documentation for more information. ''' background_color = ListProperty([1, 1, 1, 1]) '''Background color, in the format (r, g, b, a). :attr:`background_color` is a :class:`~kivy.properties.ListProperty` and defaults to [1, 1, 1, 1]. ''' border = ListProperty([16, 16, 16, 16]) '''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage` graphics instruction. Used with the :attr:`background_image`. It should be used when using custom backgrounds. It must be a list of 4 values: (top, right, bottom, left). Read the BorderImage instructions for more information about how to use it. :attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults to (16, 16, 16, 16) ''' background_image = StringProperty( 'atlas://data/images/defaulttheme/bubble') '''Background image of the bubble. :attr:`background_image` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/bubble'. ''' arrow_image = StringProperty( 'atlas://data/images/defaulttheme/bubble_arrow') ''' Image of the arrow pointing to the bubble. :attr:`arrow_image` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/bubble_arrow'. ''' show_arrow = BooleanProperty(True) ''' Indicates whether to show arrow. .. versionadded:: 1.8.0 :attr:`show_arrow` is a :class:`~kivy.properties.BooleanProperty` and defaults to `True`. 
''' arrow_pos = OptionProperty('bottom_mid', options=( 'left_top', 'left_mid', 'left_bottom', 'top_left', 'top_mid', 'top_right', 'right_top', 'right_mid', 'right_bottom', 'bottom_left', 'bottom_mid', 'bottom_right')) '''Specifies the position of the arrow relative to the bubble. Can be one of: left_top, left_mid, left_bottom top_left, top_mid, top_right right_top, right_mid, right_bottom bottom_left, bottom_mid, bottom_right. :attr:`arrow_pos` is a :class:`~kivy.properties.OptionProperty` and defaults to 'bottom_mid'. ''' content = ObjectProperty(None) '''This is the object where the main content of the bubble is held. :attr:`content` is a :class:`~kivy.properties.ObjectProperty` and defaults to 'None'. ''' orientation = OptionProperty('horizontal', options=('horizontal', 'vertical')) '''This specifies the manner in which the children inside bubble are arranged. Can be one of 'vertical' or 'horizontal'. :attr:`orientation` is a :class:`~kivy.properties.OptionProperty` and defaults to 'horizontal'. ''' limit_to = ObjectProperty(None, allownone=True) '''Specifies the widget to which the bubbles position is restricted. .. versionadded:: 1.6.0 :attr:`limit_to` is a :class:`~kivy.properties.ObjectProperty` and defaults to 'None'. ''' def __init__(self, **kwargs): self._prev_arrow_pos = None self._arrow_layout = BoxLayout() self._bk_img = Image( source=self.background_image, allow_stretch=True, keep_ratio=False, color=self.background_color) self.background_texture = self._bk_img.texture self._arrow_img = Image(source=self.arrow_image, allow_stretch=True, color=self.background_color) self.content = content = BubbleContent(parent=self) super(Bubble, self).__init__(**kwargs) content.parent = None self.add_widget(content) self.on_arrow_pos() def add_widget(self, *l): content = self.content if content is None: return if l[0] == content or l[0] == self._arrow_img\ or l[0] == self._arrow_layout: super(Bubble, self).add_widget(*l) else: content.add_widget(*l) def remove_widget(self, *l): content = self.content if not content: return if l[0] == content or l[0] == self._arrow_img\ or l[0] == self._arrow_layout: super(Bubble, self).remove_widget(*l) else: content.remove_widget(l[0]) def clear_widgets(self, **kwargs): content = self.content if not content: return if kwargs.get('do_super', False): super(Bubble, self).clear_widgets() else: content.clear_widgets() def on_show_arrow(self, instance, value): self._arrow_img.opacity = int(value) def on_parent(self, instance, value): Clock.schedule_once(self._update_arrow) def on_pos(self, instance, pos): lt = self.limit_to if lt: self.limit_to = None if lt is EventLoop.window: x = y = 0 top = lt.height right = lt.width else: x, y = lt.x, lt.y top, right = lt.top, lt.right self.x = max(self.x, x) self.right = min(self.right, right) self.top = min(self.top, top) self.y = max(self.y, y) self.limit_to = lt def on_background_image(self, *l): self._bk_img.source = self.background_image def on_background_color(self, *l): if self.content is None: return self._arrow_img.color = self._bk_img.color = self.background_color def on_orientation(self, *l): content = self.content if not content: return if self.orientation[0] == 'v': content.cols = 1 content.rows = 99 else: content.cols = 99 content.rows = 1 def on_arrow_image(self, *l): self._arrow_img.source = self.arrow_image def on_arrow_pos(self, *l): self_content = self.content if not self_content: Clock.schedule_once(self.on_arrow_pos) return if self_content not in self.children: Clock.schedule_once(self.on_arrow_pos) 
return self_arrow_pos = self.arrow_pos if self._prev_arrow_pos == self_arrow_pos: return self._prev_arrow_pos = self_arrow_pos self_arrow_layout = self._arrow_layout self_arrow_layout.clear_widgets() self_arrow_img = self._arrow_img self._sctr = self._arrow_img self.clear_widgets(do_super=True) self_content.parent = None self_arrow_img.size_hint = (1, None) self_arrow_img.height = dp(self_arrow_img.texture_size[1]) self_arrow_img.pos = 0, 0 widget_list = [] arrow_list = [] parent = self_arrow_img.parent if parent: parent.remove_widget(self_arrow_img) if self_arrow_pos[0] == 'b' or self_arrow_pos[0] == 't': self.cols = 1 self.rows = 3 self_arrow_layout.orientation = 'horizontal' self_arrow_img.width = self.width / 3 self_arrow_layout.size_hint = (1, None) self_arrow_layout.height = self_arrow_img.height if self_arrow_pos[0] == 'b': if self_arrow_pos == 'bottom_mid': widget_list = (self_content, self_arrow_img) else: if self_arrow_pos == 'bottom_left': arrow_list = (self_arrow_img, Widget(), Widget()) elif self_arrow_pos == 'bottom_right': #add two dummy widgets arrow_list = (Widget(), Widget(), self_arrow_img) widget_list = (self_content, self_arrow_layout) else: sctr = Scatter(do_translation=False, rotation=180, do_rotation=False, do_scale=False, size_hint=(None, None), size=self_arrow_img.size) sctr.add_widget(self_arrow_img) if self_arrow_pos == 'top_mid': #add two dummy widgets arrow_list = (Widget(), sctr, Widget()) elif self_arrow_pos == 'top_left': arrow_list = (sctr, Widget(), Widget()) elif self_arrow_pos == 'top_right': arrow_list = (Widget(), Widget(), sctr) widget_list = (self_arrow_layout, self_content) elif self_arrow_pos[0] == 'l' or self_arrow_pos[0] == 'r': self.cols = 3 self.rows = 1 self_arrow_img.width = self.height / 3 self_arrow_layout.orientation = 'vertical' self_arrow_layout.cols = 1 self_arrow_layout.size_hint = (None, 1) self_arrow_layout.width = self_arrow_img.height rotation = -90 if self_arrow_pos[0] == 'l' else 90 self._sctr = sctr = Scatter(do_translation=False, rotation=rotation, do_rotation=False, do_scale=False, size_hint=(None, None), size=(self_arrow_img.size)) sctr.add_widget(self_arrow_img) if self_arrow_pos[-4:] == '_top': arrow_list = (Widget(size_hint=(1, .07)), sctr, Widget(size_hint=(1, .3))) elif self_arrow_pos[-4:] == '_mid': arrow_list = (Widget(), sctr, Widget()) Clock.schedule_once(self._update_arrow) elif self_arrow_pos[-7:] == '_bottom': arrow_list = (Widget(), Widget(), sctr) if self_arrow_pos[0] == 'l': widget_list = (self_arrow_layout, self_content) else: widget_list = (self_content, self_arrow_layout) # add widgets to arrow_layout add = self_arrow_layout.add_widget for widg in arrow_list: add(widg) # add widgets to self add = self.add_widget for widg in widget_list: add(widg) def _update_arrow(self, *dt): if self.arrow_pos in ('left_mid', 'right_mid'): self._sctr.center_y = self._arrow_layout.center_y
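
# Editor's illustrative sketch (not part of the original module): a minimal
# app showing the add_widget routing documented above. Assumes a working
# Kivy installation; the widget only renders once the event loop runs.
if __name__ == '__main__':
    from kivy.app import App

    class BubbleApp(App):
        def build(self):
            bubble = Bubble(arrow_pos='top_mid', size_hint=(None, None),
                            size=(200, 60))
            # These land in bubble.content, not bubble.children.
            bubble.add_widget(BubbleButton(text='Copy'))
            bubble.add_widget(BubbleButton(text='Paste'))
            return bubble

    BubbleApp().run()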
mit
virtualopensystems/neutron
neutron/tests/unit/test_config.py
2
2478
# Copyright (c) 2012 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import mock from oslo.config import cfg from neutron.common import config # noqa from neutron.tests import base class ConfigurationTest(base.BaseTestCase): def setup_config(self): # don't use default config pass def test_defaults(self): self.assertEqual('0.0.0.0', cfg.CONF.bind_host) self.assertEqual(9696, cfg.CONF.bind_port) self.assertEqual('api-paste.ini', cfg.CONF.api_paste_config) self.assertEqual('', cfg.CONF.api_extensions_path) self.assertEqual('policy.json', cfg.CONF.policy_file) self.assertEqual('keystone', cfg.CONF.auth_strategy) self.assertIsNone(cfg.CONF.core_plugin) self.assertEqual(0, len(cfg.CONF.service_plugins)) self.assertEqual('fa:16:3e:00:00:00', cfg.CONF.base_mac) self.assertEqual(16, cfg.CONF.mac_generation_retries) self.assertTrue(cfg.CONF.allow_bulk) self.assertEqual(5, cfg.CONF.max_dns_nameservers) self.assertEqual(20, cfg.CONF.max_subnet_host_routes) relative_dir = os.path.join(os.path.dirname(__file__), '..', '..', '..') absolute_dir = os.path.abspath(relative_dir) self.assertEqual(absolute_dir, cfg.CONF.state_path) self.assertEqual(86400, cfg.CONF.dhcp_lease_duration) self.assertFalse(cfg.CONF.allow_overlapping_ips) self.assertEqual('neutron', cfg.CONF.control_exchange) def test_load_paste_app_not_found(self): self.config(api_paste_config='no_such_file.conf') with mock.patch.object(cfg.CONF, 'find_file', return_value=None) as ff: e = self.assertRaises(cfg.ConfigFilesNotFoundError, config.load_paste_app, 'app') ff.assert_called_once_with('no_such_file.conf') self.assertEqual(['no_such_file.conf'], e.config_files)
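
# Editor's sketch (not part of the original suite): one more hypothetical
# test belonging inside ConfigurationTest above, mirroring how
# test_load_paste_app_not_found uses self.config to override an option.
# 'noauth' is an assumed illustration value.
#
#     def test_override_auth_strategy(self):
#         self.config(auth_strategy='noauth')
#         self.assertEqual('noauth', cfg.CONF.auth_strategy)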
apache-2.0
janelia-idf/hybridizer
tests/adc_to_volume.py
4
5112
# -*- coding: utf-8 -*- from __future__ import print_function, division import matplotlib.pyplot as plot import numpy from numpy.polynomial.polynomial import polyfit,polyadd,Polynomial import yaml INCHES_PER_ML = 0.078 VOLTS_PER_ADC_UNIT = 0.0049 def load_numpy_data(path): with open(path,'r') as fid: header = fid.readline().rstrip().split(',') dt = numpy.dtype({'names':header,'formats':['S25']*len(header)}) numpy_data = numpy.loadtxt(path,dtype=dt,delimiter=",",skiprows=1) return numpy_data # ----------------------------------------------------------------------------------------- if __name__ == '__main__': # Load VA data data_file = 'hall_effect_data_va.csv' hall_effect_data_va = load_numpy_data(data_file) distances_va = numpy.float64(hall_effect_data_va['distance']) A1_VA = numpy.float64(hall_effect_data_va['A1']) A9_VA = numpy.float64(hall_effect_data_va['A9']) A4_VA = numpy.float64(hall_effect_data_va['A4']) A12_VA = numpy.float64(hall_effect_data_va['A12']) A2_VA = numpy.float64(hall_effect_data_va['A2']) A10_VA = numpy.float64(hall_effect_data_va['A10']) A5_VA = numpy.float64(hall_effect_data_va['A5']) A13_VA = numpy.float64(hall_effect_data_va['A13']) # Massage VA data volumes_va = distances_va/INCHES_PER_ML A1_VA = numpy.reshape(A1_VA,(-1,1)) A9_VA = numpy.reshape(A9_VA,(-1,1)) A4_VA = numpy.reshape(A4_VA,(-1,1)) A12_VA = numpy.reshape(A12_VA,(-1,1)) A2_VA = numpy.reshape(A2_VA,(-1,1)) A10_VA = numpy.reshape(A10_VA,(-1,1)) A5_VA = numpy.reshape(A5_VA,(-1,1)) A13_VA = numpy.reshape(A13_VA,(-1,1)) data_va = numpy.hstack((A1_VA,A9_VA,A4_VA,A12_VA,A2_VA,A10_VA,A5_VA,A13_VA)) data_va = data_va/VOLTS_PER_ADC_UNIT # Load OA data data_file = 'hall_effect_data_oa.csv' hall_effect_data_oa = load_numpy_data(data_file) distances_oa = numpy.float64(hall_effect_data_oa['distance']) A9_OA = numpy.float64(hall_effect_data_oa['A9']) A10_OA = numpy.float64(hall_effect_data_oa['A10']) A11_OA = numpy.float64(hall_effect_data_oa['A11']) A12_OA = numpy.float64(hall_effect_data_oa['A12']) # Massage OA data volumes_oa = distances_oa/INCHES_PER_ML A9_OA = numpy.reshape(A9_OA,(-1,1)) A10_OA = numpy.reshape(A10_OA,(-1,1)) A11_OA = numpy.reshape(A11_OA,(-1,1)) A12_OA = numpy.reshape(A12_OA,(-1,1)) data_oa = numpy.hstack((A9_OA,A10_OA,A11_OA,A12_OA)) data_oa = data_oa/VOLTS_PER_ADC_UNIT # Create figure fig = plot.figure() fig.suptitle('hall effect sensors',fontsize=14,fontweight='bold') fig.subplots_adjust(top=0.85) colors = ['b','g','r','c','m','y','k','b'] markers = ['o','o','o','o','o','o','o','^'] # Axis 1 ax1 = fig.add_subplot(121) for column_index in range(0,data_va.shape[1]): color = colors[column_index] marker = markers[column_index] ax1.plot(data_va[:,column_index],volumes_va,marker=marker,linestyle='--',color=color) # for column_index in range(0,data_oa.shape[1]): # color = colors[column_index] # marker = markers[column_index] # ax1.plot(data_oa[:,column_index],volumes_oa,marker=marker,linestyle='--',color=color) ax1.set_xlabel('mean signals (ADC units)') ax1.set_ylabel('volume (ml)') ax1.grid(True) # Axis 2 for column_index in range(0,data_va.shape[1]): data_va[:,column_index] -= data_va[:,column_index].min() MAX_VA = 120 data_va = data_va[numpy.all(data_va<MAX_VA,axis=1)] length = data_va.shape[0] volumes_va = volumes_va[-length:] # for column_index in range(0,data_oa.shape[1]): # data_oa[:,column_index] -= data_oa[:,column_index].max() ax2 = fig.add_subplot(122) for column_index in range(0,data_va.shape[1]): color = colors[column_index] marker = markers[column_index] 
ax2.plot(data_va[:,column_index],volumes_va,marker=marker,linestyle='--',color=color) # for column_index in range(0,data_oa.shape[1]): # color = colors[column_index] # marker = markers[column_index] # ax2.plot(data_oa[:,column_index],volumes_oa,marker=marker,linestyle='--',color=color) ax2.set_xlabel('offset mean signals (ADC units)') ax2.set_ylabel('volume (ml)') ax2.grid(True) order = 3 sum_va = None for column_index in range(0,data_va.shape[1]): coefficients_va = polyfit(data_va[:,column_index],volumes_va,order) if sum_va is None: sum_va = coefficients_va else: sum_va = polyadd(sum_va,coefficients_va) average_va = sum_va/data_va.shape[1] with open('adc_to_volume_va.yaml', 'w') as f: yaml.dump(average_va, f, default_flow_style=False) round_digits = 8 average_va = [round(i,round_digits) for i in average_va] poly = Polynomial(average_va) ys_va = poly(data_va[:,-1]) ax2.plot(data_va[:,-1],ys_va,'r',linewidth=3) ax2.text(5,7.5,r'$v = c_0 + c_1s + c_2s^2 + c_3s^3$',fontsize=20) ax2.text(5,6.5,str(average_va),fontsize=18,color='r') plot.show()
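
# Editor's sketch (not part of the original script): applying the fitted
# coefficients saved above to convert an offset ADC reading to a volume.
# Hypothetical helper; coefficients are ordered lowest power first, which
# is what polyfit produces and Polynomial expects.
def adc_to_volume(offset_adc_signal, coefficients):
    """Convert an offset mean ADC reading to a volume in ml."""
    return Polynomial(coefficients)(offset_adc_signal)

# e.g. volume_ml = adc_to_volume(50, average_va)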
bsd-3-clause
BootstrapHeroes/django-shopify
django_shopify/shopify_app/services/plan_config_service.py
1
2510
from base import BaseService from shopify_app.models import PlanConfig from django.conf import settings from shopify_app.config import DEFAULTS from datetime import datetime from shopify_api import APIWrapper class PlanConfigService(BaseService): entity = PlanConfig def _get_charge_common_data(self, shop, plan_config): """ Returns the common data for the charge API call """ data = { "name": plan_config.name if plan_config.name else "Default", "price": plan_config.billing_amount if plan_config.billing_amount else 10.0, "return_url": "http:%s/shop/billing/?shop=%s&plan_config=%s" % (getattr(settings, "HOST", DEFAULTS["HOST"]), shop.id, plan_config.id), } if getattr(settings, "TEST", True): data["test"] = True return data def _create_charge(self, shop_model, api_entity, data): return APIWrapper(shop_model, log=True).create(api_entity, data) def one_time_charge(self, shop, plan_config): """ Generates a one time charge for this app """ data = self._get_charge_common_data(shop, plan_config) return self._create_charge(shop, "application_charge", data) def recurring_charge(self, shop, plan_config): """ Generates a recurring charge for this app """ data = self._get_charge_common_data(shop, plan_config) default_trial_days = plan_config.trial_period_days if plan_config.trial_period_days else 15 # trial days start counting from the first install current_trial_days = (datetime.utcnow().replace(tzinfo=None) - shop.created_at.replace(tzinfo=None)).days if current_trial_days < default_trial_days: data["trial_days"] = default_trial_days - current_trial_days return self._create_charge(shop, "recurring_application_charge", data) def confirm_data(self, shop, plan_config): """ Makes the request to generate either a one time charge or recurring charge and returns the response results. If there are errors in the request response it raises an exception. """ if plan_config.billing_type == "O": response = self.one_time_charge(shop, plan_config) else: response = self.recurring_charge(shop, plan_config) if "errors" in response: raise Exception(str(response["errors"])) return response
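
# Editor's sketch (not part of the original module): a hypothetical call
# site for this service. `shop` and `plan_config` stand for model instances
# looked up by the caller; what `response` contains depends on APIWrapper.
def confirm_billing(shop, plan_config):
    service = PlanConfigService()
    response = service.confirm_data(shop, plan_config)
    # A typical flow would then redirect the merchant to the charge's
    # confirmation URL returned by Shopify (assumption; depends on the
    # APIWrapper response format).
    return response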
gpl-3.0
darjus-amzn/boto
boto/mturk/__init__.py
782
1108
# Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/ # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, dis- # tribute, sublicense, and/or sell copies of the Software, and to permit # persons to whom the Software is furnished to do so, subject to the fol- # lowing conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. #
mit
ChanduERP/odoo
addons/l10n_tr/__openerp__.py
259
2056
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Turkey - Accounting', 'version': '1.beta', 'category': 'Localization/Account Charts', 'description': """ Türkiye için Tek düzen hesap planı şablonu OpenERP Modülü. ========================================================== Bu modül kurulduktan sonra, Muhasebe yapılandırma sihirbazı çalışır * Sihirbaz sizden hesap planı şablonu, planın kurulacağı şirket, banka hesap bilgileriniz, ilgili para birimi gibi bilgiler isteyecek. """, 'author': 'Ahmet Altınışık', 'maintainer':'https://launchpad.net/~openerp-turkey', 'website':'https://launchpad.net/openerp-turkey', 'depends': [ 'account', 'base_vat', 'account_chart', ], 'data': [ 'account_code_template.xml', 'account_tdhp_turkey.xml', 'account_tax_code_template.xml', 'account_chart_template.xml', 'account_tax_template.xml', 'l10n_tr_wizard.xml', ], 'demo': [], 'installable': True, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
tdtrask/ansible
test/units/modules/network/nxos/test_nxos_command.py
57
4175
# (c) 2016 Red Hat Inc. # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import json from ansible.compat.tests.mock import patch from ansible.modules.network.nxos import nxos_command from .nxos_module import TestNxosModule, load_fixture, set_module_args class TestNxosCommandModule(TestNxosModule): module = nxos_command def setUp(self): super(TestNxosCommandModule, self).setUp() self.mock_run_commands = patch('ansible.modules.network.nxos.nxos_command.run_commands') self.run_commands = self.mock_run_commands.start() def tearDown(self): super(TestNxosCommandModule, self).tearDown() self.mock_run_commands.stop() def load_fixtures(self, commands=None, device=''): def load_from_file(*args, **kwargs): module, commands = args output = list() for item in commands: try: obj = json.loads(item['command']) command = obj['command'] except ValueError: command = item['command'] filename = '%s.txt' % str(command).replace(' ', '_') output.append(load_fixture('nxos_command', filename)) return output self.run_commands.side_effect = load_from_file def test_nxos_command_simple(self): set_module_args(dict(commands=['show version'])) result = self.execute_module() self.assertEqual(len(result['stdout']), 1) self.assertTrue(result['stdout'][0].startswith('Cisco')) def test_nxos_command_multiple(self): set_module_args(dict(commands=['show version', 'show version'])) result = self.execute_module() self.assertEqual(len(result['stdout']), 2) self.assertTrue(result['stdout'][0].startswith('Cisco')) def test_nxos_command_wait_for(self): wait_for = 'result[0] contains "NX-OS"' set_module_args(dict(commands=['show version'], wait_for=wait_for)) self.execute_module() def test_nxos_command_wait_for_fails(self): wait_for = 'result[0] contains "test string"' set_module_args(dict(commands=['show version'], wait_for=wait_for)) self.execute_module(failed=True) self.assertEqual(self.run_commands.call_count, 10) def test_nxos_command_retries(self): wait_for = 'result[0] contains "test string"' set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2)) self.execute_module(failed=True) self.assertEqual(self.run_commands.call_count, 2) def test_nxos_command_match_any(self): wait_for = ['result[0] contains "Cisco"', 'result[0] contains "test string"'] set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any')) self.execute_module() def test_nxos_command_match_all(self): wait_for = ['result[0] contains "Cisco"', 'result[0] contains "image file"'] set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all')) self.execute_module() def test_nxos_command_match_all_failure(self): wait_for = ['result[0] contains "Cisco"', 'result[0] contains "test string"'] commands = ['show version', 'show version'] set_module_args(dict(commands=commands, wait_for=wait_for, match='all')) 
self.execute_module(failed=True)
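
# Editor's sketch (not part of the original suite): a further conditional
# test in the same style, belonging inside TestNxosCommandModule above. It
# only combines parameters the existing tests already exercise.
    def test_nxos_command_match_any_with_retries(self):
        wait_for = ['result[0] contains "test string"',
                    'result[0] contains "Cisco"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for,
                             match='any', retries=2))
        self.execute_module()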
gpl-3.0
nickweinberg/werewolf-slackbot
rtmbot.py
1
6723
#!/usr/bin/env python

import sys
sys.dont_write_bytecode = True

import glob
import yaml
import json
import os
import sys
import time
import logging
from argparse import ArgumentParser

from slackclient import SlackClient


def dbg(debug_string):
    if debug:
        logging.info(debug_string)


USER_DICT = {}


class RtmBot(object):
    def __init__(self, token):
        self.last_ping = 0
        self.token = token
        self.bot_plugins = []
        self.slack_client = None
        self.channel = None  # only want bot in one channel

    def connect(self):
        """Convenience method that creates Server instance"""
        self.slack_client = SlackClient(self.token)
        self.slack_client.rtm_connect()

    def start(self):
        self.connect()
        self.load_plugins()
        while True:
            for reply in self.slack_client.rtm_read():
                self.input(reply)
            self.crons()
            self.output()
            self.autoping()
            time.sleep(.1)

    def get_users_in_channel(self):
        print(self.channel)
        channel_info = self.slack_client.api_call("channels.info", channel=self.channel)
        info = json.loads(channel_info)
        members = info['channel']['members']
        print(members)
        self.create_user_dict(members)

    def autoping(self):
        #hardcode the interval to 3 seconds
        now = int(time.time())
        if now > self.last_ping + 3:
            self.slack_client.server.ping()
            self.last_ping = now

    def input(self, data):
        if "type" in data:
            function_name = "process_" + data["type"]
            dbg("got {}".format(function_name))
            for plugin in self.bot_plugins:
                plugin.register_jobs()
                plugin.do(function_name, data)

    def output(self):
        for plugin in self.bot_plugins:
            limiter = False
            for output in plugin.do_output():
                channel = self.slack_client.server.channels.find(output[0])
                if channel != None and output[1] != None:
                    if limiter == True:
                        time.sleep(.1)
                        limiter = False
                    message = output[1].encode('ascii', 'ignore')
                    channel.send_message("{}".format(message))
                    limiter = True

    def crons(self):
        for plugin in self.bot_plugins:
            plugin.do_jobs()

    def load_plugins(self):
        for plugin in glob.glob(directory+'/plugins/*'):
            sys.path.insert(0, plugin)
            sys.path.insert(0, directory+'/plugins/')
        for plugin in glob.glob(directory+'/plugins/*.py') + glob.glob(directory+'/plugins/*/*.py'):
            logging.info(plugin)
            name = plugin.split('/')[-1][:-3]
#            try:
            self.bot_plugins.append(Plugin(name))
#            except:
#                print "error loading plugin %s" % name


class Plugin(object):
    def __init__(self, name, plugin_config={}):
        self.name = name
        self.jobs = []
        self.module = __import__(name)
        self.register_jobs()
        self.outputs = []
        if name in config:
            logging.info("config found for: " + name)
            self.module.config = config[name]
        if 'setup' in dir(self.module):
            self.module.setup()

    def register_jobs(self):
        if 'crontable' in dir(self.module):
            for interval, function in self.module.crontable:
                self.jobs.append(Job(interval, eval("self.module."+function)))
            logging.info(self.module.crontable)
            self.module.crontable = []
        else:
            self.module.crontable = []

    def do(self, function_name, data):
        if function_name in dir(self.module):
            #this makes the plugin fail with stack trace in debug mode
            if not debug:
                try:
                    eval("self.module."+function_name)(data)
                except:
                    dbg("problem in module {} {}".format(function_name, data))
            else:
                eval("self.module."+function_name)(data)
        if "catch_all" in dir(self.module):
            try:
                self.module.catch_all(data)
            except:
                dbg("problem in catch all")

    def do_jobs(self):
        for job in self.jobs:
            job.check()

    def do_output(self):
        output = []
        while True:
            if 'outputs' in dir(self.module):
                if len(self.module.outputs) > 0:
                    logging.info("output from {}".format(self.module))
                    output.append(self.module.outputs.pop(0))
                else:
                    break
            else:
                self.module.outputs = []
        return output


class Job(object):
    def __init__(self, interval, function):
        self.function = function
        self.interval = interval
        self.lastrun = 0

    def __str__(self):
        return "{} {} {}".format(self.function, self.interval, self.lastrun)

    def __repr__(self):
        return self.__str__()

    def check(self):
        if self.lastrun + self.interval < time.time():
            if not debug:
                try:
                    self.function()
                except:
                    dbg("problem")
            else:
                self.function()
            self.lastrun = time.time()
            pass


class UnknownChannel(Exception):
    pass


def main_loop():
    if "LOGFILE" in config:
        logging.basicConfig(filename=config["LOGFILE"], level=logging.INFO, format='%(asctime)s %(message)s')
    logging.info(directory)
    try:
        bot.start()
    except KeyboardInterrupt:
        sys.exit(0)
    except:
        logging.exception('OOPS')


def parse_args():
    parser = ArgumentParser()
    parser.add_argument(
        '-c', '--config', help='Full path to config file.', metavar='path'
    )
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    directory = os.path.dirname(sys.argv[0])
    if not directory.startswith('/'):
        directory = os.path.abspath("{}/{}".format(os.getcwd(), directory))
    config = yaml.load(file(args.config or 'rtmbot.conf', 'r'))
    debug = config["DEBUG"]
    bot = RtmBot(config["SLACK_TOKEN"])
    bot.channel = config["CHANNEL"]
    site_plugins = []
    files_currently_downloading = []
    job_hash = {}
    if config.has_key("DAEMON"):
        if config["DAEMON"]:
            import daemon
            with daemon.DaemonContext():
                main_loop()
    main_loop()
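# A minimal sketch of a plugin module the loader above would accept
# (hypothetical file plugins/example.py; the channel id is made up).
# The hooks are exactly the ones Plugin()/load_plugins() look for:
# 'crontable' holds [interval_seconds, "function_name"] pairs resolved
# with eval(), 'outputs' holds [channel, message] pairs drained by
# do_output(), and process_<event_type>(data) receives each raw RTM
# event dict.

crontable = [[10, "say_hello"]]
outputs = []

def say_hello():
    # queued message is picked up by RtmBot.output()
    outputs.append(["C0XXXXXXX", "hello from a cron job"])

def process_message(data):
    # runs for every RTM event whose "type" is "message"
    outputs.append([data["channel"], "you said: {}".format(data.get("text", ""))])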
mit
nitramkaroh/OOFEM
tools/unv2oofem/unv2oofem.py
1
12763
#!/usr/bin/python
# -*- coding: utf-8 -*-
from unv2x import *
from abaqus2x import *
from oofemctrlreader import *
import sys
import time

if __name__=='__main__':
    helpmsg=""" Usage: unv2oofem.py unvfile ctrlfile oofemfile

What it does: read unvfile, create an internal FEM object structure
in memory and writes the oofem native input file

The ctrlfile specifies additional properties required by oofem
See http://www.oofem.org/wiki/doku.php?id=unv2oofem:unv2oofem for more info.

The format of ctrl file is following: (lines beginning with '#' are comments)
Output file record
Job description record
Analysis record
Domain record
Output manager record
ncrosssect # nmat # nbc # nic # nltf # nset # nxfemman #
cross section records
material records
boundary condition records
initial condition records
load time function records
extractor records
set records

Assignment of properties to nodes and elements is based on association
with some unv group. The same mechanism is valid for assignment of
boundary conditions (edge, surface) load. The syntax is following:

group name1 [name2] [name3] ...
nodeprop "nodal_attributes_appended_to_nodal_records" [set INT]
elemprop "element_attributes_appended_to_element_records" [set INT]
etype[unv_etype] oofem_etype #provides mapping between unv and oofem element types

By default, all nodes will be exported, elements are exported only when
associated to some group with valid element mapping

Enjoy.
"""
    print """
UNV2OOFEM: Converts UNV file from Salome to OOFEM native file format
(C) 2009 Borek Patzak
"""
    t1 = time.time()
    if len(sys.argv)==4:
        unvfile=sys.argv[1]
        ctrlfile=sys.argv[2]
        oofemfile=sys.argv[3]
        of=open(oofemfile,'w')
        # read file in FEM object structure
        fileExtension = unvfile.split('.')
        if (fileExtension[-1].lower()=='unv'): # Salome output file
            Parser=UNVParser(unvfile)
        elif (fileExtension[-1].lower()=='inp'): # Abaqus output file
            Parser=AbaqusParser(unvfile)
        else:
            print "Unknown extension of input file %s" % fileExtension[-1].lower()
            exit(0)
        print 'Parsing mesh file %s' % sys.argv[1],
        FEM=Parser.parse()
        print "done"
        print "Detected node groups:",
        for i in FEM.nodesets:
            print i.name.strip(),
        print
        print "Detected element groups:",
        for i in FEM.elemsets:
            print i.name.strip(),
        print
        # read oofem ctrl file
        CTRL=CTRLParser(ctrlfile, Parser.mapping())
        print 'Parsing ctrl file %s' % sys.argv[2]
        CTRL.parse(FEM)
        print "done"
        # write files in native oofem format
        print 'Writing oofem file %s' % sys.argv[3]
        # write oofem header
        of.write(CTRL.header)

        #store elements in meshElements list.
        #Reason: need to assign boundaryLoad to elements, which may be read after elements
        meshElements = []
        #create auxiliary array of element numbers to be searched for boundaryLoads
        elemNotBoundary = []
        # List for sets containing boundaries
        boundarySets=[]
        for elem in FEM.elems: #loop through all unv elements
            #resolve element properties
            properties=""
            for igroup in elem.oofem_groups:
                #print igroup.name
                properties+=igroup.oofem_properties
            #Do output if oofem_elemtype resolved and not BoundaryLoads
            if ( elem.oofem_elemtype):
                if(CTRL.oofem_elemProp[elem.oofem_elemtype].name != 'RepresentsBoundaryLoad'):
                    #Check if unv element and OOFEM element have the same amount of nodes
                    if (elem.nnodes != len(CTRL.oofem_elemProp[elem.oofem_elemtype].nodeMask)):
                        print "\nUnv element #%d has %d nodes, which should be mapped on OOFEM element \"%s\" with %d nodes" % \
                            (elem.id, elem.nnodes, CTRL.oofem_elemProp[elem.oofem_elemtype].name, len(CTRL.oofem_elemProp[elem.oofem_elemtype].nodeMask))
                        exit(0)
                    elemNotBoundary.append(elem)
                    dat = elem.oofem_outputData
                    dat.append(CTRL.oofem_elemProp[elem.oofem_elemtype].name)
                    dat.append("%-5d" % elem.id)
                    dat.append("nodes")
                    dat.append("%-3d" % elem.nnodes)
                    for n in range(elem.nnodes):
                        mask = CTRL.oofem_elemProp[elem.oofem_elemtype].nodeMask[n]
                        try:
                            dat.append("%-3d" % elem.cntvt[mask])
                        except:
                            print "Exception in mapping nodes in unv element number %d, nodes %s" % (elem.id, elem.cntvt)
                            exit(0)
                    #dat.extend(["%-3d" % x for x in elem.cntvt])
                    dat.append(properties)
                    meshElements.append([])

        #Assign BoundaryLoads to elements (corresponds to edge and face loads).
        #We need to loop over all elements and to check whether they have
        #assigned loads. This is a quite time consuming but robust algorithm.
        for belem in FEM.elems: #loop over all elements from unv file
            #print belem.id, belem.oofem_elemtype, CTRL.oofem_elemProp[belem.oofem_elemtype].name
            if CTRL.oofem_elemProp[belem.oofem_elemtype].name == 'RepresentsBoundaryLoad': #found element which represents boundary load
                nodesOnBoundary = belem.cntvt
                nodesOnBoundary.sort()
                for elem in elemNotBoundary: #loop over, e.g. triangular elements, in order to find which element belem is a boundary to
                    cnt=0
                    for n in range(len(nodesOnBoundary)):
                        if(elem.cntvt.count(int(nodesOnBoundary[n]))):
                            cnt = cnt+1
                    if (cnt==len(nodesOnBoundary)): #found eligible element to which assign b.c. Now find which edge/face it is.
                        success = 0
                        if(belem.type==11 or belem.type==22): #elements representing EDGE loads
                            mask = CTRL.oofem_elemProp[elem.oofem_elemtype].edgeMask
                        else: #face loads
                            mask = CTRL.oofem_elemProp[elem.oofem_elemtype].faceMask
                        for i in range(len(mask)):
                            nodesInMask = [] #list of nodes which are extracted according to mask
                            for x in mask[i]:
                                nodesInMask.append(elem.cntvt[x])
                            #We need to compare both arrays nodesInMask and nodesOnBoundary.
                            #If they contain the same node numbers, we found the edge/face.
                            nodesInMask.sort()
                            if(nodesInMask==nodesOnBoundary): #both lists are sorted so they can be compared
                                success = 1
                                #since boundary element may be in more unv groups, we need to find corresponding ctrl group
                                for bel in belem.oofem_groups:
                                    #print "%d '%s' '%s'" % (len(belem.oofem_groups), bel.name.rstrip(), bel.oofem_groupNameForLoads)
                                    if (bel.name.rstrip() == bel.oofem_groupNameForLoads):
                                        #build a new int list, which reflects load numbers and edges/faces
                                        if (len(bel.oofem_boundaryLoadsNum) > 0):
                                            loadNum = bel.oofem_boundaryLoadsNum
                                            newList=[-1]*(2*len(loadNum))
                                            for j in range(len(loadNum)):
                                                newList[2*j] = loadNum[j]
                                                newList[2*j+1] = i+1
                                            #print newList
                                            elem.oofem_bLoads+=newList
                                            print "Boundary load \"%s\" found for element %d " % (bel.name.rstrip('\n'), elem.id)
                                            #print bel.name, elem.id, elem.oofem_bLoads
                                        if (bel.oofem_sets):
                                            print "Set \"%s\" found for boundary of element %d " % (bel.name.rstrip('\n'), elem.id)
                                            setNum = bel.oofem_sets
                                            # setID, element id, element side
                                            for thisSet in setNum:
                                                boundarySets.append([thisSet, elem.id, i+1])
                        if(success==0):
                            print "Can not assign edge/face load \"%s\" to unv element %d" % (bel.name, elem.id)

        #write component record
        of.write('ndofman %d nelem %d ncrosssect %d nmat %d nbc %d nic %d nltf %d nset %d nxfemman %d\n' % (FEM.nnodes, len(elemNotBoundary), CTRL.ncrosssect, CTRL.nmat, CTRL.nbc, CTRL.nic, CTRL.nltf, CTRL.nset, CTRL.nxfemman))
        #write nodes
        for node in FEM.nodes:
            #resolve nodal properties
            outputLine="node %-5d coords %-2d" % (node.id, len(node.coords))
            for coord in node.coords:
                outputLine+= "% -8g " % coord
            properties=""
            for igroup in node.oofem_groups:
                if(len(properties)>0 and properties[-1]!=" "): #insert white space if necessary
                    properties+=" "
                properties+=igroup.oofem_properties
            outputLine+=properties
            # write nodal record
            of.write(('%s\n') % (outputLine))
        for elem in elemNotBoundary:
            str = ' '.join(elem.oofem_outputData)
            #Add the list of boundaryLoads if it exists
            if(elem.oofem_bLoads):
                str+=" BoundaryLoads %d " % len(elem.oofem_bLoads)
                str+= ' '.join(["%d" % el for el in elem.oofem_bLoads])
            of.write('%s\n' % str)
        # write final sections
        sl=CTRL.footer.splitlines()
        for s in sl:
            words=s.split()
            if len(words)==0: #skip empty lines
                continue
            if (words[0].lower()=='set'):
                setID=int(words[1])
                if (words[2].lower()=='nodes'):
                    nodelist=[]
                    for nodeset in FEM.nodesets:
                        for oofemset in nodeset.oofem_sets:
                            if (setID==oofemset):
                                nodelist.extend(nodeset.items)
                    setElements=list(set(nodelist))
                elif (words[2].lower()=='elements'):
                    ellist=[]
                    for elemset in FEM.elemsets:
                        #print elemset.id
                        if setID == elemset.id:
                            ellist.extend(elemset.items)
                        for oofemset in elemset.oofem_sets:
                            if (setID==oofemset):
                                ellist.extend(elemset.items)
                    setElements=list(set(ellist))
                elif (words[2].lower()=='elementboundaries'):
                    setElements=[]
                    for thisSet in boundarySets:
                        if (thisSet[0]==int(words[1])):
                            setElements.extend([thisSet[1], thisSet[2]])
                of.write('%s %s %s %u ' % (words[0], words[1], words[2], len(setElements)))
                for setElement in setElements:
                    of.write('%u ' % setElement)
                of.write('\n')
            else:
                of.write('%s\n' % s)
        of.close()
        t2 = time.time()
        #print "done ( %d nodes %d elements)" % (FEM.nnodes, len(elemNotBoundary))
        print "Finished in %0.2f [s]" % ((t2-t1))
    else:
        print(helpmsg)
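# A minimal ctrl-file sketch matching the syntax parsed above. All names
# and values below are hypothetical illustration, following the record
# order described in helpmsg (header records, the ncrosssect counts line,
# the numbered component records, then group assignments); the OOFEM
# keywords and the etype mapping depend on your ctrl reader's tables:
#
#   patch.out
#   Patch test imported from Salome
#   LinearStatic nsteps 1
#   domain 2dPlaneStress
#   OutputManager tstep_all dofman_all element_all
#   ncrosssect 1 nmat 1 nbc 1 nic 0 nltf 1 nset 0 nxfemman 0
#   SimpleCS 1 thick 0.1
#   IsoLE 1 d 1.0 E 30e6 n 0.2 tAlpha 1.2e-5
#   BoundaryCondition 1 loadTimeFunction 1 prescribedvalue 0.0
#   ConstantFunction 1 f(t) 1.0
#   group Supports
#   nodeprop "bc 2 1 1"
#   group Body
#   elemprop "crossSect 1 mat 1"
#   etype[41] trplanestress2d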
lgpl-2.1
lashwang/pyspider
pyspider/database/sqlalchemy/taskdb.py
4
6205
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# vim: set et sw=4 ts=4 sts=4 ff=unix fenc=utf8:
# Author: Binux<[email protected]>
#         http://binux.me
# Created on 2014-12-04 22:33:43

import re
import six
import time
import json

import sqlalchemy.exc
from sqlalchemy import (create_engine, MetaData, Table, Column, Index,
                        Integer, String, Float, LargeBinary, func)
from sqlalchemy.engine.url import make_url
from pyspider.libs import utils
from pyspider.database.base.taskdb import TaskDB as BaseTaskDB
from .sqlalchemybase import SplitTableMixin, result2dict


class TaskDB(SplitTableMixin, BaseTaskDB):
    __tablename__ = ''

    def __init__(self, url):
        self.table = Table('__tablename__', MetaData(),
                           Column('taskid', String(64), primary_key=True, nullable=False),
                           Column('project', String(64)),
                           Column('url', String(1024)),
                           Column('status', Integer),
                           Column('schedule', LargeBinary),
                           Column('fetch', LargeBinary),
                           Column('process', LargeBinary),
                           Column('track', LargeBinary),
                           Column('lastcrawltime', Float(32)),
                           Column('updatetime', Float(32)),
                           mysql_engine='InnoDB', mysql_charset='utf8'
                           )

        self.url = make_url(url)
        if self.url.database:
            database = self.url.database
            self.url.database = None
            try:
                engine = create_engine(self.url, convert_unicode=True, pool_recycle=3600)
                conn = engine.connect()
                conn.execute("commit")
                conn.execute("CREATE DATABASE %s" % database)
            except sqlalchemy.exc.SQLAlchemyError:
                pass
            self.url.database = database
        self.engine = create_engine(url, convert_unicode=True, pool_recycle=3600)

        self._list_project()

    def _create_project(self, project):
        assert re.match(r'^\w+$', project) is not None
        if project in self.projects:
            return
        self.table.name = self._tablename(project)
        Index('status_%s_index' % self.table.name, self.table.c.status)
        self.table.create(self.engine, checkfirst=True)
        self.table.indexes.clear()

    @staticmethod
    def _parse(data):
        for key, value in list(six.iteritems(data)):
            if isinstance(value, six.binary_type):
                data[key] = utils.text(value)
        for each in ('schedule', 'fetch', 'process', 'track'):
            if each in data:
                if data[each]:
                    if isinstance(data[each], bytearray):
                        data[each] = str(data[each])
                    data[each] = json.loads(data[each])
                else:
                    data[each] = {}
        return data

    @staticmethod
    def _stringify(data):
        for each in ('schedule', 'fetch', 'process', 'track'):
            if each in data:
                data[each] = utils.utf8(json.dumps(data[each]))
        return data

    def load_tasks(self, status, project=None, fields=None):
        if project and project not in self.projects:
            return
        if project:
            projects = [project, ]
        else:
            projects = self.projects
        columns = [getattr(self.table.c, f, f) for f in fields] if fields else self.table.c
        for project in projects:
            self.table.name = self._tablename(project)
            for task in self.engine.execute(self.table.select()
                                            .with_only_columns(columns)
                                            .where(self.table.c.status == status)):
                yield self._parse(result2dict(columns, task))

    def get_task(self, project, taskid, fields=None):
        if project not in self.projects:
            self._list_project()
        if project not in self.projects:
            return None
        self.table.name = self._tablename(project)
        columns = [getattr(self.table.c, f, f) for f in fields] if fields else self.table.c
        for each in self.engine.execute(self.table.select()
                                        .with_only_columns(columns)
                                        .limit(1)
                                        .where(self.table.c.taskid == taskid)):
            return self._parse(result2dict(columns, each))

    def status_count(self, project):
        result = dict()
        if project not in self.projects:
            self._list_project()
        if project not in self.projects:
            return result
        self.table.name = self._tablename(project)
        for status, count in self.engine.execute(
                self.table.select()
                .with_only_columns((self.table.c.status, func.count(1)))
                .group_by(self.table.c.status)):
            result[status] = count
        return result

    def insert(self, project, taskid, obj={}):
        if project not in self.projects:
            self._list_project()
        if project not in self.projects:
            self._create_project(project)
            self._list_project()
        obj = dict(obj)
        obj['taskid'] = taskid
        obj['project'] = project
        obj['updatetime'] = time.time()
        self.table.name = self._tablename(project)
        return self.engine.execute(self.table.insert()
                                   .values(**self._stringify(obj)))

    def update(self, project, taskid, obj={}, **kwargs):
        if project not in self.projects:
            self._list_project()
        if project not in self.projects:
            raise LookupError
        self.table.name = self._tablename(project)
        obj = dict(obj)
        obj.update(kwargs)
        obj['updatetime'] = time.time()
        return self.engine.execute(self.table.update()
                                   .where(self.table.c.taskid == taskid)
                                   .values(**self._stringify(obj)))
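# A minimal usage sketch, not from the pyspider sources: the sqlite URL
# and project/task names are hypothetical, and the status constant is
# assumed to come from the BaseTaskDB parent class. With sqlite the
# CREATE DATABASE attempt in __init__ fails and is silently swallowed.
#
#   taskdb = TaskDB('sqlite:///taskdb.db')
#   taskdb.insert('demo_project', 'task_1', {
#       'url': 'http://example.com/',
#       'status': TaskDB.ACTIVE,
#       'schedule': {'age': 3600},   # stored as a JSON blob by _stringify()
#   })
#   task = taskdb.get_task('demo_project', 'task_1', fields=['taskid', 'status'])
#   counts = taskdb.status_count('demo_project')   # {status: count}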
apache-2.0
stuartsierra/password-store
contrib/importers/kedpm2pass.py
42
1568
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012 Antoine Beaupré <[email protected]>. All Rights Reserved.
# This file is licensed under the GPLv2+. Please see COPYING for more information.
#
# To double-check your import worked:
# grep Path passwords | sed 's#^Path: ##;s/$/.gpg/' | sort > listpaths
# (cd ~/.password-store/ ; find -type f ) | sort | diff -u - listpaths

import re
import fileinput
import sys  # for exit
import subprocess


def insert(d):
    path = d['Path']
    del d['Path']
    print "inserting " + path
    content = d['Password'] + "\n"
    del d['Password']
    for k, v in d.iteritems():
        content += "%s: %s\n" % (k, v)
    del d
    cmd = ["pass", "insert", "--force", "--multiline", path]
    process = subprocess.Popen(cmd, stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = process.communicate(content)
    retcode = process.wait()
    if retcode:
        print 'command "%s" failed with exit code %d: %s' % (" ".join(cmd), retcode, stdout + stderr)
        sys.exit(1)


d = None
for line in fileinput.input():
    if line == "\n":
        continue
    match = re.match("(\w+): (.*)$", line)
    if match:
        if match.group(1) == 'Path':
            if d is not None:
                insert(d)
            # start a fresh record on every Path line so fields from the
            # previous entry do not leak into the next one
            d = {}
        d[match.group(1)] = match.group(2)
        #print "found field: %s => %s" % (match.group(1), match.group(2))
    else:
        print "warning: no match found on line: *%s*" % line

if d is not None:
    insert(d)
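# A sketch of the input format this parser expects (values hypothetical):
# records of "Key: value" lines matched by the regex above, where Path
# starts a new entry, Password becomes the first line of the pass entry,
# and every other key is appended to the entry body.
#
#   Path: web/example.com
#   Password: hunter2
#   Username: alice
#   Notes: sample record for illustration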
gpl-2.0
vyscond/cocos
cocos/batch.py
3
4429
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2015 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
#   * Redistributions of source code must retain the above copyright
#     notice, this list of conditions and the following disclaimer.
#   * Redistributions in binary form must reproduce the above copyright
#     notice, this list of conditions and the following disclaimer in
#     the documentation and/or other materials provided with the
#     distribution.
#   * Neither the name of cocos2d nor the names of its
#     contributors may be used to endorse or promote products
#     derived from this software without specific prior written
#     permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Batch

Batches
=======

Batches allow you to optimize the number of gl calls using pyglet's batch
"""

from __future__ import division, print_function, unicode_literals

__docformat__ = 'restructuredtext'

import pyglet
from pyglet.gl import *

from cocos.cocosnode import CocosNode

__all__ = ['BatchNode', 'BatchableNode']


def ensure_batcheable(node):
    if not isinstance(node, BatchableNode):
        raise Exception("Children node of a batch must be of class BatchableNode")
    for c in node.get_children():
        ensure_batcheable(c)


class BatchNode(CocosNode):
    def __init__(self):
        super(BatchNode, self).__init__()
        self.batch = pyglet.graphics.Batch()
        self.groups = {}

    def add(self, child, z=0, name=None):
        ensure_batcheable(child)
        child.set_batch(self.batch, self.groups, z)
        super(BatchNode, self).add(child, z, name)

    def visit(self):
        """All children are placed into self.batch, so nothing to visit"""
        if not self.visible:
            return
        glPushMatrix()
        self.transform()
        self.batch.draw()
        glPopMatrix()

    def remove(self, child):
        if isinstance(child, str):
            child_node = self.get(child)
        else:
            child_node = child
        child_node.set_batch(None)
        super(BatchNode, self).remove(child)

    def draw(self):
        pass  # All drawing done in visit!


class BatchableNode(CocosNode):
    def add(self, child, z=0, name=None):
        batchnode = self.get_ancestor(BatchNode)
        if not batchnode:
            # this node was added, but there's no batchnode in the
            # hierarchy, so we proceed as normal
            super(BatchableNode, self).add(child, z, name)
            return
        ensure_batcheable(child)
        super(BatchableNode, self).add(child, z, name)
        child.set_batch(self.batch, batchnode.groups, z)

    def remove(self, child):
        if isinstance(child, str):
            child_node = self.get(child)
        else:
            child_node = child
        child_node.set_batch(None)
        super(BatchableNode, self).remove(child)

    def set_batch(self, batch, groups=None, z=0):
        self.batch = batch
        if batch is None:
            self.group = None
        else:
            group = groups.get(z)
            if group is None:
                group = pyglet.graphics.OrderedGroup(z)
                groups[z] = group
            self.group = group
        for childZ, child in self.children:
            child.set_batch(self.batch, groups, z + childZ)
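# A minimal sketch of the intended use (names hypothetical; assumes a
# BatchableNode subclass whose pyglet vertex data is built into
# self.batch under self.group, as cocos sprites do):
#
#   world = BatchNode()
#   world.add(Ball(), z=1, name="ball")    # Ball subclasses BatchableNode
#   world.add(Ball(), z=2, name="ball2")   # each z maps to one OrderedGroup
#   # a single self.batch.draw() in BatchNode.visit() now renders both
#   world.remove("ball")                   # set_batch(None) detaches the child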
bsd-3-clause
guludo/ardupilot-1
mk/PX4/Tools/genmsg/src/genmsg/command_line.py
217
1887
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
#  * Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
#  * Redistributions in binary form must reproduce the above
#    copyright notice, this list of conditions and the following
#    disclaimer in the documentation and/or other materials provided
#    with the distribution.
#  * Neither the name of Willow Garage, Inc. nor the names of its
#    contributors may be used to endorse or promote products derived
#    from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.


def includepath_to_dict(includepath):
    search_path = {}
    if includepath:
        for path in includepath:
            key = path[:path.find(':')]
            value = path[path.find(':')+1:]
            if value:
                search_path.setdefault(key, []).append(value)
    return search_path
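# A quick illustration of the mapping above (package names hypothetical):
# each entry is split on its first ':' into a package key and a path, and
# paths that share a key are grouped together in order.
#
#   includepath_to_dict(['std_msgs:/opt/ros/share/std_msgs/msg',
#                        'std_msgs:/overlay/std_msgs/msg',
#                        'geometry_msgs:/opt/ros/share/geometry_msgs/msg'])
#   == {'std_msgs': ['/opt/ros/share/std_msgs/msg', '/overlay/std_msgs/msg'],
#       'geometry_msgs': ['/opt/ros/share/geometry_msgs/msg']}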
gpl-3.0
delhivery/django
tests/forms_tests/tests/test_forms.py
46
148965
# -*- coding: utf-8 -*- from __future__ import unicode_literals import copy import datetime import json import uuid from django.core.exceptions import NON_FIELD_ERRORS from django.core.files.uploadedfile import SimpleUploadedFile from django.core.validators import RegexValidator from django.forms import ( BooleanField, CharField, CheckboxSelectMultiple, ChoiceField, DateField, DateTimeField, EmailField, FileField, FloatField, Form, HiddenInput, ImageField, IntegerField, MultipleChoiceField, MultipleHiddenInput, MultiValueField, NullBooleanField, PasswordInput, RadioSelect, Select, SplitDateTimeField, SplitHiddenDateTimeWidget, Textarea, TextInput, TimeField, ValidationError, forms, ) from django.forms.utils import ErrorList from django.http import QueryDict from django.template import Context, Template from django.test import SimpleTestCase from django.test.utils import str_prefix from django.utils import six from django.utils.datastructures import MultiValueDict from django.utils.encoding import force_text, python_2_unicode_compatible from django.utils.html import format_html from django.utils.safestring import SafeData, mark_safe class Person(Form): first_name = CharField() last_name = CharField() birthday = DateField() class PersonNew(Form): first_name = CharField(widget=TextInput(attrs={'id': 'first_name_id'})) last_name = CharField() birthday = DateField() class FormsTestCase(SimpleTestCase): # A Form is a collection of Fields. It knows how to validate a set of data and it # knows how to render itself in a couple of default ways (e.g., an HTML table). # You can pass it data in __init__(), as a dictionary. def test_form(self): # Pass a dictionary to a Form's __init__(). p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'}) self.assertTrue(p.is_bound) self.assertEqual(p.errors, {}) self.assertTrue(p.is_valid()) self.assertHTMLEqual(p.errors.as_ul(), '') self.assertEqual(p.errors.as_text(), '') self.assertEqual(p.cleaned_data["first_name"], 'John') self.assertEqual(p.cleaned_data["last_name"], 'Lennon') self.assertEqual(p.cleaned_data["birthday"], datetime.date(1940, 10, 9)) self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="first_name" value="John" id="id_first_name" />') self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="last_name" value="Lennon" id="id_last_name" />') self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="birthday" value="1940-10-9" id="id_birthday" />') nonexistenterror = "Key u?'nonexistentfield' not found in 'Person'" with six.assertRaisesRegex(self, KeyError, nonexistenterror): p['nonexistentfield'] self.fail('Attempts to access non-existent fields should fail.') form_output = [] for boundfield in p: form_output.append(str(boundfield)) self.assertHTMLEqual('\n'.join(form_output), """<input type="text" name="first_name" value="John" id="id_first_name" /> <input type="text" name="last_name" value="Lennon" id="id_last_name" /> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" />""") form_output = [] for boundfield in p: form_output.append([boundfield.label, boundfield.data]) self.assertEqual(form_output, [ ['First name', 'John'], ['Last name', 'Lennon'], ['Birthday', '1940-10-9'] ]) self.assertHTMLEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr> <tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="Lennon" 
id="id_last_name" /></td></tr> <tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></td></tr>""") def test_empty_dict(self): # Empty dictionaries are valid, too. p = Person({}) self.assertTrue(p.is_bound) self.assertEqual(p.errors['first_name'], ['This field is required.']) self.assertEqual(p.errors['last_name'], ['This field is required.']) self.assertEqual(p.errors['birthday'], ['This field is required.']) self.assertFalse(p.is_valid()) self.assertEqual(p.cleaned_data, {}) self.assertHTMLEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="first_name" id="id_first_name" /></td></tr> <tr><th><label for="id_last_name">Last name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="last_name" id="id_last_name" /></td></tr> <tr><th><label for="id_birthday">Birthday:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="birthday" id="id_birthday" /></td></tr>""") self.assertHTMLEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="first_name" id="id_first_name" /></td></tr> <tr><th><label for="id_last_name">Last name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="last_name" id="id_last_name" /></td></tr> <tr><th><label for="id_birthday">Birthday:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="birthday" id="id_birthday" /></td></tr>""") self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li> <li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li> <li><ul class="errorlist"><li>This field is required.</li></ul><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></li>""") self.assertHTMLEqual(p.as_p(), """<ul class="errorlist"><li>This field is required.</li></ul> <p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p> <ul class="errorlist"><li>This field is required.</li></ul> <p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p> <ul class="errorlist"><li>This field is required.</li></ul> <p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></p>""") def test_unbound_form(self): # If you don't pass any values to the Form's __init__(), or if you pass None, # the Form will be considered unbound and won't do any validation. Form.errors # will be an empty dictionary *but* Form.is_valid() will return False. 
p = Person() self.assertFalse(p.is_bound) self.assertEqual(p.errors, {}) self.assertFalse(p.is_valid()) try: p.cleaned_data self.fail('Attempts to access cleaned_data when validation fails should fail.') except AttributeError: pass self.assertHTMLEqual(str(p), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr> <tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr> <tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /></td></tr>""") self.assertHTMLEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr> <tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr> <tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /></td></tr>""") self.assertHTMLEqual(p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li> <li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li> <li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></li>""") self.assertHTMLEqual(p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p> <p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p> <p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /></p>""") def test_unicode_values(self): # Unicode values are handled properly. 
p = Person({'first_name': 'John', 'last_name': '\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111', 'birthday': '1940-10-9'}) self.assertHTMLEqual(p.as_table(), '<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" value="John" id="id_first_name" /></td></tr>\n<tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></td></tr>\n<tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></td></tr>') self.assertHTMLEqual(p.as_ul(), '<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" value="John" id="id_first_name" /></li>\n<li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></li>\n<li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></li>') self.assertHTMLEqual(p.as_p(), '<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" value="John" id="id_first_name" /></p>\n<p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" value="\u0160\u0110\u0106\u017d\u0107\u017e\u0161\u0111" id="id_last_name" /></p>\n<p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" value="1940-10-9" id="id_birthday" /></p>') p = Person({'last_name': 'Lennon'}) self.assertEqual(p.errors['first_name'], ['This field is required.']) self.assertEqual(p.errors['birthday'], ['This field is required.']) self.assertFalse(p.is_valid()) self.assertDictEqual(p.errors, {'birthday': ['This field is required.'], 'first_name': ['This field is required.']}) self.assertEqual(p.cleaned_data, {'last_name': 'Lennon'}) self.assertEqual(p['first_name'].errors, ['This field is required.']) self.assertHTMLEqual(p['first_name'].errors.as_ul(), '<ul class="errorlist"><li>This field is required.</li></ul>') self.assertEqual(p['first_name'].errors.as_text(), '* This field is required.') p = Person() self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="first_name" id="id_first_name" />') self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="last_name" id="id_last_name" />') self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="birthday" id="id_birthday" />') def test_cleaned_data_only_fields(self): # cleaned_data will always *only* contain a key for fields defined in the # Form, even if you pass extra data when you define the Form. In this # example, we pass a bunch of extra fields to the form constructor, # but cleaned_data contains only the form's fields. data = {'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9', 'extra1': 'hello', 'extra2': 'hello'} p = Person(data) self.assertTrue(p.is_valid()) self.assertEqual(p.cleaned_data['first_name'], 'John') self.assertEqual(p.cleaned_data['last_name'], 'Lennon') self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9)) def test_optional_data(self): # cleaned_data will include a key and value for *all* fields defined in the Form, # even if the Form's data didn't include a value for fields that are not # required. In this example, the data dictionary doesn't include a value for the # "nick_name" field, but cleaned_data includes it. For CharFields, it's set to the # empty string. 
class OptionalPersonForm(Form): first_name = CharField() last_name = CharField() nick_name = CharField(required=False) data = {'first_name': 'John', 'last_name': 'Lennon'} f = OptionalPersonForm(data) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data['nick_name'], '') self.assertEqual(f.cleaned_data['first_name'], 'John') self.assertEqual(f.cleaned_data['last_name'], 'Lennon') # For DateFields, it's set to None. class OptionalPersonForm(Form): first_name = CharField() last_name = CharField() birth_date = DateField(required=False) data = {'first_name': 'John', 'last_name': 'Lennon'} f = OptionalPersonForm(data) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data['birth_date'], None) self.assertEqual(f.cleaned_data['first_name'], 'John') self.assertEqual(f.cleaned_data['last_name'], 'Lennon') def test_auto_id(self): # "auto_id" tells the Form to add an "id" attribute to each form element. # If it's a string that contains '%s', Django will use that as a format string # into which the field's name will be inserted. It will also put a <label> around # the human-readable labels for a field. p = Person(auto_id='%s_id') self.assertHTMLEqual(p.as_table(), """<tr><th><label for="first_name_id">First name:</label></th><td><input type="text" name="first_name" id="first_name_id" /></td></tr> <tr><th><label for="last_name_id">Last name:</label></th><td><input type="text" name="last_name" id="last_name_id" /></td></tr> <tr><th><label for="birthday_id">Birthday:</label></th><td><input type="text" name="birthday" id="birthday_id" /></td></tr>""") self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" /></li> <li><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" /></li> <li><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" /></li>""") self.assertHTMLEqual(p.as_p(), """<p><label for="first_name_id">First name:</label> <input type="text" name="first_name" id="first_name_id" /></p> <p><label for="last_name_id">Last name:</label> <input type="text" name="last_name" id="last_name_id" /></p> <p><label for="birthday_id">Birthday:</label> <input type="text" name="birthday" id="birthday_id" /></p>""") def test_auto_id_true(self): # If auto_id is any True value whose str() does not contain '%s', the "id" # attribute will be the name of the field. p = Person(auto_id=True) self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name">First name:</label> <input type="text" name="first_name" id="first_name" /></li> <li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" /></li> <li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" /></li>""") def test_auto_id_false(self): # If auto_id is any False value, an "id" attribute won't be output unless it # was manually entered. p = Person(auto_id=False) self.assertHTMLEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li> <li>Last name: <input type="text" name="last_name" /></li> <li>Birthday: <input type="text" name="birthday" /></li>""") def test_id_on_field(self): # In this example, auto_id is False, but the "id" attribute for the "first_name" # field is given. Also note that field gets a <label>, while the others don't. 
p = PersonNew(auto_id=False) self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" /></li> <li>Last name: <input type="text" name="last_name" /></li> <li>Birthday: <input type="text" name="birthday" /></li>""") def test_auto_id_on_form_and_field(self): # If the "id" attribute is specified in the Form and auto_id is True, the "id" # attribute in the Form gets precedence. p = PersonNew(auto_id=True) self.assertHTMLEqual(p.as_ul(), """<li><label for="first_name_id">First name:</label> <input type="text" id="first_name_id" name="first_name" /></li> <li><label for="last_name">Last name:</label> <input type="text" name="last_name" id="last_name" /></li> <li><label for="birthday">Birthday:</label> <input type="text" name="birthday" id="birthday" /></li>""") def test_various_boolean_values(self): class SignupForm(Form): email = EmailField() get_spam = BooleanField() f = SignupForm(auto_id=False) self.assertHTMLEqual(str(f['email']), '<input type="email" name="email" />') self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />') f = SignupForm({'email': '[email protected]', 'get_spam': True}, auto_id=False) self.assertHTMLEqual(str(f['email']), '<input type="email" name="email" value="[email protected]" />') self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />') # 'True' or 'true' should be rendered without a value attribute f = SignupForm({'email': '[email protected]', 'get_spam': 'True'}, auto_id=False) self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />') f = SignupForm({'email': '[email protected]', 'get_spam': 'true'}, auto_id=False) self.assertHTMLEqual(str(f['get_spam']), '<input checked="checked" type="checkbox" name="get_spam" />') # A value of 'False' or 'false' should be rendered unchecked f = SignupForm({'email': '[email protected]', 'get_spam': 'False'}, auto_id=False) self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />') f = SignupForm({'email': '[email protected]', 'get_spam': 'false'}, auto_id=False) self.assertHTMLEqual(str(f['get_spam']), '<input type="checkbox" name="get_spam" />') # A value of '0' should be interpreted as a True value (#16820) f = SignupForm({'email': '[email protected]', 'get_spam': '0'}) self.assertTrue(f.is_valid()) self.assertTrue(f.cleaned_data.get('get_spam')) def test_widget_output(self): # Any Field can have a Widget class passed to its constructor: class ContactForm(Form): subject = CharField() message = CharField(widget=Textarea) f = ContactForm(auto_id=False) self.assertHTMLEqual(str(f['subject']), '<input type="text" name="subject" />') self.assertHTMLEqual(str(f['message']), '<textarea name="message" rows="10" cols="40"></textarea>') # as_textarea(), as_text() and as_hidden() are shortcuts for changing the output # widget type: self.assertHTMLEqual(f['subject'].as_textarea(), '<textarea name="subject" rows="10" cols="40"></textarea>') self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" />') self.assertHTMLEqual(f['message'].as_hidden(), '<input type="hidden" name="message" />') # The 'widget' parameter to a Field can also be an instance: class ContactForm(Form): subject = CharField() message = CharField(widget=Textarea(attrs={'rows': 80, 'cols': 20})) f = ContactForm(auto_id=False) self.assertHTMLEqual(str(f['message']), '<textarea name="message" rows="80" cols="20"></textarea>') 
# Instance-level attrs are *not* carried over to as_textarea(), as_text() and # as_hidden(): self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" />') f = ContactForm({'subject': 'Hello', 'message': 'I love you.'}, auto_id=False) self.assertHTMLEqual(f['subject'].as_textarea(), '<textarea rows="10" cols="40" name="subject">Hello</textarea>') self.assertHTMLEqual(f['message'].as_text(), '<input type="text" name="message" value="I love you." />') self.assertHTMLEqual(f['message'].as_hidden(), '<input type="hidden" name="message" value="I love you." />') def test_forms_with_choices(self): # For a form with a <select>, use ChoiceField: class FrameworkForm(Form): name = CharField() language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')]) f = FrameworkForm(auto_id=False) self.assertHTMLEqual(str(f['language']), """<select name="language"> <option value="P">Python</option> <option value="J">Java</option> </select>""") f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False) self.assertHTMLEqual(str(f['language']), """<select name="language"> <option value="P" selected="selected">Python</option> <option value="J">Java</option> </select>""") # A subtlety: If one of the choices' value is the empty string and the form is # unbound, then the <option> for the empty-string choice will get selected="selected". class FrameworkForm(Form): name = CharField() language = ChoiceField(choices=[('', '------'), ('P', 'Python'), ('J', 'Java')]) f = FrameworkForm(auto_id=False) self.assertHTMLEqual(str(f['language']), """<select name="language"> <option value="" selected="selected">------</option> <option value="P">Python</option> <option value="J">Java</option> </select>""") # You can specify widget attributes in the Widget constructor. class FrameworkForm(Form): name = CharField() language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(attrs={'class': 'foo'})) f = FrameworkForm(auto_id=False) self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language"> <option value="P">Python</option> <option value="J">Java</option> </select>""") f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False) self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language"> <option value="P" selected="selected">Python</option> <option value="J">Java</option> </select>""") # When passing a custom widget instance to ChoiceField, note that setting # 'choices' on the widget is meaningless. The widget will use the choices # defined on the Field, not the ones defined on the Widget. class FrameworkForm(Form): name = CharField() language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=Select(choices=[('R', 'Ruby'), ('P', 'Perl')], attrs={'class': 'foo'})) f = FrameworkForm(auto_id=False) self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language"> <option value="P">Python</option> <option value="J">Java</option> </select>""") f = FrameworkForm({'name': 'Django', 'language': 'P'}, auto_id=False) self.assertHTMLEqual(str(f['language']), """<select class="foo" name="language"> <option value="P" selected="selected">Python</option> <option value="J">Java</option> </select>""") # You can set a ChoiceField's choices after the fact. 
class FrameworkForm(Form): name = CharField() language = ChoiceField() f = FrameworkForm(auto_id=False) self.assertHTMLEqual(str(f['language']), """<select name="language"> </select>""") f.fields['language'].choices = [('P', 'Python'), ('J', 'Java')] self.assertHTMLEqual(str(f['language']), """<select name="language"> <option value="P">Python</option> <option value="J">Java</option> </select>""") def test_forms_with_radio(self): # Add widget=RadioSelect to use that widget with a ChoiceField. class FrameworkForm(Form): name = CharField() language = ChoiceField(choices=[('P', 'Python'), ('J', 'Java')], widget=RadioSelect) f = FrameworkForm(auto_id=False) self.assertHTMLEqual(str(f['language']), """<ul> <li><label><input type="radio" name="language" value="P" /> Python</label></li> <li><label><input type="radio" name="language" value="J" /> Java</label></li> </ul>""") self.assertHTMLEqual(f.as_table(), """<tr><th>Name:</th><td><input type="text" name="name" /></td></tr> <tr><th>Language:</th><td><ul> <li><label><input type="radio" name="language" value="P" /> Python</label></li> <li><label><input type="radio" name="language" value="J" /> Java</label></li> </ul></td></tr>""") self.assertHTMLEqual(f.as_ul(), """<li>Name: <input type="text" name="name" /></li> <li>Language: <ul> <li><label><input type="radio" name="language" value="P" /> Python</label></li> <li><label><input type="radio" name="language" value="J" /> Java</label></li> </ul></li>""") # Regarding auto_id and <label>, RadioSelect is a special case. Each radio button # gets a distinct ID, formed by appending an underscore plus the button's # zero-based index. f = FrameworkForm(auto_id='id_%s') self.assertHTMLEqual(str(f['language']), """<ul id="id_language"> <li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li> <li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li> </ul>""") # When RadioSelect is used with auto_id, and the whole form is printed using # either as_table() or as_ul(), the label for the RadioSelect will point to the # ID of the *first* radio button. 
self.assertHTMLEqual(f.as_table(), """<tr><th><label for="id_name">Name:</label></th><td><input type="text" name="name" id="id_name" /></td></tr> <tr><th><label for="id_language_0">Language:</label></th><td><ul id="id_language"> <li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li> <li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li> </ul></td></tr>""") self.assertHTMLEqual(f.as_ul(), """<li><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></li> <li><label for="id_language_0">Language:</label> <ul id="id_language"> <li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li> <li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li> </ul></li>""") self.assertHTMLEqual(f.as_p(), """<p><label for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></p> <p><label for="id_language_0">Language:</label> <ul id="id_language"> <li><label for="id_language_0"><input type="radio" id="id_language_0" value="P" name="language" /> Python</label></li> <li><label for="id_language_1"><input type="radio" id="id_language_1" value="J" name="language" /> Java</label></li> </ul></p>""") # Test iterating on individual radios in a template t = Template('{% for radio in form.language %}<div class="myradio">{{ radio }}</div>{% endfor %}') self.assertHTMLEqual(t.render(Context({'form': f})), """<div class="myradio"><label for="id_language_0"> <input id="id_language_0" name="language" type="radio" value="P" /> Python</label></div> <div class="myradio"><label for="id_language_1"> <input id="id_language_1" name="language" type="radio" value="J" /> Java</label></div>""") def test_form_with_iterable_boundfield(self): class BeatleForm(Form): name = ChoiceField(choices=[('john', 'John'), ('paul', 'Paul'), ('george', 'George'), ('ringo', 'Ringo')], widget=RadioSelect) f = BeatleForm(auto_id=False) self.assertHTMLEqual('\n'.join(str(bf) for bf in f['name']), """<label><input type="radio" name="name" value="john" /> John</label> <label><input type="radio" name="name" value="paul" /> Paul</label> <label><input type="radio" name="name" value="george" /> George</label> <label><input type="radio" name="name" value="ringo" /> Ringo</label>""") self.assertHTMLEqual('\n'.join('<div>%s</div>' % bf for bf in f['name']), """<div><label><input type="radio" name="name" value="john" /> John</label></div> <div><label><input type="radio" name="name" value="paul" /> Paul</label></div> <div><label><input type="radio" name="name" value="george" /> George</label></div> <div><label><input type="radio" name="name" value="ringo" /> Ringo</label></div>""") def test_form_with_noniterable_boundfield(self): # You can iterate over any BoundField, not just those with widget=RadioSelect. 
class BeatleForm(Form): name = CharField() f = BeatleForm(auto_id=False) self.assertHTMLEqual('\n'.join(str(bf) for bf in f['name']), '<input type="text" name="name" />') def test_forms_with_multiple_choice(self): # MultipleChoiceField is a special case, as its data is required to be a list: class SongForm(Form): name = CharField() composers = MultipleChoiceField() f = SongForm(auto_id=False) self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers"> </select>""") class SongForm(Form): name = CharField() composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')]) f = SongForm(auto_id=False) self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers"> <option value="J">John Lennon</option> <option value="P">Paul McCartney</option> </select>""") f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False) self.assertHTMLEqual(str(f['name']), '<input type="text" name="name" value="Yesterday" />') self.assertHTMLEqual(str(f['composers']), """<select multiple="multiple" name="composers"> <option value="J">John Lennon</option> <option value="P" selected="selected">Paul McCartney</option> </select>""") def test_form_with_disabled_fields(self): class PersonForm(Form): name = CharField() birthday = DateField(disabled=True) class PersonFormFieldInitial(Form): name = CharField() birthday = DateField(disabled=True, initial=datetime.date(1974, 8, 16)) # Disabled fields are generally not transmitted by user agents. # The value from the form's initial data is used. f1 = PersonForm({'name': 'John Doe'}, initial={'birthday': datetime.date(1974, 8, 16)}) f2 = PersonFormFieldInitial({'name': 'John Doe'}) for form in (f1, f2): self.assertTrue(form.is_valid()) self.assertEqual( form.cleaned_data, {'birthday': datetime.date(1974, 8, 16), 'name': 'John Doe'} ) # Values provided in the form's data are ignored. data = {'name': 'John Doe', 'birthday': '1984-11-10'} f1 = PersonForm(data, initial={'birthday': datetime.date(1974, 8, 16)}) f2 = PersonFormFieldInitial(data) for form in (f1, f2): self.assertTrue(form.is_valid()) self.assertEqual( form.cleaned_data, {'birthday': datetime.date(1974, 8, 16), 'name': 'John Doe'} ) def test_hidden_data(self): class SongForm(Form): name = CharField() composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')]) # MultipleChoiceField rendered as_hidden() is a special case. Because it can # have multiple values, its as_hidden() renders multiple <input type="hidden"> # tags. 
f = SongForm({'name': 'Yesterday', 'composers': ['P']}, auto_id=False) self.assertHTMLEqual(f['composers'].as_hidden(), '<input type="hidden" name="composers" value="P" />') f = SongForm({'name': 'From Me To You', 'composers': ['P', 'J']}, auto_id=False) self.assertHTMLEqual(f['composers'].as_hidden(), """<input type="hidden" name="composers" value="P" /> <input type="hidden" name="composers" value="J" />""") # DateTimeField rendered as_hidden() is special too class MessageForm(Form): when = SplitDateTimeField() f = MessageForm({'when_0': '1992-01-01', 'when_1': '01:01'}) self.assertTrue(f.is_valid()) self.assertHTMLEqual(str(f['when']), '<input type="text" name="when_0" value="1992-01-01" id="id_when_0" /><input type="text" name="when_1" value="01:01" id="id_when_1" />') self.assertHTMLEqual(f['when'].as_hidden(), '<input type="hidden" name="when_0" value="1992-01-01" id="id_when_0" /><input type="hidden" name="when_1" value="01:01" id="id_when_1" />') def test_mulitple_choice_checkbox(self): # MultipleChoiceField can also be used with the CheckboxSelectMultiple widget. class SongForm(Form): name = CharField() composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple) f = SongForm(auto_id=False) self.assertHTMLEqual(str(f['composers']), """<ul> <li><label><input type="checkbox" name="composers" value="J" /> John Lennon</label></li> <li><label><input type="checkbox" name="composers" value="P" /> Paul McCartney</label></li> </ul>""") f = SongForm({'composers': ['J']}, auto_id=False) self.assertHTMLEqual(str(f['composers']), """<ul> <li><label><input checked="checked" type="checkbox" name="composers" value="J" /> John Lennon</label></li> <li><label><input type="checkbox" name="composers" value="P" /> Paul McCartney</label></li> </ul>""") f = SongForm({'composers': ['J', 'P']}, auto_id=False) self.assertHTMLEqual(str(f['composers']), """<ul> <li><label><input checked="checked" type="checkbox" name="composers" value="J" /> John Lennon</label></li> <li><label><input checked="checked" type="checkbox" name="composers" value="P" /> Paul McCartney</label></li> </ul>""") # Test iterating on individual checkboxes in a template t = Template('{% for checkbox in form.composers %}<div class="mycheckbox">{{ checkbox }}</div>{% endfor %}') self.assertHTMLEqual(t.render(Context({'form': f})), """<div class="mycheckbox"><label> <input checked="checked" name="composers" type="checkbox" value="J" /> John Lennon</label></div> <div class="mycheckbox"><label> <input checked="checked" name="composers" type="checkbox" value="P" /> Paul McCartney</label></div>""") def test_checkbox_auto_id(self): # Regarding auto_id, CheckboxSelectMultiple is a special case. Each checkbox # gets a distinct ID, formed by appending an underscore plus the checkbox's # zero-based index. class SongForm(Form): name = CharField() composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple) f = SongForm(auto_id='%s_id') self.assertHTMLEqual(str(f['composers']), """<ul id="composers_id"> <li><label for="composers_id_0"><input type="checkbox" name="composers" value="J" id="composers_id_0" /> John Lennon</label></li> <li><label for="composers_id_1"><input type="checkbox" name="composers" value="P" id="composers_id_1" /> Paul McCartney</label></li> </ul>""") def test_multiple_choice_list_data(self): # Data for a MultipleChoiceField should be a list. QueryDict and # MultiValueDict conveniently work with this. 
class SongForm(Form): name = CharField() composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple) data = {'name': 'Yesterday', 'composers': ['J', 'P']} f = SongForm(data) self.assertEqual(f.errors, {}) data = QueryDict('name=Yesterday&composers=J&composers=P') f = SongForm(data) self.assertEqual(f.errors, {}) data = MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P'])) f = SongForm(data) self.assertEqual(f.errors, {}) def test_multiple_hidden(self): class SongForm(Form): name = CharField() composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=CheckboxSelectMultiple) # The MultipleHiddenInput widget renders multiple values as hidden fields. class SongFormHidden(Form): name = CharField() composers = MultipleChoiceField(choices=[('J', 'John Lennon'), ('P', 'Paul McCartney')], widget=MultipleHiddenInput) f = SongFormHidden(MultiValueDict(dict(name=['Yesterday'], composers=['J', 'P'])), auto_id=False) self.assertHTMLEqual(f.as_ul(), """<li>Name: <input type="text" name="name" value="Yesterday" /><input type="hidden" name="composers" value="J" /> <input type="hidden" name="composers" value="P" /></li>""") # When using CheckboxSelectMultiple, the framework expects a list of input and # returns a list of input. f = SongForm({'name': 'Yesterday'}, auto_id=False) self.assertEqual(f.errors['composers'], ['This field is required.']) f = SongForm({'name': 'Yesterday', 'composers': ['J']}, auto_id=False) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data['composers'], ['J']) self.assertEqual(f.cleaned_data['name'], 'Yesterday') f = SongForm({'name': 'Yesterday', 'composers': ['J', 'P']}, auto_id=False) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data['composers'], ['J', 'P']) self.assertEqual(f.cleaned_data['name'], 'Yesterday') def test_escaping(self): # Validation errors are HTML-escaped when output as HTML. 
class EscapingForm(Form): special_name = CharField(label="<em>Special</em> Field") special_safe_name = CharField(label=mark_safe("<em>Special</em> Field")) def clean_special_name(self): raise ValidationError("Something's wrong with '%s'" % self.cleaned_data['special_name']) def clean_special_safe_name(self): raise ValidationError(mark_safe("'<b>%s</b>' is a safe string" % self.cleaned_data['special_safe_name'])) f = EscapingForm({'special_name': "Nothing to escape", 'special_safe_name': "Nothing to escape"}, auto_id=False) self.assertHTMLEqual(f.as_table(), """<tr><th>&lt;em&gt;Special&lt;/em&gt; Field:</th><td><ul class="errorlist"><li>Something&#39;s wrong with &#39;Nothing to escape&#39;</li></ul><input type="text" name="special_name" value="Nothing to escape" /></td></tr> <tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>'<b>Nothing to escape</b>' is a safe string</li></ul><input type="text" name="special_safe_name" value="Nothing to escape" /></td></tr>""") f = EscapingForm({ 'special_name': "Should escape < & > and <script>alert('xss')</script>", 'special_safe_name': "<i>Do not escape</i>" }, auto_id=False) self.assertHTMLEqual(f.as_table(), """<tr><th>&lt;em&gt;Special&lt;/em&gt; Field:</th><td><ul class="errorlist"><li>Something&#39;s wrong with &#39;Should escape &lt; &amp; &gt; and &lt;script&gt;alert(&#39;xss&#39;)&lt;/script&gt;&#39;</li></ul><input type="text" name="special_name" value="Should escape &lt; &amp; &gt; and &lt;script&gt;alert(&#39;xss&#39;)&lt;/script&gt;" /></td></tr> <tr><th><em>Special</em> Field:</th><td><ul class="errorlist"><li>'<b><i>Do not escape</i></b>' is a safe string</li></ul><input type="text" name="special_safe_name" value="&lt;i&gt;Do not escape&lt;/i&gt;" /></td></tr>""") def test_validating_multiple_fields(self): # There are a couple of ways to do multiple-field validation. If you want the # validation message to be associated with a particular field, implement the # clean_XXX() method on the Form, where XXX is the field name. As in # Field.clean(), the clean_XXX() method should return the cleaned value. In the # clean_XXX() method, you have access to self.cleaned_data, which is a dictionary # of all the data that has been cleaned *so far*, in order by the fields, # including the current field (e.g., the field XXX if you're in clean_XXX()). 
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password1 = CharField(widget=PasswordInput)
            password2 = CharField(widget=PasswordInput)

            def clean_password2(self):
                if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
                    raise ValidationError('Please make sure your passwords match.')
                return self.cleaned_data['password2']

        f = UserRegistration(auto_id=False)
        self.assertEqual(f.errors, {})
        f = UserRegistration({}, auto_id=False)
        self.assertEqual(f.errors['username'], ['This field is required.'])
        self.assertEqual(f.errors['password1'], ['This field is required.'])
        self.assertEqual(f.errors['password2'], ['This field is required.'])
        f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)
        self.assertEqual(f.errors['password2'], ['Please make sure your passwords match.'])
        f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False)
        self.assertEqual(f.errors, {})
        self.assertEqual(f.cleaned_data['username'], 'adrian')
        self.assertEqual(f.cleaned_data['password1'], 'foo')
        self.assertEqual(f.cleaned_data['password2'], 'foo')

        # Another way of doing multiple-field validation is by implementing the
        # Form's clean() method. Usually a ValidationError raised by that method
        # will not be associated with a particular field and will have a
        # special-case association with the field named '__all__'. It's
        # possible to associate the errors with a particular field using the
        # Form.add_error() method or by passing a dictionary that maps each
        # field to one or more errors.
        #
        # Note that in Form.clean(), you have access to self.cleaned_data, a
        # dictionary of all the fields/values that have *not* raised a
        # ValidationError. Also note Form.clean() is required to return a
        # dictionary of all clean data.
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password1 = CharField(widget=PasswordInput)
            password2 = CharField(widget=PasswordInput)

            def clean(self):
                # Test raising a ValidationError as NON_FIELD_ERRORS.
                if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
                    raise ValidationError('Please make sure your passwords match.')

                # Test raising ValidationError that targets multiple fields.
                errors = {}
                if self.cleaned_data.get('password1') == 'FORBIDDEN_VALUE':
                    errors['password1'] = 'Forbidden value.'
if self.cleaned_data.get('password2') == 'FORBIDDEN_VALUE': errors['password2'] = ['Forbidden value.'] if errors: raise ValidationError(errors) # Test Form.add_error() if self.cleaned_data.get('password1') == 'FORBIDDEN_VALUE2': self.add_error(None, 'Non-field error 1.') self.add_error('password1', 'Forbidden value 2.') if self.cleaned_data.get('password2') == 'FORBIDDEN_VALUE2': self.add_error('password2', 'Forbidden value 2.') raise ValidationError('Non-field error 2.') return self.cleaned_data f = UserRegistration(auto_id=False) self.assertEqual(f.errors, {}) f = UserRegistration({}, auto_id=False) self.assertHTMLEqual(f.as_table(), """<tr><th>Username:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="username" maxlength="10" /></td></tr> <tr><th>Password1:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="password" name="password1" /></td></tr> <tr><th>Password2:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="password" name="password2" /></td></tr>""") self.assertEqual(f.errors['username'], ['This field is required.']) self.assertEqual(f.errors['password1'], ['This field is required.']) self.assertEqual(f.errors['password2'], ['This field is required.']) f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'bar'}, auto_id=False) self.assertEqual(f.errors['__all__'], ['Please make sure your passwords match.']) self.assertHTMLEqual(f.as_table(), """<tr><td colspan="2"><ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></td></tr> <tr><th>Username:</th><td><input type="text" name="username" value="adrian" maxlength="10" /></td></tr> <tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr> <tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>""") self.assertHTMLEqual(f.as_ul(), """<li><ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></li> <li>Username: <input type="text" name="username" value="adrian" maxlength="10" /></li> <li>Password1: <input type="password" name="password1" /></li> <li>Password2: <input type="password" name="password2" /></li>""") f = UserRegistration({'username': 'adrian', 'password1': 'foo', 'password2': 'foo'}, auto_id=False) self.assertEqual(f.errors, {}) self.assertEqual(f.cleaned_data['username'], 'adrian') self.assertEqual(f.cleaned_data['password1'], 'foo') self.assertEqual(f.cleaned_data['password2'], 'foo') f = UserRegistration({'username': 'adrian', 'password1': 'FORBIDDEN_VALUE', 'password2': 'FORBIDDEN_VALUE'}, auto_id=False) self.assertEqual(f.errors['password1'], ['Forbidden value.']) self.assertEqual(f.errors['password2'], ['Forbidden value.']) f = UserRegistration({'username': 'adrian', 'password1': 'FORBIDDEN_VALUE2', 'password2': 'FORBIDDEN_VALUE2'}, auto_id=False) self.assertEqual(f.errors['__all__'], ['Non-field error 1.', 'Non-field error 2.']) self.assertEqual(f.errors['password1'], ['Forbidden value 2.']) self.assertEqual(f.errors['password2'], ['Forbidden value 2.']) with six.assertRaisesRegex(self, ValueError, "has no field named"): f.add_error('missing_field', 'Some error.') def test_update_error_dict(self): class CodeForm(Form): code = CharField(max_length=10) def clean(self): try: raise ValidationError({'code': [ValidationError('Code error 1.')]}) except ValidationError as e: self._errors = e.update_error_dict(self._errors) try: raise ValidationError({'code': [ValidationError('Code error 2.')]}) 
except ValidationError as e: self._errors = e.update_error_dict(self._errors) try: raise ValidationError({'code': forms.ErrorList(['Code error 3.'])}) except ValidationError as e: self._errors = e.update_error_dict(self._errors) try: raise ValidationError('Non-field error 1.') except ValidationError as e: self._errors = e.update_error_dict(self._errors) try: raise ValidationError([ValidationError('Non-field error 2.')]) except ValidationError as e: self._errors = e.update_error_dict(self._errors) # Ensure that the newly added list of errors is an instance of ErrorList. for field, error_list in self._errors.items(): if not isinstance(error_list, self.error_class): self._errors[field] = self.error_class(error_list) form = CodeForm({'code': 'hello'}) # Trigger validation. self.assertFalse(form.is_valid()) # Check that update_error_dict didn't lose track of the ErrorDict type. self.assertIsInstance(form._errors, forms.ErrorDict) self.assertEqual(dict(form.errors), { 'code': ['Code error 1.', 'Code error 2.', 'Code error 3.'], NON_FIELD_ERRORS: ['Non-field error 1.', 'Non-field error 2.'], }) def test_has_error(self): class UserRegistration(Form): username = CharField(max_length=10) password1 = CharField(widget=PasswordInput, min_length=5) password2 = CharField(widget=PasswordInput) def clean(self): if (self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']): raise ValidationError( 'Please make sure your passwords match.', code='password_mismatch', ) f = UserRegistration(data={}) self.assertTrue(f.has_error('password1')) self.assertTrue(f.has_error('password1', 'required')) self.assertFalse(f.has_error('password1', 'anything')) f = UserRegistration(data={'password1': 'Hi', 'password2': 'Hi'}) self.assertTrue(f.has_error('password1')) self.assertTrue(f.has_error('password1', 'min_length')) self.assertFalse(f.has_error('password1', 'anything')) self.assertFalse(f.has_error('password2')) self.assertFalse(f.has_error('password2', 'anything')) f = UserRegistration(data={'password1': 'Bonjour', 'password2': 'Hello'}) self.assertFalse(f.has_error('password1')) self.assertFalse(f.has_error('password1', 'required')) self.assertTrue(f.has_error(NON_FIELD_ERRORS)) self.assertTrue(f.has_error(NON_FIELD_ERRORS, 'password_mismatch')) self.assertFalse(f.has_error(NON_FIELD_ERRORS, 'anything')) def test_dynamic_construction(self): # It's possible to construct a Form dynamically by adding to the self.fields # dictionary in __init__(). Don't forget to call Form.__init__() within the # subclass' __init__(). class Person(Form): first_name = CharField() last_name = CharField() def __init__(self, *args, **kwargs): super(Person, self).__init__(*args, **kwargs) self.fields['birthday'] = DateField() p = Person(auto_id=False) self.assertHTMLEqual(p.as_table(), """<tr><th>First name:</th><td><input type="text" name="first_name" /></td></tr> <tr><th>Last name:</th><td><input type="text" name="last_name" /></td></tr> <tr><th>Birthday:</th><td><input type="text" name="birthday" /></td></tr>""") # Instances of a dynamic Form do not persist fields from one Form instance to # the next. 
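        # Added illustrative aside (not part of the original test): the isolation
        # comes from BaseForm.__init__() deep-copying the class-level base_fields
        # into self.fields, so per-instance mutations never touch the class.
        # FieldProbeForm is a throwaway form defined only for this demonstration.
        class FieldProbeForm(Form):
            name = CharField()

        probe = FieldProbeForm()
        self.assertIsNot(probe.fields['name'], FieldProbeForm.base_fields['name'])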
class MyForm(Form): def __init__(self, data=None, auto_id=False, field_list=[]): Form.__init__(self, data, auto_id=auto_id) for field in field_list: self.fields[field[0]] = field[1] field_list = [('field1', CharField()), ('field2', CharField())] my_form = MyForm(field_list=field_list) self.assertHTMLEqual(my_form.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr> <tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>""") field_list = [('field3', CharField()), ('field4', CharField())] my_form = MyForm(field_list=field_list) self.assertHTMLEqual(my_form.as_table(), """<tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr> <tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>""") class MyForm(Form): default_field_1 = CharField() default_field_2 = CharField() def __init__(self, data=None, auto_id=False, field_list=[]): Form.__init__(self, data, auto_id=auto_id) for field in field_list: self.fields[field[0]] = field[1] field_list = [('field1', CharField()), ('field2', CharField())] my_form = MyForm(field_list=field_list) self.assertHTMLEqual(my_form.as_table(), """<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" /></td></tr> <tr><th>Default field 2:</th><td><input type="text" name="default_field_2" /></td></tr> <tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr> <tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr>""") field_list = [('field3', CharField()), ('field4', CharField())] my_form = MyForm(field_list=field_list) self.assertHTMLEqual(my_form.as_table(), """<tr><th>Default field 1:</th><td><input type="text" name="default_field_1" /></td></tr> <tr><th>Default field 2:</th><td><input type="text" name="default_field_2" /></td></tr> <tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr> <tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr>""") # Similarly, changes to field attributes do not persist from one Form instance # to the next. 
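        # Added illustrative aside (not part of the original test): the deep copy
        # includes each field's widget, which is why widget attrs set on one
        # instance (as in the test below) don't leak into another.
        # WidgetProbeForm is a throwaway form for this demonstration.
        class WidgetProbeForm(Form):
            name = CharField()

        form_a, form_b = WidgetProbeForm(), WidgetProbeForm()
        form_a.fields['name'].widget.attrs['class'] = 'required'
        self.assertEqual(form_b.fields['name'].widget.attrs, {})
        self.assertIsNot(form_a.fields['name'].widget, form_b.fields['name'].widget)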
        class Person(Form):
            first_name = CharField(required=False)
            last_name = CharField(required=False)

            def __init__(self, names_required=False, *args, **kwargs):
                super(Person, self).__init__(*args, **kwargs)
                if names_required:
                    self.fields['first_name'].required = True
                    self.fields['first_name'].widget.attrs['class'] = 'required'
                    self.fields['last_name'].required = True
                    self.fields['last_name'].widget.attrs['class'] = 'required'

        f = Person(names_required=False)
        self.assertEqual((f['first_name'].field.required, f['last_name'].field.required), (False, False))
        self.assertEqual((f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs), ({}, {}))
        f = Person(names_required=True)
        self.assertEqual((f['first_name'].field.required, f['last_name'].field.required), (True, True))
        self.assertEqual((f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs), ({'class': 'required'}, {'class': 'required'}))
        f = Person(names_required=False)
        self.assertEqual((f['first_name'].field.required, f['last_name'].field.required), (False, False))
        self.assertEqual((f['first_name'].field.widget.attrs, f['last_name'].field.widget.attrs), ({}, {}))

        class Person(Form):
            first_name = CharField(max_length=30)
            last_name = CharField(max_length=30)

            def __init__(self, name_max_length=None, *args, **kwargs):
                super(Person, self).__init__(*args, **kwargs)
                if name_max_length:
                    self.fields['first_name'].max_length = name_max_length
                    self.fields['last_name'].max_length = name_max_length

        f = Person(name_max_length=None)
        self.assertEqual((f['first_name'].field.max_length, f['last_name'].field.max_length), (30, 30))
        f = Person(name_max_length=20)
        self.assertEqual((f['first_name'].field.max_length, f['last_name'].field.max_length), (20, 20))
        f = Person(name_max_length=None)
        self.assertEqual((f['first_name'].field.max_length, f['last_name'].field.max_length), (30, 30))

        # Similarly, choices do not persist from one Form instance to the next.
        # Refs #15127.
        class Person(Form):
            first_name = CharField(required=False)
            last_name = CharField(required=False)
            gender = ChoiceField(choices=(('f', 'Female'), ('m', 'Male')))

            def __init__(self, allow_unspec_gender=False, *args, **kwargs):
                super(Person, self).__init__(*args, **kwargs)
                if allow_unspec_gender:
                    self.fields['gender'].choices += (('u', 'Unspecified'),)

        f = Person()
        self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])
        f = Person(allow_unspec_gender=True)
        self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male'), ('u', 'Unspecified')])
        f = Person()
        self.assertEqual(f['gender'].field.choices, [('f', 'Female'), ('m', 'Male')])

    def test_validators_independence(self):
        """
        Test that we are able to modify a form field's validators list without
        polluting other forms.
        """
        from django.core.validators import MaxValueValidator

        class MyForm(Form):
            myfield = CharField(max_length=25)

        f1 = MyForm()
        f2 = MyForm()

        f1.fields['myfield'].validators[0] = MaxValueValidator(12)
        self.assertNotEqual(f1.fields['myfield'].validators[0], f2.fields['myfield'].validators[0])

    def test_hidden_widget(self):
        # HiddenInput widgets are displayed differently in the as_table(), as_ul()
        # and as_p() output of a Form -- their verbose names are not displayed, and a
        # separate row is not displayed. They're displayed in the last row of the
        # form, directly after that row's form element.
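        # Added illustrative aside (not part of the original test): the hidden /
        # visible split described above is also exposed programmatically through
        # hidden_fields() and visible_fields(). HiddenProbeForm is a throwaway
        # form for this demonstration.
        class HiddenProbeForm(Form):
            visible = CharField()
            hidden = CharField(widget=HiddenInput)

        probe = HiddenProbeForm()
        self.assertEqual([bf.name for bf in probe.hidden_fields()], ['hidden'])
        self.assertEqual([bf.name for bf in probe.visible_fields()], ['visible'])
        self.assertTrue(probe['hidden'].is_hidden)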
class Person(Form): first_name = CharField() last_name = CharField() hidden_text = CharField(widget=HiddenInput) birthday = DateField() p = Person(auto_id=False) self.assertHTMLEqual(p.as_table(), """<tr><th>First name:</th><td><input type="text" name="first_name" /></td></tr> <tr><th>Last name:</th><td><input type="text" name="last_name" /></td></tr> <tr><th>Birthday:</th><td><input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></td></tr>""") self.assertHTMLEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li> <li>Last name: <input type="text" name="last_name" /></li> <li>Birthday: <input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></li>""") self.assertHTMLEqual(p.as_p(), """<p>First name: <input type="text" name="first_name" /></p> <p>Last name: <input type="text" name="last_name" /></p> <p>Birthday: <input type="text" name="birthday" /><input type="hidden" name="hidden_text" /></p>""") # With auto_id set, a HiddenInput still gets an ID, but it doesn't get a label. p = Person(auto_id='id_%s') self.assertHTMLEqual(p.as_table(), """<tr><th><label for="id_first_name">First name:</label></th><td><input type="text" name="first_name" id="id_first_name" /></td></tr> <tr><th><label for="id_last_name">Last name:</label></th><td><input type="text" name="last_name" id="id_last_name" /></td></tr> <tr><th><label for="id_birthday">Birthday:</label></th><td><input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></td></tr>""") self.assertHTMLEqual(p.as_ul(), """<li><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></li> <li><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></li> <li><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></li>""") self.assertHTMLEqual(p.as_p(), """<p><label for="id_first_name">First name:</label> <input type="text" name="first_name" id="id_first_name" /></p> <p><label for="id_last_name">Last name:</label> <input type="text" name="last_name" id="id_last_name" /></p> <p><label for="id_birthday">Birthday:</label> <input type="text" name="birthday" id="id_birthday" /><input type="hidden" name="hidden_text" id="id_hidden_text" /></p>""") # If a field with a HiddenInput has errors, the as_table() and as_ul() output # will include the error message(s) with the text "(Hidden field [fieldname]) " # prepended. This message is displayed at the top of the output, regardless of # its field's order in the form. 
p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'}, auto_id=False) self.assertHTMLEqual(p.as_table(), """<tr><td colspan="2"><ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul></td></tr> <tr><th>First name:</th><td><input type="text" name="first_name" value="John" /></td></tr> <tr><th>Last name:</th><td><input type="text" name="last_name" value="Lennon" /></td></tr> <tr><th>Birthday:</th><td><input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></td></tr>""") self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul></li> <li>First name: <input type="text" name="first_name" value="John" /></li> <li>Last name: <input type="text" name="last_name" value="Lennon" /></li> <li>Birthday: <input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></li>""") self.assertHTMLEqual(p.as_p(), """<ul class="errorlist nonfield"><li>(Hidden field hidden_text) This field is required.</li></ul> <p>First name: <input type="text" name="first_name" value="John" /></p> <p>Last name: <input type="text" name="last_name" value="Lennon" /></p> <p>Birthday: <input type="text" name="birthday" value="1940-10-9" /><input type="hidden" name="hidden_text" /></p>""") # A corner case: It's possible for a form to have only HiddenInputs. class TestForm(Form): foo = CharField(widget=HiddenInput) bar = CharField(widget=HiddenInput) p = TestForm(auto_id=False) self.assertHTMLEqual(p.as_table(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />') self.assertHTMLEqual(p.as_ul(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />') self.assertHTMLEqual(p.as_p(), '<input type="hidden" name="foo" /><input type="hidden" name="bar" />') def test_field_order(self): # A Form's fields are displayed in the same order in which they were defined. 
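        # Added illustrative aside (not part of the original test): declaration
        # order is preserved because the form metaclass collects fields into an
        # OrderedDict in definition order. OrderProbeForm is a throwaway form
        # for this demonstration.
        class OrderProbeForm(Form):
            zeta = CharField()
            alpha = CharField()

        self.assertEqual(list(OrderProbeForm().fields), ['zeta', 'alpha'])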
class TestForm(Form): field1 = CharField() field2 = CharField() field3 = CharField() field4 = CharField() field5 = CharField() field6 = CharField() field7 = CharField() field8 = CharField() field9 = CharField() field10 = CharField() field11 = CharField() field12 = CharField() field13 = CharField() field14 = CharField() p = TestForm(auto_id=False) self.assertHTMLEqual(p.as_table(), """<tr><th>Field1:</th><td><input type="text" name="field1" /></td></tr> <tr><th>Field2:</th><td><input type="text" name="field2" /></td></tr> <tr><th>Field3:</th><td><input type="text" name="field3" /></td></tr> <tr><th>Field4:</th><td><input type="text" name="field4" /></td></tr> <tr><th>Field5:</th><td><input type="text" name="field5" /></td></tr> <tr><th>Field6:</th><td><input type="text" name="field6" /></td></tr> <tr><th>Field7:</th><td><input type="text" name="field7" /></td></tr> <tr><th>Field8:</th><td><input type="text" name="field8" /></td></tr> <tr><th>Field9:</th><td><input type="text" name="field9" /></td></tr> <tr><th>Field10:</th><td><input type="text" name="field10" /></td></tr> <tr><th>Field11:</th><td><input type="text" name="field11" /></td></tr> <tr><th>Field12:</th><td><input type="text" name="field12" /></td></tr> <tr><th>Field13:</th><td><input type="text" name="field13" /></td></tr> <tr><th>Field14:</th><td><input type="text" name="field14" /></td></tr>""") def test_explicit_field_order(self): class TestFormParent(Form): field1 = CharField() field2 = CharField() field4 = CharField() field5 = CharField() field6 = CharField() field_order = ['field6', 'field5', 'field4', 'field2', 'field1'] class TestForm(TestFormParent): field3 = CharField() field_order = ['field2', 'field4', 'field3', 'field5', 'field6'] class TestFormRemove(TestForm): field1 = None class TestFormMissing(TestForm): field_order = ['field2', 'field4', 'field3', 'field5', 'field6', 'field1'] field1 = None class TestFormInit(TestFormParent): field3 = CharField() field_order = None def __init__(self, **kwargs): super(TestFormInit, self).__init__(**kwargs) self.order_fields(field_order=TestForm.field_order) p = TestFormParent() self.assertEqual(list(p.fields.keys()), TestFormParent.field_order) p = TestFormRemove() self.assertEqual(list(p.fields.keys()), TestForm.field_order) p = TestFormMissing() self.assertEqual(list(p.fields.keys()), TestForm.field_order) p = TestForm() self.assertEqual(list(p.fields.keys()), TestFormMissing.field_order) p = TestFormInit() order = list(TestForm.field_order) + ['field1'] self.assertEqual(list(p.fields.keys()), order) TestForm.field_order = ['unknown'] p = TestForm() self.assertEqual(list(p.fields.keys()), ['field1', 'field2', 'field4', 'field5', 'field6', 'field3']) def test_form_html_attributes(self): # Some Field classes have an effect on the HTML attributes of their associated # Widget. If you set max_length in a CharField and its associated widget is # either a TextInput or PasswordInput, then the widget's rendered HTML will # include the "maxlength" attribute. 
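        # Added illustrative aside (not part of the original test): the rendered
        # "maxlength" attribute comes from CharField.widget_attrs(), whose result
        # the widget merges into its HTML attributes.
        self.assertEqual(CharField(max_length=10).widget_attrs(TextInput()), {'maxlength': '10'})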
class UserRegistration(Form): username = CharField(max_length=10) # uses TextInput by default password = CharField(max_length=10, widget=PasswordInput) realname = CharField(max_length=10, widget=TextInput) # redundantly define widget, just to test address = CharField() # no max_length defined here p = UserRegistration(auto_id=False) self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li> <li>Password: <input type="password" name="password" maxlength="10" /></li> <li>Realname: <input type="text" name="realname" maxlength="10" /></li> <li>Address: <input type="text" name="address" /></li>""") # If you specify a custom "attrs" that includes the "maxlength" attribute, # the Field's max_length attribute will override whatever "maxlength" you specify # in "attrs". class UserRegistration(Form): username = CharField(max_length=10, widget=TextInput(attrs={'maxlength': 20})) password = CharField(max_length=10, widget=PasswordInput) p = UserRegistration(auto_id=False) self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li> <li>Password: <input type="password" name="password" maxlength="10" /></li>""") def test_specifying_labels(self): # You can specify the label for a field by using the 'label' argument to a Field # class. If you don't specify 'label', Django will use the field name with # underscores converted to spaces, and the initial letter capitalized. class UserRegistration(Form): username = CharField(max_length=10, label='Your username') password1 = CharField(widget=PasswordInput) password2 = CharField(widget=PasswordInput, label='Contraseña (de nuevo)') p = UserRegistration(auto_id=False) self.assertHTMLEqual(p.as_ul(), """<li>Your username: <input type="text" name="username" maxlength="10" /></li> <li>Password1: <input type="password" name="password1" /></li> <li>Contraseña (de nuevo): <input type="password" name="password2" /></li>""") # Labels for as_* methods will only end in a colon if they don't end in other # punctuation already. class Questions(Form): q1 = CharField(label='The first question') q2 = CharField(label='What is your name?') q3 = CharField(label='The answer to life is:') q4 = CharField(label='Answer this question!') q5 = CharField(label='The last question. Period.') self.assertHTMLEqual(Questions(auto_id=False).as_p(), """<p>The first question: <input type="text" name="q1" /></p> <p>What is your name? <input type="text" name="q2" /></p> <p>The answer to life is: <input type="text" name="q3" /></p> <p>Answer this question! <input type="text" name="q4" /></p> <p>The last question. Period. <input type="text" name="q5" /></p>""") self.assertHTMLEqual(Questions().as_p(), """<p><label for="id_q1">The first question:</label> <input type="text" name="q1" id="id_q1" /></p> <p><label for="id_q2">What is your name?</label> <input type="text" name="q2" id="id_q2" /></p> <p><label for="id_q3">The answer to life is:</label> <input type="text" name="q3" id="id_q3" /></p> <p><label for="id_q4">Answer this question!</label> <input type="text" name="q4" id="id_q4" /></p> <p><label for="id_q5">The last question. Period.</label> <input type="text" name="q5" id="id_q5" /></p>""") # If a label is set to the empty string for a field, that field won't get a label. 
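        # Added illustrative aside (not part of the original test): an explicit
        # label='' is kept as-is on the BoundField, while a missing label falls
        # back to the prettified field name. LabelProbeForm is a throwaway form
        # for this demonstration.
        class LabelProbeForm(Form):
            first_name = CharField()
            nick = CharField(label='')

        probe = LabelProbeForm()
        self.assertEqual(probe['first_name'].label, 'First name')
        self.assertEqual(probe['nick'].label, '')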
class UserRegistration(Form): username = CharField(max_length=10, label='') password = CharField(widget=PasswordInput) p = UserRegistration(auto_id=False) self.assertHTMLEqual(p.as_ul(), """<li> <input type="text" name="username" maxlength="10" /></li> <li>Password: <input type="password" name="password" /></li>""") p = UserRegistration(auto_id='id_%s') self.assertHTMLEqual(p.as_ul(), """<li> <input id="id_username" type="text" name="username" maxlength="10" /></li> <li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" /></li>""") # If label is None, Django will auto-create the label from the field name. This # is default behavior. class UserRegistration(Form): username = CharField(max_length=10, label=None) password = CharField(widget=PasswordInput) p = UserRegistration(auto_id=False) self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /></li> <li>Password: <input type="password" name="password" /></li>""") p = UserRegistration(auto_id='id_%s') self.assertHTMLEqual(p.as_ul(), """<li><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></li> <li><label for="id_password">Password:</label> <input type="password" name="password" id="id_password" /></li>""") def test_label_suffix(self): # You can specify the 'label_suffix' argument to a Form class to modify the # punctuation symbol used at the end of a label. By default, the colon (:) is # used, and is only appended to the label if the label doesn't already end with a # punctuation symbol: ., !, ? or :. If you specify a different suffix, it will # be appended regardless of the last character of the label. class FavoriteForm(Form): color = CharField(label='Favorite color?') animal = CharField(label='Favorite animal') answer = CharField(label='Secret answer', label_suffix=' =') f = FavoriteForm(auto_id=False) self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li> <li>Favorite animal: <input type="text" name="animal" /></li> <li>Secret answer = <input type="text" name="answer" /></li>""") f = FavoriteForm(auto_id=False, label_suffix='?') self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li> <li>Favorite animal? <input type="text" name="animal" /></li> <li>Secret answer = <input type="text" name="answer" /></li>""") f = FavoriteForm(auto_id=False, label_suffix='') self.assertHTMLEqual(f.as_ul(), """<li>Favorite color? <input type="text" name="color" /></li> <li>Favorite animal <input type="text" name="animal" /></li> <li>Secret answer = <input type="text" name="answer" /></li>""") f = FavoriteForm(auto_id=False, label_suffix='\u2192') self.assertHTMLEqual(f.as_ul(), '<li>Favorite color? <input type="text" name="color" /></li>\n<li>Favorite animal\u2192 <input type="text" name="animal" /></li>\n<li>Secret answer = <input type="text" name="answer" /></li>') def test_initial_data(self): # You can specify initial data for a field by using the 'initial' argument to a # Field class. This initial data is displayed when a Form is rendered with *no* # data. It is not displayed when a Form is rendered with any data (including an # empty dictionary). Also, the initial value is *not* used if data for a # particular required field isn't provided. 
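        # Added illustrative aside (not part of the original test): what actually
        # gets rendered is BoundField.value(), and it only consults 'initial'
        # while the form is unbound. InitialProbeForm is a throwaway form for
        # this demonstration.
        class InitialProbeForm(Form):
            username = CharField(initial='django')

        self.assertEqual(InitialProbeForm()['username'].value(), 'django')
        self.assertIsNone(InitialProbeForm({})['username'].value())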
        class UserRegistration(Form):
            username = CharField(max_length=10, initial='django')
            password = CharField(widget=PasswordInput)

        # Here, we're not submitting any data, so the initial value will be displayed.
        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
        # Here, we're submitting data, so the initial value will *not* be displayed.
        p = UserRegistration({}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration({'username': ''}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration({'username': 'foo'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")

        # An 'initial' value is *not* used as a fallback if data is not provided. In this
        # example, we don't provide a value for 'username', and the form raises a
        # validation error rather than using the initial value for 'username'.
        p = UserRegistration({'password': 'secret'})
        self.assertEqual(p.errors['username'], ['This field is required.'])
        self.assertFalse(p.is_valid())

    def test_dynamic_initial_data(self):
        # The previous technique dealt with "hard-coded" initial data, but it's also
        # possible to specify initial data after you've already created the Form class
        # (i.e., at runtime). Use the 'initial' parameter to the Form constructor. This
        # should be a dictionary containing initial values for one or more fields in the
        # form, keyed by field name.
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password = CharField(widget=PasswordInput)

        # Here, we're not submitting any data, so the initial value will be displayed.
        p = UserRegistration(initial={'username': 'django'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration(initial={'username': 'stephane'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="stephane" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")

        # The 'initial' parameter is meaningless if you pass data.
        p = UserRegistration({}, initial={'username': 'django'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration({'username': ''}, initial={'username': 'django'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")
        p = UserRegistration({'username': 'foo'}, initial={'username': 'django'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li>
<li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li>""")

        # A dynamic 'initial' value is *not* used as a fallback if data is not provided.
        # In this example, we don't provide a value for 'username', and the form raises a
        # validation error rather than using the initial value for 'username'.
        p = UserRegistration({'password': 'secret'}, initial={'username': 'django'})
        self.assertEqual(p.errors['username'], ['This field is required.'])
        self.assertFalse(p.is_valid())

        # If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(),
        # then the latter will get precedence.
        class UserRegistration(Form):
            username = CharField(max_length=10, initial='django')
            password = CharField(widget=PasswordInput)

        p = UserRegistration(initial={'username': 'babik'}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="babik" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>""")

    def test_callable_initial_data(self):
        # The previous technique dealt with raw values as initial data, but it's also
        # possible to specify callable data.
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password = CharField(widget=PasswordInput)
            options = MultipleChoiceField(choices=[('f', 'foo'), ('b', 'bar'), ('w', 'whiz')])

        # We need to define functions that get called later.
        def initial_django():
            return 'django'

        def initial_stephane():
            return 'stephane'

        def initial_options():
            return ['f', 'b']

        def initial_other_options():
            return ['b', 'w']

        # Here, we're not submitting any data, so the initial value will be displayed.
        p = UserRegistration(initial={'username': initial_django, 'options': initial_options}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")

        # The 'initial' parameter is meaningless if you pass data.
p = UserRegistration({}, initial={'username': initial_django, 'options': initial_options}, auto_id=False) self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li> <li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li> <li><ul class="errorlist"><li>This field is required.</li></ul>Options: <select multiple="multiple" name="options"> <option value="f">foo</option> <option value="b">bar</option> <option value="w">whiz</option> </select></li>""") p = UserRegistration({'username': ''}, initial={'username': initial_django}, auto_id=False) self.assertHTMLEqual(p.as_ul(), """<li><ul class="errorlist"><li>This field is required.</li></ul>Username: <input type="text" name="username" maxlength="10" /></li> <li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li> <li><ul class="errorlist"><li>This field is required.</li></ul>Options: <select multiple="multiple" name="options"> <option value="f">foo</option> <option value="b">bar</option> <option value="w">whiz</option> </select></li>""") p = UserRegistration({'username': 'foo', 'options': ['f', 'b']}, initial={'username': initial_django}, auto_id=False) self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /></li> <li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /></li> <li>Options: <select multiple="multiple" name="options"> <option value="f" selected="selected">foo</option> <option value="b" selected="selected">bar</option> <option value="w">whiz</option> </select></li>""") # A callable 'initial' value is *not* used as a fallback if data is not provided. # In this example, we don't provide a value for 'username', and the form raises a # validation error rather than using the initial value for 'username'. p = UserRegistration({'password': 'secret'}, initial={'username': initial_django, 'options': initial_options}) self.assertEqual(p.errors['username'], ['This field is required.']) self.assertFalse(p.is_valid()) # If a Form defines 'initial' *and* 'initial' is passed as a parameter to Form(), # then the latter will get precedence. 
        class UserRegistration(Form):
            username = CharField(max_length=10, initial=initial_django)
            password = CharField(widget=PasswordInput)
            options = MultipleChoiceField(choices=[('f', 'foo'), ('b', 'bar'), ('w', 'whiz')], initial=initial_other_options)

        p = UserRegistration(auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="django" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w" selected="selected">whiz</option>
</select></li>""")
        p = UserRegistration(initial={'username': initial_stephane, 'options': initial_options}, auto_id=False)
        self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="stephane" maxlength="10" /></li>
<li>Password: <input type="password" name="password" /></li>
<li>Options: <select multiple="multiple" name="options">
<option value="f" selected="selected">foo</option>
<option value="b" selected="selected">bar</option>
<option value="w">whiz</option>
</select></li>""")

    def test_changed_data(self):
        class Person(Form):
            first_name = CharField(initial='Hans')
            last_name = CharField(initial='Greatel')
            birthday = DateField(initial=datetime.date(1974, 8, 16))

        p = Person(data={'first_name': 'Hans', 'last_name': 'Scrmbl', 'birthday': '1974-08-16'})
        self.assertTrue(p.is_valid())
        self.assertNotIn('first_name', p.changed_data)
        self.assertIn('last_name', p.changed_data)
        self.assertNotIn('birthday', p.changed_data)

        # Test that a field raising ValidationError is always in changed_data
        class PedanticField(forms.Field):
            def to_python(self, value):
                raise ValidationError('Whatever')

        class Person2(Person):
            pedantic = PedanticField(initial='whatever', show_hidden_initial=True)

        p = Person2(data={'first_name': 'Hans', 'last_name': 'Scrmbl', 'birthday': '1974-08-16', 'initial-pedantic': 'whatever'})
        self.assertFalse(p.is_valid())
        self.assertIn('pedantic', p.changed_data)

    def test_boundfield_values(self):
        # It's possible to get to the value which would be used for rendering
        # the widget for a field by using the BoundField's value method.
        class UserRegistration(Form):
            username = CharField(max_length=10, initial='djangonaut')
            password = CharField(widget=PasswordInput)

        unbound = UserRegistration()
        bound = UserRegistration({'password': 'foo'})
        self.assertEqual(bound['username'].value(), None)
        self.assertEqual(unbound['username'].value(), 'djangonaut')
        self.assertEqual(bound['password'].value(), 'foo')
        self.assertEqual(unbound['password'].value(), None)

    def test_boundfield_initial_called_once(self):
        """
        Multiple calls to BoundField().value() in an unbound form should return
        the same result each time (#24391).
        """
        class MyForm(Form):
            name = CharField(max_length=10, initial=uuid.uuid4)

        form = MyForm()
        name = form['name']
        self.assertEqual(name.value(), name.value())
        # BoundField is also cached
        self.assertIs(form['name'], name)

    def test_boundfield_rendering(self):
        """
        Python 2 issue: Test that rendering a BoundField with bytestring content
        doesn't lose its safe string status (#22950).
""" class CustomWidget(TextInput): def render(self, name, value, attrs=None): return format_html(str('<input{} />'), ' id=custom') class SampleForm(Form): name = CharField(widget=CustomWidget) f = SampleForm(data={'name': 'bar'}) self.assertIsInstance(force_text(f['name']), SafeData) def test_initial_datetime_values(self): now = datetime.datetime.now() # Nix microseconds (since they should be ignored). #22502 now_no_ms = now.replace(microsecond=0) if now == now_no_ms: now = now.replace(microsecond=1) def delayed_now(): return now def delayed_now_time(): return now.time() class HiddenInputWithoutMicrosec(HiddenInput): supports_microseconds = False class TextInputWithoutMicrosec(TextInput): supports_microseconds = False class DateTimeForm(Form): auto_timestamp = DateTimeField(initial=delayed_now) auto_time_only = TimeField(initial=delayed_now_time) supports_microseconds = DateTimeField(initial=delayed_now, widget=TextInput) hi_default_microsec = DateTimeField(initial=delayed_now, widget=HiddenInput) hi_without_microsec = DateTimeField(initial=delayed_now, widget=HiddenInputWithoutMicrosec) ti_without_microsec = DateTimeField(initial=delayed_now, widget=TextInputWithoutMicrosec) unbound = DateTimeForm() self.assertEqual(unbound['auto_timestamp'].value(), now_no_ms) self.assertEqual(unbound['auto_time_only'].value(), now_no_ms.time()) self.assertEqual(unbound['supports_microseconds'].value(), now) self.assertEqual(unbound['hi_default_microsec'].value(), now) self.assertEqual(unbound['hi_without_microsec'].value(), now_no_ms) self.assertEqual(unbound['ti_without_microsec'].value(), now_no_ms) def test_help_text(self): # You can specify descriptive text for a field by using the 'help_text' argument) class UserRegistration(Form): username = CharField(max_length=10, help_text='e.g., [email protected]') password = CharField(widget=PasswordInput, help_text='Wählen Sie mit Bedacht.') p = UserRegistration(auto_id=False) self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></li> <li>Password: <input type="password" name="password" /> <span class="helptext">Wählen Sie mit Bedacht.</span></li>""") self.assertHTMLEqual(p.as_p(), """<p>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></p> <p>Password: <input type="password" name="password" /> <span class="helptext">Wählen Sie mit Bedacht.</span></p>""") self.assertHTMLEqual(p.as_table(), """<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" /><br /><span class="helptext">e.g., [email protected]</span></td></tr> <tr><th>Password:</th><td><input type="password" name="password" /><br /><span class="helptext">Wählen Sie mit Bedacht.</span></td></tr>""") # The help text is displayed whether or not data is provided for the form. p = UserRegistration({'username': 'foo'}, auto_id=False) self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" value="foo" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></li> <li><ul class="errorlist"><li>This field is required.</li></ul>Password: <input type="password" name="password" /> <span class="helptext">Wählen Sie mit Bedacht.</span></li>""") # help_text is not displayed for hidden fields. It can be used for documentation # purposes, though. 
class UserRegistration(Form): username = CharField(max_length=10, help_text='e.g., [email protected]') password = CharField(widget=PasswordInput) next = CharField(widget=HiddenInput, initial='/', help_text='Redirect destination') p = UserRegistration(auto_id=False) self.assertHTMLEqual(p.as_ul(), """<li>Username: <input type="text" name="username" maxlength="10" /> <span class="helptext">e.g., [email protected]</span></li> <li>Password: <input type="password" name="password" /><input type="hidden" name="next" value="/" /></li>""") def test_subclassing_forms(self): # You can subclass a Form to add fields. The resulting form subclass will have # all of the fields of the parent Form, plus whichever fields you define in the # subclass. class Person(Form): first_name = CharField() last_name = CharField() birthday = DateField() class Musician(Person): instrument = CharField() p = Person(auto_id=False) self.assertHTMLEqual(p.as_ul(), """<li>First name: <input type="text" name="first_name" /></li> <li>Last name: <input type="text" name="last_name" /></li> <li>Birthday: <input type="text" name="birthday" /></li>""") m = Musician(auto_id=False) self.assertHTMLEqual(m.as_ul(), """<li>First name: <input type="text" name="first_name" /></li> <li>Last name: <input type="text" name="last_name" /></li> <li>Birthday: <input type="text" name="birthday" /></li> <li>Instrument: <input type="text" name="instrument" /></li>""") # Yes, you can subclass multiple forms. The fields are added in the order in # which the parent classes are listed. class Person(Form): first_name = CharField() last_name = CharField() birthday = DateField() class Instrument(Form): instrument = CharField() class Beatle(Person, Instrument): haircut_type = CharField() b = Beatle(auto_id=False) self.assertHTMLEqual(b.as_ul(), """<li>Instrument: <input type="text" name="instrument" /></li> <li>First name: <input type="text" name="first_name" /></li> <li>Last name: <input type="text" name="last_name" /></li> <li>Birthday: <input type="text" name="birthday" /></li> <li>Haircut type: <input type="text" name="haircut_type" /></li>""") def test_forms_with_prefixes(self): # Sometimes it's necessary to have multiple forms display on the same HTML page, # or multiple copies of the same form. We can accomplish this with form prefixes. # Pass the keyword argument 'prefix' to the Form constructor to use this feature. # This value will be prepended to each HTML form field name. One way to think # about this is "namespaces for HTML forms". Notice that in the data argument, # each field's key has the prefix, in this case 'person1', prepended to the # actual field name. 
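        # Added illustrative aside (not part of the original test): the prefixed
        # HTML name is produced by Form.add_prefix(), which joins the prefix and
        # the field name with a hyphen. PrefixProbeForm is a throwaway form for
        # this demonstration.
        class PrefixProbeForm(Form):
            name = CharField()

        self.assertEqual(PrefixProbeForm(prefix='person1').add_prefix('name'), 'person1-name')
        self.assertEqual(PrefixProbeForm().add_prefix('name'), 'name')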
class Person(Form): first_name = CharField() last_name = CharField() birthday = DateField() data = { 'person1-first_name': 'John', 'person1-last_name': 'Lennon', 'person1-birthday': '1940-10-9' } p = Person(data, prefix='person1') self.assertHTMLEqual(p.as_ul(), """<li><label for="id_person1-first_name">First name:</label> <input type="text" name="person1-first_name" value="John" id="id_person1-first_name" /></li> <li><label for="id_person1-last_name">Last name:</label> <input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" /></li> <li><label for="id_person1-birthday">Birthday:</label> <input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" /></li>""") self.assertHTMLEqual(str(p['first_name']), '<input type="text" name="person1-first_name" value="John" id="id_person1-first_name" />') self.assertHTMLEqual(str(p['last_name']), '<input type="text" name="person1-last_name" value="Lennon" id="id_person1-last_name" />') self.assertHTMLEqual(str(p['birthday']), '<input type="text" name="person1-birthday" value="1940-10-9" id="id_person1-birthday" />') self.assertEqual(p.errors, {}) self.assertTrue(p.is_valid()) self.assertEqual(p.cleaned_data['first_name'], 'John') self.assertEqual(p.cleaned_data['last_name'], 'Lennon') self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9)) # Let's try submitting some bad data to make sure form.errors and field.errors # work as expected. data = { 'person1-first_name': '', 'person1-last_name': '', 'person1-birthday': '' } p = Person(data, prefix='person1') self.assertEqual(p.errors['first_name'], ['This field is required.']) self.assertEqual(p.errors['last_name'], ['This field is required.']) self.assertEqual(p.errors['birthday'], ['This field is required.']) self.assertEqual(p['first_name'].errors, ['This field is required.']) try: p['person1-first_name'].errors self.fail('Attempts to access non-existent fields should fail.') except KeyError: pass # In this example, the data doesn't have a prefix, but the form requires it, so # the form doesn't "see" the fields. data = { 'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9' } p = Person(data, prefix='person1') self.assertEqual(p.errors['first_name'], ['This field is required.']) self.assertEqual(p.errors['last_name'], ['This field is required.']) self.assertEqual(p.errors['birthday'], ['This field is required.']) # With prefixes, a single data dictionary can hold data for multiple instances # of the same form. data = { 'person1-first_name': 'John', 'person1-last_name': 'Lennon', 'person1-birthday': '1940-10-9', 'person2-first_name': 'Jim', 'person2-last_name': 'Morrison', 'person2-birthday': '1943-12-8' } p1 = Person(data, prefix='person1') self.assertTrue(p1.is_valid()) self.assertEqual(p1.cleaned_data['first_name'], 'John') self.assertEqual(p1.cleaned_data['last_name'], 'Lennon') self.assertEqual(p1.cleaned_data['birthday'], datetime.date(1940, 10, 9)) p2 = Person(data, prefix='person2') self.assertTrue(p2.is_valid()) self.assertEqual(p2.cleaned_data['first_name'], 'Jim') self.assertEqual(p2.cleaned_data['last_name'], 'Morrison') self.assertEqual(p2.cleaned_data['birthday'], datetime.date(1943, 12, 8)) # By default, forms append a hyphen between the prefix and the field name, but a # form can alter that behavior by implementing the add_prefix() method. This # method takes a field name and returns the prefixed field, according to # self.prefix. 
        class Person(Form):
            first_name = CharField()
            last_name = CharField()
            birthday = DateField()

            def add_prefix(self, field_name):
                return '%s-prefix-%s' % (self.prefix, field_name) if self.prefix else field_name

        p = Person(prefix='foo')
        self.assertHTMLEqual(p.as_ul(), """<li><label for="id_foo-prefix-first_name">First name:</label> <input type="text" name="foo-prefix-first_name" id="id_foo-prefix-first_name" /></li>
<li><label for="id_foo-prefix-last_name">Last name:</label> <input type="text" name="foo-prefix-last_name" id="id_foo-prefix-last_name" /></li>
<li><label for="id_foo-prefix-birthday">Birthday:</label> <input type="text" name="foo-prefix-birthday" id="id_foo-prefix-birthday" /></li>""")
        data = {
            'foo-prefix-first_name': 'John',
            'foo-prefix-last_name': 'Lennon',
            'foo-prefix-birthday': '1940-10-9'
        }
        p = Person(data, prefix='foo')
        self.assertTrue(p.is_valid())
        self.assertEqual(p.cleaned_data['first_name'], 'John')
        self.assertEqual(p.cleaned_data['last_name'], 'Lennon')
        self.assertEqual(p.cleaned_data['birthday'], datetime.date(1940, 10, 9))

    def test_class_prefix(self):
        # Prefix can also be specified at the class level.
        class Person(Form):
            first_name = CharField()
            prefix = 'foo'

        p = Person()
        self.assertEqual(p.prefix, 'foo')

        p = Person(prefix='bar')
        self.assertEqual(p.prefix, 'bar')

    def test_forms_with_null_boolean(self):
        # NullBooleanField is a bit of a special case because its presentation (widget)
        # is different than its data. This is handled transparently, though.
        class Person(Form):
            name = CharField()
            is_cool = NullBooleanField()

        p = Person({'name': 'Joe'}, auto_id=False)
        self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
        p = Person({'name': 'Joe', 'is_cool': '1'}, auto_id=False)
        self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1" selected="selected">Unknown</option>
<option value="2">Yes</option>
<option value="3">No</option>
</select>""")
        p = Person({'name': 'Joe', 'is_cool': '2'}, auto_id=False)
        self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
        p = Person({'name': 'Joe', 'is_cool': '3'}, auto_id=False)
        self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")
        p = Person({'name': 'Joe', 'is_cool': True}, auto_id=False)
        self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2" selected="selected">Yes</option>
<option value="3">No</option>
</select>""")
        p = Person({'name': 'Joe', 'is_cool': False}, auto_id=False)
        self.assertHTMLEqual(str(p['is_cool']), """<select name="is_cool">
<option value="1">Unknown</option>
<option value="2">Yes</option>
<option value="3" selected="selected">No</option>
</select>""")

    def test_forms_with_file_fields(self):
        # FileFields are a special case because they take their data from
        # request.FILES, not request.POST.
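        # Added illustrative aside (not part of the original test): a form with a
        # FileField reports is_multipart() == True, the cue that its <form> tag
        # needs enctype="multipart/form-data". UploadProbeForm is a throwaway
        # form for this demonstration.
        class UploadProbeForm(Form):
            upload = FileField()

        self.assertTrue(UploadProbeForm().is_multipart())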
        class FileForm(Form):
            file1 = FileField()

        f = FileForm(auto_id=False)
        self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')

        f = FileForm(data={}, files={}, auto_id=False)
        self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="file" name="file1" /></td></tr>')

        f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'')}, auto_id=False)
        self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>The submitted file is empty.</li></ul><input type="file" name="file1" /></td></tr>')

        f = FileForm(data={}, files={'file1': 'something that is not a file'}, auto_id=False)
        self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><ul class="errorlist"><li>No file was submitted. Check the encoding type on the form.</li></ul><input type="file" name="file1" /></td></tr>')

        f = FileForm(data={}, files={'file1': SimpleUploadedFile('name', b'some content')}, auto_id=False)
        self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')
        self.assertTrue(f.is_valid())

        f = FileForm(data={}, files={'file1': SimpleUploadedFile('我隻氣墊船裝滿晒鱔.txt', 'मेरी मँडराने वाली नाव सर्पमीनों से भरी ह'.encode('utf-8'))}, auto_id=False)
        self.assertHTMLEqual(f.as_table(), '<tr><th>File1:</th><td><input type="file" name="file1" /></td></tr>')

    def test_basic_processing_in_view(self):
        class UserRegistration(Form):
            username = CharField(max_length=10)
            password1 = CharField(widget=PasswordInput)
            password2 = CharField(widget=PasswordInput)

            def clean(self):
                if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']:
                    raise ValidationError('Please make sure your passwords match.')
                return self.cleaned_data

        def my_function(method, post_data):
            if method == 'POST':
                form = UserRegistration(post_data, auto_id=False)
            else:
                form = UserRegistration(auto_id=False)
            if form.is_valid():
                return 'VALID: %r' % sorted(six.iteritems(form.cleaned_data))
            t = Template('<form action="" method="post">\n<table>\n{{ form }}\n</table>\n<input type="submit" />\n</form>')
            return t.render(Context({'form': form}))

        # Case 1: GET (an empty form, with no errors).
        self.assertHTMLEqual(my_function('GET', {}), """<form action="" method="post">
<table>
<tr><th>Username:</th><td><input type="text" name="username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>
</table>
<input type="submit" />
</form>""")
        # Case 2: POST with erroneous data (a redisplayed form, with errors).
        self.assertHTMLEqual(my_function('POST', {'username': 'this-is-a-long-username', 'password1': 'foo', 'password2': 'bar'}), """<form action="" method="post">
<table>
<tr><td colspan="2"><ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul></td></tr>
<tr><th>Username:</th><td><ul class="errorlist"><li>Ensure this value has at most 10 characters (it has 23).</li></ul><input type="text" name="username" value="this-is-a-long-username" maxlength="10" /></td></tr>
<tr><th>Password1:</th><td><input type="password" name="password1" /></td></tr>
<tr><th>Password2:</th><td><input type="password" name="password2" /></td></tr>
</table>
<input type="submit" />
</form>""")
        # Case 3: POST with valid data (the success message).
self.assertEqual(my_function('POST', {'username': 'adrian', 'password1': 'secret', 'password2': 'secret'}), str_prefix("VALID: [('password1', %(_)s'secret'), ('password2', %(_)s'secret'), ('username', %(_)s'adrian')]")) def test_templates_with_forms(self): class UserRegistration(Form): username = CharField(max_length=10, help_text="Good luck picking a username that doesn't already exist.") password1 = CharField(widget=PasswordInput) password2 = CharField(widget=PasswordInput) def clean(self): if self.cleaned_data.get('password1') and self.cleaned_data.get('password2') and self.cleaned_data['password1'] != self.cleaned_data['password2']: raise ValidationError('Please make sure your passwords match.') return self.cleaned_data # You have full flexibility in displaying form fields in a template. Just pass a # Form instance to the template, and use "dot" access to refer to individual # fields. Note, however, that this flexibility comes with the responsibility of # displaying all the errors, including any that might not be associated with a # particular field. t = Template('''<form action=""> {{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p> {{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p> {{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p> <input type="submit" /> </form>''') self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action=""> <p><label>Your username: <input type="text" name="username" maxlength="10" /></label></p> <p><label>Password: <input type="password" name="password1" /></label></p> <p><label>Password (again): <input type="password" name="password2" /></label></p> <input type="submit" /> </form>""") self.assertHTMLEqual(t.render(Context({'form': UserRegistration({'username': 'django'}, auto_id=False)})), """<form action=""> <p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p> <ul class="errorlist"><li>This field is required.</li></ul><p><label>Password: <input type="password" name="password1" /></label></p> <ul class="errorlist"><li>This field is required.</li></ul><p><label>Password (again): <input type="password" name="password2" /></label></p> <input type="submit" /> </form>""") # Use form.[field].label to output a field's label. You can specify the label for # a field by using the 'label' argument to a Field class. If you don't specify # 'label', Django will use the field name with underscores converted to spaces, # and the initial letter capitalized. t = Template('''<form action=""> <p><label>{{ form.username.label }}: {{ form.username }}</label></p> <p><label>{{ form.password1.label }}: {{ form.password1 }}</label></p> <p><label>{{ form.password2.label }}: {{ form.password2 }}</label></p> <input type="submit" /> </form>''') self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action=""> <p><label>Username: <input type="text" name="username" maxlength="10" /></label></p> <p><label>Password1: <input type="password" name="password1" /></label></p> <p><label>Password2: <input type="password" name="password2" /></label></p> <input type="submit" /> </form>""") # User form.[field].label_tag to output a field's label with a <label> tag # wrapped around it, but *only* if the given field has an "id" attribute. # Recall from above that passing the "auto_id" argument to a Form gives each # field an "id" attribute. 
t = Template('''<form action=""> <p>{{ form.username.label_tag }} {{ form.username }}</p> <p>{{ form.password1.label_tag }} {{ form.password1 }}</p> <p>{{ form.password2.label_tag }} {{ form.password2 }}</p> <input type="submit" /> </form>''') self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action=""> <p>Username: <input type="text" name="username" maxlength="10" /></p> <p>Password1: <input type="password" name="password1" /></p> <p>Password2: <input type="password" name="password2" /></p> <input type="submit" /> </form>""") self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id='id_%s')})), """<form action=""> <p><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p> <p><label for="id_password1">Password1:</label> <input type="password" name="password1" id="id_password1" /></p> <p><label for="id_password2">Password2:</label> <input type="password" name="password2" id="id_password2" /></p> <input type="submit" /> </form>""") # User form.[field].help_text to output a field's help text. If the given field # does not have help text, nothing will be output. t = Template('''<form action=""> <p>{{ form.username.label_tag }} {{ form.username }}<br />{{ form.username.help_text }}</p> <p>{{ form.password1.label_tag }} {{ form.password1 }}</p> <p>{{ form.password2.label_tag }} {{ form.password2 }}</p> <input type="submit" /> </form>''') self.assertHTMLEqual(t.render(Context({'form': UserRegistration(auto_id=False)})), """<form action=""> <p>Username: <input type="text" name="username" maxlength="10" /><br />Good luck picking a username that doesn&#39;t already exist.</p> <p>Password1: <input type="password" name="password1" /></p> <p>Password2: <input type="password" name="password2" /></p> <input type="submit" /> </form>""") self.assertEqual(Template('{{ form.password1.help_text }}').render(Context({'form': UserRegistration(auto_id=False)})), '') # To display the errors that aren't associated with a particular field -- e.g., # the errors caused by Form.clean() -- use {{ form.non_field_errors }} in the # template. If used on its own, it is displayed as a <ul> (or an empty string, if # the list of errors is empty). You can also use it in {% if %} statements. 
t = Template('''<form action=""> {{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p> {{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p> {{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p> <input type="submit" /> </form>''') self.assertHTMLEqual(t.render(Context({'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)})), """<form action=""> <p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p> <p><label>Password: <input type="password" name="password1" /></label></p> <p><label>Password (again): <input type="password" name="password2" /></label></p> <input type="submit" /> </form>""") t = Template('''<form action=""> {{ form.non_field_errors }} {{ form.username.errors.as_ul }}<p><label>Your username: {{ form.username }}</label></p> {{ form.password1.errors.as_ul }}<p><label>Password: {{ form.password1 }}</label></p> {{ form.password2.errors.as_ul }}<p><label>Password (again): {{ form.password2 }}</label></p> <input type="submit" /> </form>''') self.assertHTMLEqual(t.render(Context({'form': UserRegistration({'username': 'django', 'password1': 'foo', 'password2': 'bar'}, auto_id=False)})), """<form action=""> <ul class="errorlist nonfield"><li>Please make sure your passwords match.</li></ul> <p><label>Your username: <input type="text" name="username" value="django" maxlength="10" /></label></p> <p><label>Password: <input type="password" name="password1" /></label></p> <p><label>Password (again): <input type="password" name="password2" /></label></p> <input type="submit" /> </form>""") def test_empty_permitted(self): # Sometimes (pretty much in formsets) we want to allow a form to pass validation # if it is completely empty. We can accomplish this by using the empty_permitted # argument to a form constructor. class SongForm(Form): artist = CharField() name = CharField() # First let's show what happens id empty_permitted=False (the default): data = {'artist': '', 'song': ''} form = SongForm(data, empty_permitted=False) self.assertFalse(form.is_valid()) self.assertEqual(form.errors, {'name': ['This field is required.'], 'artist': ['This field is required.']}) self.assertEqual(form.cleaned_data, {}) # Now let's show what happens when empty_permitted=True and the form is empty. form = SongForm(data, empty_permitted=True) self.assertTrue(form.is_valid()) self.assertEqual(form.errors, {}) self.assertEqual(form.cleaned_data, {}) # But if we fill in data for one of the fields, the form is no longer empty and # the whole thing must pass validation. data = {'artist': 'The Doors', 'song': ''} form = SongForm(data, empty_permitted=False) self.assertFalse(form.is_valid()) self.assertEqual(form.errors, {'name': ['This field is required.']}) self.assertEqual(form.cleaned_data, {'artist': 'The Doors'}) # If a field is not given in the data then None is returned for its data. Lets # make sure that when checking for empty_permitted that None is treated # accordingly. data = {'artist': None, 'song': ''} form = SongForm(data, empty_permitted=True) self.assertTrue(form.is_valid()) # However, we *really* need to be sure we are checking for None as any data in # initial that returns False on a boolean call needs to be treated literally. 
class PriceForm(Form): amount = FloatField() qty = IntegerField() data = {'amount': '0.0', 'qty': ''} form = PriceForm(data, initial={'amount': 0.0}, empty_permitted=True) self.assertTrue(form.is_valid()) def test_extracting_hidden_and_visible(self): class SongForm(Form): token = CharField(widget=HiddenInput) artist = CharField() name = CharField() form = SongForm() self.assertEqual([f.name for f in form.hidden_fields()], ['token']) self.assertEqual([f.name for f in form.visible_fields()], ['artist', 'name']) def test_hidden_initial_gets_id(self): class MyForm(Form): field1 = CharField(max_length=50, show_hidden_initial=True) self.assertHTMLEqual(MyForm().as_table(), '<tr><th><label for="id_field1">Field1:</label></th><td><input id="id_field1" type="text" name="field1" maxlength="50" /><input type="hidden" name="initial-field1" id="initial-id_field1" /></td></tr>') def test_error_html_required_html_classes(self): class Person(Form): name = CharField() is_cool = NullBooleanField() email = EmailField(required=False) age = IntegerField() p = Person({}) p.error_css_class = 'error' p.required_css_class = 'required' self.assertHTMLEqual(p.as_ul(), """<li class="required error"><ul class="errorlist"><li>This field is required.</li></ul><label class="required" for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></li> <li class="required"><label class="required" for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool"> <option value="1" selected="selected">Unknown</option> <option value="2">Yes</option> <option value="3">No</option> </select></li> <li><label for="id_email">Email:</label> <input type="email" name="email" id="id_email" /></li> <li class="required error"><ul class="errorlist"><li>This field is required.</li></ul><label class="required" for="id_age">Age:</label> <input type="number" name="age" id="id_age" /></li>""") self.assertHTMLEqual(p.as_p(), """<ul class="errorlist"><li>This field is required.</li></ul> <p class="required error"><label class="required" for="id_name">Name:</label> <input type="text" name="name" id="id_name" /></p> <p class="required"><label class="required" for="id_is_cool">Is cool:</label> <select name="is_cool" id="id_is_cool"> <option value="1" selected="selected">Unknown</option> <option value="2">Yes</option> <option value="3">No</option> </select></p> <p><label for="id_email">Email:</label> <input type="email" name="email" id="id_email" /></p> <ul class="errorlist"><li>This field is required.</li></ul> <p class="required error"><label class="required" for="id_age">Age:</label> <input type="number" name="age" id="id_age" /></p>""") self.assertHTMLEqual(p.as_table(), """<tr class="required error"><th><label class="required" for="id_name">Name:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="text" name="name" id="id_name" /></td></tr> <tr class="required"><th><label class="required" for="id_is_cool">Is cool:</label></th><td><select name="is_cool" id="id_is_cool"> <option value="1" selected="selected">Unknown</option> <option value="2">Yes</option> <option value="3">No</option> </select></td></tr> <tr><th><label for="id_email">Email:</label></th><td><input type="email" name="email" id="id_email" /></td></tr> <tr class="required error"><th><label class="required" for="id_age">Age:</label></th><td><ul class="errorlist"><li>This field is required.</li></ul><input type="number" name="age" id="id_age" /></td></tr>""") def test_label_has_required_css_class(self): """ #17922 - 
required_css_class is added to the label_tag() of required fields. """ class SomeForm(Form): required_css_class = 'required' field = CharField(max_length=10) field2 = IntegerField(required=False) f = SomeForm({'field': 'test'}) self.assertHTMLEqual(f['field'].label_tag(), '<label for="id_field" class="required">Field:</label>') self.assertHTMLEqual(f['field'].label_tag(attrs={'class': 'foo'}), '<label for="id_field" class="foo required">Field:</label>') self.assertHTMLEqual(f['field2'].label_tag(), '<label for="id_field2">Field2:</label>') def test_label_split_datetime_not_displayed(self): class EventForm(Form): happened_at = SplitDateTimeField(widget=SplitHiddenDateTimeWidget) form = EventForm() self.assertHTMLEqual(form.as_ul(), '<input type="hidden" name="happened_at_0" id="id_happened_at_0" /><input type="hidden" name="happened_at_1" id="id_happened_at_1" />') def test_multivalue_field_validation(self): def bad_names(value): if value == 'bad value': raise ValidationError('bad value not allowed') class NameField(MultiValueField): def __init__(self, fields=(), *args, **kwargs): fields = (CharField(label='First name', max_length=10), CharField(label='Last name', max_length=10)) super(NameField, self).__init__(fields=fields, *args, **kwargs) def compress(self, data_list): return ' '.join(data_list) class NameForm(Form): name = NameField(validators=[bad_names]) form = NameForm(data={'name': ['bad', 'value']}) form.full_clean() self.assertFalse(form.is_valid()) self.assertEqual(form.errors, {'name': ['bad value not allowed']}) form = NameForm(data={'name': ['should be overly', 'long for the field names']}) self.assertFalse(form.is_valid()) self.assertEqual(form.errors, {'name': ['Ensure this value has at most 10 characters (it has 16).', 'Ensure this value has at most 10 characters (it has 24).']}) form = NameForm(data={'name': ['fname', 'lname']}) self.assertTrue(form.is_valid()) self.assertEqual(form.cleaned_data, {'name': 'fname lname'}) def test_multivalue_deep_copy(self): """ #19298 -- MultiValueField needs to override the default as it needs to deep-copy subfields: """ class ChoicesField(MultiValueField): def __init__(self, fields=(), *args, **kwargs): fields = (ChoiceField(label='Rank', choices=((1, 1), (2, 2))), CharField(label='Name', max_length=10)) super(ChoicesField, self).__init__(fields=fields, *args, **kwargs) field = ChoicesField() field2 = copy.deepcopy(field) self.assertIsInstance(field2, ChoicesField) self.assertIsNot(field2.fields, field.fields) self.assertIsNot(field2.fields[0].choices, field.fields[0].choices) def test_multivalue_initial_data(self): """ #23674 -- invalid initial data should not break form.changed_data() """ class DateAgeField(MultiValueField): def __init__(self, fields=(), *args, **kwargs): fields = (DateField(label="Date"), IntegerField(label="Age")) super(DateAgeField, self).__init__(fields=fields, *args, **kwargs) class DateAgeForm(Form): date_age = DateAgeField() data = {"date_age": ["1998-12-06", 16]} form = DateAgeForm(data, initial={"date_age": ["200-10-10", 14]}) self.assertTrue(form.has_changed()) def test_multivalue_optional_subfields(self): class PhoneField(MultiValueField): def __init__(self, *args, **kwargs): fields = ( CharField(label='Country Code', validators=[ RegexValidator(r'^\+[0-9]{1,2}$', message='Enter a valid country code.')]), CharField(label='Phone Number'), CharField(label='Extension', error_messages={'incomplete': 'Enter an extension.'}), CharField(label='Label', required=False, help_text='E.g. 
home, work.'), ) super(PhoneField, self).__init__(fields, *args, **kwargs) def compress(self, data_list): if data_list: return '%s.%s ext. %s (label: %s)' % tuple(data_list) return None # An empty value for any field will raise a `required` error on a # required `MultiValueField`. f = PhoneField() self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '') self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None) self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, []) self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ['+61']) self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, ['+61', '287654321', '123']) self.assertEqual('+61.287654321 ext. 123 (label: Home)', f.clean(['+61', '287654321', '123', 'Home'])) self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home']) # Empty values for fields will NOT raise a `required` error on an # optional `MultiValueField` f = PhoneField(required=False) self.assertIsNone(f.clean('')) self.assertIsNone(f.clean(None)) self.assertIsNone(f.clean([])) self.assertEqual('+61. ext. (label: )', f.clean(['+61'])) self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123'])) self.assertEqual('+61.287654321 ext. 123 (label: Home)', f.clean(['+61', '287654321', '123', 'Home'])) self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home']) # For a required `MultiValueField` with `require_all_fields=False`, a # `required` error will only be raised if all fields are empty. Fields # can individually be required or optional. An empty value for any # required field will raise an `incomplete` error. f = PhoneField(require_all_fields=False) self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, '') self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, None) self.assertRaisesMessage(ValidationError, "'This field is required.'", f.clean, []) self.assertRaisesMessage(ValidationError, "'Enter a complete value.'", f.clean, ['+61']) self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123'])) six.assertRaisesRegex(self, ValidationError, "'Enter a complete value\.', u?'Enter an extension\.'", f.clean, ['', '', '', 'Home']) self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home']) # For an optional `MultiValueField` with `require_all_fields=False`, we # don't get any `required` error but we still get `incomplete` errors. f = PhoneField(required=False, require_all_fields=False) self.assertIsNone(f.clean('')) self.assertIsNone(f.clean(None)) self.assertIsNone(f.clean([])) self.assertRaisesMessage(ValidationError, "'Enter a complete value.'", f.clean, ['+61']) self.assertEqual('+61.287654321 ext. 123 (label: )', f.clean(['+61', '287654321', '123'])) six.assertRaisesRegex(self, ValidationError, "'Enter a complete value\.', u?'Enter an extension\.'", f.clean, ['', '', '', 'Home']) self.assertRaisesMessage(ValidationError, "'Enter a valid country code.'", f.clean, ['61', '287654321', '123', 'Home']) def test_custom_empty_values(self): """ Test that form fields can customize what is considered as an empty value for themselves (#19997). 
""" class CustomJSONField(CharField): empty_values = [None, ''] def to_python(self, value): # Fake json.loads if value == '{}': return {} return super(CustomJSONField, self).to_python(value) class JSONForm(forms.Form): json = CustomJSONField() form = JSONForm(data={'json': '{}'}) form.full_clean() self.assertEqual(form.cleaned_data, {'json': {}}) def test_boundfield_label_tag(self): class SomeForm(Form): field = CharField() boundfield = SomeForm()['field'] testcases = [ # (args, kwargs, expected) # without anything: just print the <label> ((), {}, '<label for="id_field">Field:</label>'), # passing just one argument: overrides the field's label (('custom',), {}, '<label for="id_field">custom:</label>'), # the overridden label is escaped (('custom&',), {}, '<label for="id_field">custom&amp;:</label>'), ((mark_safe('custom&'),), {}, '<label for="id_field">custom&:</label>'), # Passing attrs to add extra attributes on the <label> ((), {'attrs': {'class': 'pretty'}}, '<label for="id_field" class="pretty">Field:</label>') ] for args, kwargs, expected in testcases: self.assertHTMLEqual(boundfield.label_tag(*args, **kwargs), expected) def test_boundfield_label_tag_no_id(self): """ If a widget has no id, label_tag just returns the text with no surrounding <label>. """ class SomeForm(Form): field = CharField() boundfield = SomeForm(auto_id='')['field'] self.assertHTMLEqual(boundfield.label_tag(), 'Field:') self.assertHTMLEqual(boundfield.label_tag('Custom&'), 'Custom&amp;:') def test_boundfield_label_tag_custom_widget_id_for_label(self): class CustomIdForLabelTextInput(TextInput): def id_for_label(self, id): return 'custom_' + id class EmptyIdForLabelTextInput(TextInput): def id_for_label(self, id): return None class SomeForm(Form): custom = CharField(widget=CustomIdForLabelTextInput) empty = CharField(widget=EmptyIdForLabelTextInput) form = SomeForm() self.assertHTMLEqual(form['custom'].label_tag(), '<label for="custom_id_custom">Custom:</label>') self.assertHTMLEqual(form['empty'].label_tag(), '<label>Empty:</label>') def test_boundfield_empty_label(self): class SomeForm(Form): field = CharField(label='') boundfield = SomeForm()['field'] self.assertHTMLEqual(boundfield.label_tag(), '<label for="id_field"></label>') def test_boundfield_id_for_label(self): class SomeForm(Form): field = CharField(label='') self.assertEqual(SomeForm()['field'].id_for_label, 'id_field') def test_boundfield_id_for_label_override_by_attrs(self): """ If an id is provided in `Widget.attrs`, it overrides the generated ID, unless it is `None`. 
""" class SomeForm(Form): field = CharField(widget=forms.TextInput(attrs={'id': 'myCustomID'})) field_none = CharField(widget=forms.TextInput(attrs={'id': None})) form = SomeForm() self.assertEqual(form['field'].id_for_label, 'myCustomID') self.assertEqual(form['field_none'].id_for_label, 'id_field_none') def test_label_tag_override(self): """ BoundField label_suffix (if provided) overrides Form label_suffix """ class SomeForm(Form): field = CharField() boundfield = SomeForm(label_suffix='!')['field'] self.assertHTMLEqual(boundfield.label_tag(label_suffix='$'), '<label for="id_field">Field$</label>') def test_field_name(self): """#5749 - `field_name` may be used as a key in _html_output().""" class SomeForm(Form): some_field = CharField() def as_p(self): return self._html_output( normal_row='<p id="p_%(field_name)s"></p>', error_row='%s', row_ender='</p>', help_text_html=' %s', errors_on_separate_row=True, ) form = SomeForm() self.assertHTMLEqual(form.as_p(), '<p id="p_some_field"></p>') def test_field_without_css_classes(self): """ `css_classes` may be used as a key in _html_output() (empty classes). """ class SomeForm(Form): some_field = CharField() def as_p(self): return self._html_output( normal_row='<p class="%(css_classes)s"></p>', error_row='%s', row_ender='</p>', help_text_html=' %s', errors_on_separate_row=True, ) form = SomeForm() self.assertHTMLEqual(form.as_p(), '<p class=""></p>') def test_field_with_css_class(self): """ `css_classes` may be used as a key in _html_output() (class comes from required_css_class in this case). """ class SomeForm(Form): some_field = CharField() required_css_class = 'foo' def as_p(self): return self._html_output( normal_row='<p class="%(css_classes)s"></p>', error_row='%s', row_ender='</p>', help_text_html=' %s', errors_on_separate_row=True, ) form = SomeForm() self.assertHTMLEqual(form.as_p(), '<p class="foo"></p>') def test_field_name_with_hidden_input(self): """ BaseForm._html_output() should merge all the hidden input fields and put them in the last row. """ class SomeForm(Form): hidden1 = CharField(widget=HiddenInput) custom = CharField() hidden2 = CharField(widget=HiddenInput) def as_p(self): return self._html_output( normal_row='<p%(html_class_attr)s>%(field)s %(field_name)s</p>', error_row='%s', row_ender='</p>', help_text_html=' %s', errors_on_separate_row=True, ) form = SomeForm() self.assertHTMLEqual( form.as_p(), '<p><input id="id_custom" name="custom" type="text" /> custom' '<input id="id_hidden1" name="hidden1" type="hidden" />' '<input id="id_hidden2" name="hidden2" type="hidden" /></p>' ) def test_field_name_with_hidden_input_and_non_matching_row_ender(self): """ BaseForm._html_output() should merge all the hidden input fields and put them in the last row ended with the specific row ender. 
""" class SomeForm(Form): hidden1 = CharField(widget=HiddenInput) custom = CharField() hidden2 = CharField(widget=HiddenInput) def as_p(self): return self._html_output( normal_row='<p%(html_class_attr)s>%(field)s %(field_name)s</p>', error_row='%s', row_ender='<hr/><hr/>', help_text_html=' %s', errors_on_separate_row=True ) form = SomeForm() self.assertHTMLEqual( form.as_p(), '<p><input id="id_custom" name="custom" type="text" /> custom</p>\n' '<input id="id_hidden1" name="hidden1" type="hidden" />' '<input id="id_hidden2" name="hidden2" type="hidden" /><hr/><hr/>' ) def test_error_dict(self): class MyForm(Form): foo = CharField() bar = CharField() def clean(self): raise ValidationError('Non-field error.', code='secret', params={'a': 1, 'b': 2}) form = MyForm({}) self.assertEqual(form.is_valid(), False) errors = form.errors.as_text() control = [ '* foo\n * This field is required.', '* bar\n * This field is required.', '* __all__\n * Non-field error.', ] for error in control: self.assertIn(error, errors) errors = form.errors.as_ul() control = [ '<li>foo<ul class="errorlist"><li>This field is required.</li></ul></li>', '<li>bar<ul class="errorlist"><li>This field is required.</li></ul></li>', '<li>__all__<ul class="errorlist nonfield"><li>Non-field error.</li></ul></li>', ] for error in control: self.assertInHTML(error, errors) errors = json.loads(form.errors.as_json()) control = { 'foo': [{'code': 'required', 'message': 'This field is required.'}], 'bar': [{'code': 'required', 'message': 'This field is required.'}], '__all__': [{'code': 'secret', 'message': 'Non-field error.'}] } self.assertEqual(errors, control) def test_error_dict_as_json_escape_html(self): """#21962 - adding html escape flag to ErrorDict""" class MyForm(Form): foo = CharField() bar = CharField() def clean(self): raise ValidationError('<p>Non-field error.</p>', code='secret', params={'a': 1, 'b': 2}) control = { 'foo': [{'code': 'required', 'message': 'This field is required.'}], 'bar': [{'code': 'required', 'message': 'This field is required.'}], '__all__': [{'code': 'secret', 'message': '<p>Non-field error.</p>'}] } form = MyForm({}) self.assertFalse(form.is_valid()) errors = json.loads(form.errors.as_json()) self.assertEqual(errors, control) errors = json.loads(form.errors.as_json(escape_html=True)) control['__all__'][0]['message'] = '&lt;p&gt;Non-field error.&lt;/p&gt;' self.assertEqual(errors, control) def test_error_list(self): e = ErrorList() e.append('Foo') e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'})) self.assertIsInstance(e, list) self.assertIn('Foo', e) self.assertIn('Foo', forms.ValidationError(e)) self.assertEqual( e.as_text(), '* Foo\n* Foobar' ) self.assertEqual( e.as_ul(), '<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>' ) self.assertEqual( json.loads(e.as_json()), [{"message": "Foo", "code": ""}, {"message": "Foobar", "code": "foobar"}] ) def test_error_list_class_not_specified(self): e = ErrorList() e.append('Foo') e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'})) self.assertEqual( e.as_ul(), '<ul class="errorlist"><li>Foo</li><li>Foobar</li></ul>' ) def test_error_list_class_has_one_class_specified(self): e = ErrorList(error_class='foobar-error-class') e.append('Foo') e.append(ValidationError('Foo%(bar)s', code='foobar', params={'bar': 'bar'})) self.assertEqual( e.as_ul(), '<ul class="errorlist foobar-error-class"><li>Foo</li><li>Foobar</li></ul>' ) def test_error_list_with_hidden_field_errors_has_correct_class(self): class 
Person(Form): first_name = CharField() last_name = CharField(widget=HiddenInput) p = Person({'first_name': 'John'}) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist nonfield"><li>(Hidden field last_name) This field is required.</li></ul></li><li><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /><input id="id_last_name" name="last_name" type="hidden" /></li>""" ) self.assertHTMLEqual( p.as_p(), """<ul class="errorlist nonfield"><li>(Hidden field last_name) This field is required.</li></ul> <p><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /><input id="id_last_name" name="last_name" type="hidden" /></p>""" ) self.assertHTMLEqual( p.as_table(), """<tr><td colspan="2"><ul class="errorlist nonfield"><li>(Hidden field last_name) This field is required.</li></ul></td></tr> <tr><th><label for="id_first_name">First name:</label></th><td><input id="id_first_name" name="first_name" type="text" value="John" /><input id="id_last_name" name="last_name" type="hidden" /></td></tr>""" ) def test_error_list_with_non_field_errors_has_correct_class(self): class Person(Form): first_name = CharField() last_name = CharField() def clean(self): raise ValidationError('Generic validation error') p = Person({'first_name': 'John', 'last_name': 'Lennon'}) self.assertHTMLEqual( str(p.non_field_errors()), '<ul class="errorlist nonfield"><li>Generic validation error</li></ul>' ) self.assertHTMLEqual( p.as_ul(), """<li><ul class="errorlist nonfield"><li>Generic validation error</li></ul></li><li><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /></li> <li><label for="id_last_name">Last name:</label> <input id="id_last_name" name="last_name" type="text" value="Lennon" /></li>""" ) self.assertHTMLEqual( p.non_field_errors().as_text(), '* Generic validation error' ) self.assertHTMLEqual( p.as_p(), """<ul class="errorlist nonfield"><li>Generic validation error</li></ul> <p><label for="id_first_name">First name:</label> <input id="id_first_name" name="first_name" type="text" value="John" /></p> <p><label for="id_last_name">Last name:</label> <input id="id_last_name" name="last_name" type="text" value="Lennon" /></p>""" ) self.assertHTMLEqual( p.as_table(), """<tr><td colspan="2"><ul class="errorlist nonfield"><li>Generic validation error</li></ul></td></tr> <tr><th><label for="id_first_name">First name:</label></th><td><input id="id_first_name" name="first_name" type="text" value="John" /></td></tr> <tr><th><label for="id_last_name">Last name:</label></th><td><input id="id_last_name" name="last_name" type="text" value="Lennon" /></td></tr>""" ) def test_errorlist_override(self): @python_2_unicode_compatible class DivErrorList(ErrorList): def __str__(self): return self.as_divs() def as_divs(self): if not self: return '' return '<div class="errorlist">%s</div>' % ''.join( '<div class="error">%s</div>' % force_text(e) for e in self) class CommentForm(Form): name = CharField(max_length=50, required=False) email = EmailField() comment = CharField() data = dict(email='invalid') f = CommentForm(data, auto_id=False, error_class=DivErrorList) self.assertHTMLEqual(f.as_p(), """<p>Name: <input type="text" name="name" maxlength="50" /></p> <div class="errorlist"><div class="error">Enter a valid email address.</div></div> <p>Email: <input type="email" name="email" value="invalid" /></p> <div class="errorlist"><div 
class="error">This field is required.</div></div> <p>Comment: <input type="text" name="comment" /></p>""") def test_baseform_repr(self): """ BaseForm.__repr__() should contain some basic information about the form. """ p = Person() self.assertEqual(repr(p), "<Person bound=False, valid=Unknown, fields=(first_name;last_name;birthday)>") p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': '1940-10-9'}) self.assertEqual(repr(p), "<Person bound=True, valid=Unknown, fields=(first_name;last_name;birthday)>") p.is_valid() self.assertEqual(repr(p), "<Person bound=True, valid=True, fields=(first_name;last_name;birthday)>") p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': 'fakedate'}) p.is_valid() self.assertEqual(repr(p), "<Person bound=True, valid=False, fields=(first_name;last_name;birthday)>") def test_baseform_repr_dont_trigger_validation(self): """ BaseForm.__repr__() shouldn't trigger the form validation. """ p = Person({'first_name': 'John', 'last_name': 'Lennon', 'birthday': 'fakedate'}) repr(p) self.assertRaises(AttributeError, lambda: p.cleaned_data) self.assertFalse(p.is_valid()) self.assertEqual(p.cleaned_data, {'first_name': 'John', 'last_name': 'Lennon'}) def test_accessing_clean(self): class UserForm(Form): username = CharField(max_length=10) password = CharField(widget=PasswordInput) def clean(self): data = self.cleaned_data if not self.errors: data['username'] = data['username'].lower() return data f = UserForm({'username': 'SirRobin', 'password': 'blue'}) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data['username'], 'sirrobin') def test_changing_cleaned_data_nothing_returned(self): class UserForm(Form): username = CharField(max_length=10) password = CharField(widget=PasswordInput) def clean(self): self.cleaned_data['username'] = self.cleaned_data['username'].lower() # don't return anything f = UserForm({'username': 'SirRobin', 'password': 'blue'}) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data['username'], 'sirrobin') def test_changing_cleaned_data_in_clean(self): class UserForm(Form): username = CharField(max_length=10) password = CharField(widget=PasswordInput) def clean(self): data = self.cleaned_data # Return a different dict. We have not changed self.cleaned_data. return { 'username': data['username'].lower(), 'password': 'this_is_not_a_secret', } f = UserForm({'username': 'SirRobin', 'password': 'blue'}) self.assertTrue(f.is_valid()) self.assertEqual(f.cleaned_data['username'], 'sirrobin') def test_multipart_encoded_form(self): class FormWithoutFile(Form): username = CharField() class FormWithFile(Form): username = CharField() file = FileField() class FormWithImage(Form): image = ImageField() self.assertFalse(FormWithoutFile().is_multipart()) self.assertTrue(FormWithFile().is_multipart()) self.assertTrue(FormWithImage().is_multipart()) def test_html_safe(self): class SimpleForm(Form): username = CharField() form = SimpleForm() self.assertTrue(hasattr(SimpleForm, '__html__')) self.assertEqual(force_text(form), form.__html__()) self.assertTrue(hasattr(form['username'], '__html__')) self.assertEqual(force_text(form['username']), form['username'].__html__())
bsd-3-clause
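The record above tests Django form behaviour around prefix, auto_id, empty_permitted, file fields and error rendering. A minimal runnable sketch of the prefix mechanics in isolation, assuming a recent Django is installed; PersonForm and the bare settings.configure() call are illustrative scaffolding, not part of the test suite itself:

import django
from django.conf import settings

settings.configure()  # default settings are enough for standalone form handling
django.setup()

from django import forms

class PersonForm(forms.Form):
    first_name = forms.CharField()

# With prefix='foo', submitted data keys must be namespaced as 'foo-<field>',
# while cleaned_data keeps the plain field names.
form = PersonForm({'foo-first_name': 'John'}, prefix='foo')
assert form.is_valid()
assert form.cleaned_data == {'first_name': 'John'}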
gangadharkadam/sher
erpnext/manufacturing/doctype/bom/test_bom.py
38
1471
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import unittest

import frappe

test_records = frappe.get_test_records('Bom')


class TestBOM(unittest.TestCase):
    def test_get_items(self):
        from erpnext.manufacturing.doctype.bom.bom import get_bom_items_as_dict
        items_dict = get_bom_items_as_dict(bom="BOM/_Test FG Item 2/001", qty=1, fetch_exploded=0)
        self.assertTrue(test_records[2]["bom_materials"][0]["item_code"] in items_dict)
        self.assertTrue(test_records[2]["bom_materials"][1]["item_code"] in items_dict)
        self.assertEquals(len(items_dict.values()), 2)

    def test_get_items_exploded(self):
        from erpnext.manufacturing.doctype.bom.bom import get_bom_items_as_dict
        items_dict = get_bom_items_as_dict(bom="BOM/_Test FG Item 2/001", qty=1, fetch_exploded=1)
        self.assertTrue(test_records[2]["bom_materials"][0]["item_code"] in items_dict)
        self.assertFalse(test_records[2]["bom_materials"][1]["item_code"] in items_dict)
        self.assertTrue(test_records[0]["bom_materials"][0]["item_code"] in items_dict)
        self.assertTrue(test_records[0]["bom_materials"][1]["item_code"] in items_dict)
        self.assertEquals(len(items_dict.values()), 3)

    def test_get_items_list(self):
        from erpnext.manufacturing.doctype.bom.bom import get_bom_items
        self.assertEquals(len(get_bom_items(bom="BOM/_Test FG Item 2/001", qty=1, fetch_exploded=1)), 3)
agpl-3.0
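The two modes tested above differ in whether sub-assembly BOMs are flattened into their raw materials (fetch_exploded) or listed as single items. A small pure-Python sketch of that distinction; the dict layout and item names are hypothetical illustrations, not the erpnext API:

# Illustrative only: each BOM maps to (item_code, qty) pairs; a child item
# that has its own BOM is a sub-assembly.
BOMS = {
    'BOM-FG': [('SUB-ASSY', 1), ('RM-A', 2)],   # finished good
    'SUB-ASSY': [('RM-B', 4), ('RM-C', 1)],     # sub-assembly with its own BOM
}

def bom_items(bom, qty=1, fetch_exploded=False):
    """Return {item_code: qty}; recurse into child BOMs when exploded."""
    items = {}
    for item_code, item_qty in BOMS[bom]:
        required = item_qty * qty
        if fetch_exploded and item_code in BOMS:
            for code, q in bom_items(item_code, required, True).items():
                items[code] = items.get(code, 0) + q
        else:
            items[item_code] = items.get(item_code, 0) + required
    return items

print(bom_items('BOM-FG'))                       # {'SUB-ASSY': 1, 'RM-A': 2} -- 2 items
print(bom_items('BOM-FG', fetch_exploded=True))  # {'RM-B': 4, 'RM-C': 1, 'RM-A': 2} -- 3 items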
OpenTTD-Ladder/ladder-web
ladder/design/management/commands/compile_css.py
1
1028
import os

from ..base import CompilerCommand, CSS_PROPERTY, CSS_STATIC_DIR


class Command(CompilerCommand):
    static_dir = CSS_STATIC_DIR
    module_property = CSS_PROPERTY

    def queue_file(self, fname, module):
        return self.test_file_age(fname, ''.join([os.path.splitext(fname)[0], '.css']))

    def test_file(self, name, item):
        stdout, _, _ = self.get_output("recess", item)
        failed = " error" in stdout or "Error" in stdout
        if failed:
            print ""
            print stdout
        return not failed

    def compile_file(self, name, item):
        parts = os.path.splitext(item)
        css_file = ''.join([parts[0], '.css'])
        min_css = ''.join([parts[0], '.min.css'])
        css_out, _, _ = self.get_output("recess", "--compile", item)
        min_out, _, _ = self.get_output("recess", "--compress", item)
        with open(css_file, 'w') as fh:
            fh.write(css_out)
        with open(min_css, 'w') as fh:
            fh.write(min_out)
        return True
gpl-2.0
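Condensed, the compile step above is two invocations of the recess CLI per LESS file, one plain and one compressed. A standalone sketch of the same idea without the management-command machinery, assuming recess is installed and on PATH (the input path is hypothetical):

import os
import subprocess

def compile_less(path):
    """Write <name>.css and <name>.min.css next to the given .less file."""
    base, _ = os.path.splitext(path)
    css = subprocess.check_output(['recess', '--compile', path])
    minified = subprocess.check_output(['recess', '--compress', path])
    with open(base + '.css', 'wb') as fh:
        fh.write(css)
    with open(base + '.min.css', 'wb') as fh:
        fh.write(minified)

compile_less('design/static/less/site.less')  # hypothetical path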
google/material-design-icons
update/venv/lib/python3.9/site-packages/setuptools/command/install_lib.py
5
5023
import os
import sys
from itertools import product, starmap

import distutils.command.install_lib as orig


class install_lib(orig.install_lib):
    """Don't add compiled flags to filenames of non-Python files"""

    def initialize_options(self):
        orig.install_lib.initialize_options(self)
        self.multiarch = None
        self.install_layout = None

    def finalize_options(self):
        orig.install_lib.finalize_options(self)
        self.set_undefined_options('install', ('install_layout', 'install_layout'))
        if self.install_layout == 'deb' and sys.version_info[:2] >= (3, 3):
            import sysconfig
            self.multiarch = sysconfig.get_config_var('MULTIARCH')

    def run(self):
        self.build()
        outfiles = self.install()
        if outfiles is not None:
            # always compile, in case we have any extension stubs to deal with
            self.byte_compile(outfiles)

    def get_exclusions(self):
        """
        Return a collections.Sized collections.Container of paths to be
        excluded for single_version_externally_managed installations.
        """
        all_packages = (
            pkg
            for ns_pkg in self._get_SVEM_NSPs()
            for pkg in self._all_packages(ns_pkg)
        )
        excl_specs = product(all_packages, self._gen_exclusion_paths())
        return set(starmap(self._exclude_pkg_path, excl_specs))

    def _exclude_pkg_path(self, pkg, exclusion_path):
        """
        Given a package name and exclusion path within that package,
        compute the full exclusion path.
        """
        parts = pkg.split('.') + [exclusion_path]
        return os.path.join(self.install_dir, *parts)

    @staticmethod
    def _all_packages(pkg_name):
        """
        >>> list(install_lib._all_packages('foo.bar.baz'))
        ['foo.bar.baz', 'foo.bar', 'foo']
        """
        while pkg_name:
            yield pkg_name
            pkg_name, sep, child = pkg_name.rpartition('.')

    def _get_SVEM_NSPs(self):
        """
        Get namespace packages (list) but only for
        single_version_externally_managed installations and empty otherwise.
        """
        # TODO: is it necessary to short-circuit here? i.e. what's the cost
        # if get_finalized_command is called even when namespace_packages is
        # False?
        if not self.distribution.namespace_packages:
            return []

        install_cmd = self.get_finalized_command('install')
        svem = install_cmd.single_version_externally_managed

        return self.distribution.namespace_packages if svem else []

    @staticmethod
    def _gen_exclusion_paths():
        """
        Generate file paths to be excluded for namespace packages (bytecode
        cache files).
        """
        # always exclude the package module itself
        yield '__init__.py'
        yield '__init__.pyc'
        yield '__init__.pyo'

        if not hasattr(sys, 'implementation'):
            return

        base = os.path.join('__pycache__', '__init__.' + sys.implementation.cache_tag)
        yield base + '.pyc'
        yield base + '.pyo'
        yield base + '.opt-1.pyc'
        yield base + '.opt-2.pyc'

    def copy_tree(
            self, infile, outfile,
            preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
    ):
        assert preserve_mode and preserve_times and not preserve_symlinks
        exclude = self.get_exclusions()

        if not exclude:
            import distutils.dir_util
            distutils.dir_util._multiarch = self.multiarch
            return orig.install_lib.copy_tree(self, infile, outfile)

        # Exclude namespace package __init__.py* files from the output
        from setuptools.archive_util import unpack_directory
        from distutils import log

        outfiles = []

        if self.multiarch:
            import sysconfig
            ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
            if ext_suffix.endswith(self.multiarch + ext_suffix[-3:]):
                new_suffix = None
            else:
                new_suffix = "%s-%s%s" % (ext_suffix[:-3], self.multiarch, ext_suffix[-3:])

        def pf(src, dst):
            if dst in exclude:
                log.warn("Skipping installation of %s (namespace package)", dst)
                return False

            if self.multiarch and new_suffix and dst.endswith(ext_suffix) and not dst.endswith(new_suffix):
                dst = dst.replace(ext_suffix, new_suffix)
                log.info("renaming extension to %s", os.path.basename(dst))

            log.info("copying %s -> %s", src, os.path.dirname(dst))
            outfiles.append(dst)
            return dst

        unpack_directory(infile, outfile, pf)

        return outfiles

    def get_outputs(self):
        outputs = orig.install_lib.get_outputs(self)
        exclude = self.get_exclusions()
        if exclude:
            return [f for f in outputs if f not in exclude]
        return outputs
apache-2.0
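get_exclusions() above is a compact use of itertools.product and starmap: the cross product of namespace packages and per-package exclusion names, joined into concrete paths. The following self-contained sketch replays that computation with hypothetical values to show what the resulting set contains:

import os
from itertools import product, starmap

install_dir = '/usr/lib/python3/dist-packages'   # hypothetical
ns_packages = ['foo', 'foo.bar']                 # namespace packages
exclusion_names = ['__init__.py', '__init__.pyc']

def exclude_path(pkg, name):
    # 'foo.bar' + '__init__.py' -> <install_dir>/foo/bar/__init__.py
    return os.path.join(install_dir, *(pkg.split('.') + [name]))

exclusions = set(starmap(exclude_path, product(ns_packages, exclusion_names)))
for p in sorted(exclusions):
    print(p)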
stwunsch/gnuradio
gr-audio/examples/python/audio_fft.py
68
4596
#!/usr/bin/env python
#
# Copyright 2004,2005,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING.  If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

from gnuradio import gr, gru, audio
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from gnuradio.wxgui import stdgui2, fftsink2, waterfallsink2, scopesink2, form, slider
from optparse import OptionParser
import wx
import sys


class app_top_block(stdgui2.std_top_block):
    def __init__(self, frame, panel, vbox, argv):
        stdgui2.std_top_block.__init__(self, frame, panel, vbox, argv)

        self.frame = frame
        self.panel = panel

        parser = OptionParser(option_class=eng_option)
        parser.add_option("-W", "--waterfall", action="store_true", default=False,
                          help="Enable waterfall display")
        parser.add_option("-S", "--oscilloscope", action="store_true", default=False,
                          help="Enable oscilloscope display")
        parser.add_option("-I", "--audio-input", type="string", default="",
                          help="pcm input device name. E.g., hw:0,0 or /dev/dsp")
        parser.add_option("-r", "--sample-rate", type="eng_float", default=48000,
                          help="set sample rate to RATE (48000)")
        (options, args) = parser.parse_args()
        sample_rate = int(options.sample_rate)

        if len(args) != 0:
            parser.print_help()
            sys.exit(1)

        self.show_debug_info = True

        # build the graph
        if options.waterfall:
            self.scope = \
                waterfallsink2.waterfall_sink_f(panel, fft_size=1024, sample_rate=sample_rate)
        elif options.oscilloscope:
            self.scope = scopesink2.scope_sink_f(panel, sample_rate=sample_rate)
        else:
            self.scope = fftsink2.fft_sink_f(panel, fft_size=1024,
                                             sample_rate=sample_rate, fft_rate=30,
                                             ref_scale=1.0, ref_level=0, y_divs=12)

        self.src = audio.source(sample_rate, options.audio_input)
        self.connect(self.src, self.scope)

        self._build_gui(vbox)

        # set initial values

    def _set_status_msg(self, msg):
        self.frame.GetStatusBar().SetStatusText(msg, 0)

    def _build_gui(self, vbox):

        def _form_set_freq(kv):
            return self.set_freq(kv['freq'])

        vbox.Add(self.scope.win, 10, wx.EXPAND)
        #self._build_subpanel(vbox)

    def _build_subpanel(self, vbox_arg):
        # build a secondary information panel (sometimes hidden)

        # FIXME figure out how to have this be a subpanel that is always
        # created, but has its visibility controlled by foo.Show(True/False)

        def _form_set_decim(kv):
            return self.set_decim(kv['decim'])

        if not(self.show_debug_info):
            return

        panel = self.panel
        vbox = vbox_arg
        myform = self.myform

        #panel = wx.Panel(self.panel, -1)
        #vbox = wx.BoxSizer(wx.VERTICAL)

        hbox = wx.BoxSizer(wx.HORIZONTAL)
        hbox.Add((5, 0), 0)
        myform['decim'] = form.int_field(
            parent=panel, sizer=hbox, label="Decim",
            callback=myform.check_input_and_call(_form_set_decim, self._set_status_msg))

        hbox.Add((5, 0), 1)
        myform['fs@usb'] = form.static_float_field(
            parent=panel, sizer=hbox, label="Fs@USB")

        hbox.Add((5, 0), 1)
        myform['dbname'] = form.static_text_field(
            parent=panel, sizer=hbox)

        hbox.Add((5, 0), 1)
        myform['baseband'] = form.static_float_field(
            parent=panel, sizer=hbox, label="Analog BB")

        hbox.Add((5, 0), 1)
        myform['ddc'] = form.static_float_field(
            parent=panel, sizer=hbox, label="DDC")

        hbox.Add((5, 0), 0)
        vbox.Add(hbox, 0, wx.EXPAND)


def main():
    app = stdgui2.stdapp(app_top_block, "Audio FFT", nstatus=1)
    app.MainLoop()

if __name__ == '__main__':
    main()
gpl-3.0
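The flowgraph above simply wires an audio source into an FFT (or waterfall/scope) sink. As an offline illustration of what the FFT sink displays, here is a numpy-only sketch that computes a windowed magnitude spectrum at the same sample rate and FFT size; it is not GNU Radio code and needs no audio device:

import numpy as np

sample_rate = 48000
fft_size = 1024
t = np.arange(fft_size) / float(sample_rate)
samples = np.sin(2 * np.pi * 1000 * t)          # 1 kHz test tone

window = np.hanning(fft_size)
spectrum = np.fft.rfft(samples * window)
freqs = np.fft.rfftfreq(fft_size, d=1.0 / sample_rate)
peak = freqs[np.argmax(np.abs(spectrum))]
print('peak bin at %.0f Hz' % peak)             # ~1000 Hz (46.875 Hz bin width)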
FEniCS/dolfin
site-packages/dolfin_utils/documentation/extractdocumentation.py
1
7632
# -*- coding: utf-8 -*- """Script to extract documentation from docstrings in *.h files in the DOLFIN source tree.""" # Copyright (C) 2010 Anders Logg # # This file is part of DOLFIN. # # DOLFIN is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # DOLFIN is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with DOLFIN. If not, see <http://www.gnu.org/licenses/>. # # Modified by Kristian B. Oelgaard, 2011. # Modified by Marie E. Rognes, 2011. # Modified by Anders E. Johansen, 2011. # # First added: 2010-08-26 # Last changed: 2011-07-10 import os, io def extract_documentation(dolfin_dir, header, module): "Extract documentation for given header in given module" # print "Extracting documentation for %s..." % header # List of classes with documentation classnames = [] documentation = [] # Class name and parent class name classname = None parents = None # Comment and signature comment = None signature = None # Indentation of signatures indent = 0 # Iterate over each line f = io.open(os.path.join(dolfin_dir, "dolfin", module, header), encoding="utf-8") for line in f: # Check for comment if "///" in line: # We may have either "///" and "/// " if "/// " in line: c = line.split("/// ")[1].rstrip() else: c = line.split("///")[1].rstrip() # Found start of new comment if comment is None: comment = c # Continuing comment on next line else: comment += "\n" + c # Check for class # If anything goes wrong, remove the last '<' after template. elif " class " in line and not ";" in line and not "//" in line and not "template<" in line: # Get class name and parent classname = line.split(" class ")[1].split(":")[0].strip() if "public" in line: # Strip of each parent of additional commas, blanks # and newlines parents = [p.strip(", \n") for p in line.split("public")[1:]] # Remove virtual modifier parents = [p.replace("virtual ", "") for p in parents] # Store documentation # TODO: KBO: we don't check if a given classname is in the dolfin # namepace. classnames.append(classname) documentation.append((classname, parents, comment, [])) classname = None parents = None comment = None # If we encounter a '//' commented line we reset comment and proceed # This means that '///' and '//' should not be mixed when documenting # functions. 
elif line.lstrip()[0:2] == "//" and not line.lstrip()[0:3] == "///": comment = None continue # Check for function signature elif comment is not None: s = line.strip() # Found start of new signature if signature is None: signature = s #indent = (len(s.split("(")[0]) + 1)*" " # Continuing signature on next line else: #signature += "\n" + indent + s signature += " " + s # Signature ends when we find ";" or "{" if ";" in s or "{" in s: # Strip out last part signature = signature.split(";")[0] signature = signature.split("{")[0] signature = signature.strip() # Remove stuff Sphinx can't handle signature = signature.replace("virtual ", "") signature = signature.replace("inline ", "") # Remove ": stuff" for constructors new_s = [] for l in signature.split("::"): if not ":" in l: new_s.append(l) else: new_s.append(l.split(":")[0]) break signature = "::".join(new_s).strip() # Remove template stuff (not handled correctly by Sphinx) # e.g., 'template <typename Kernel> CGAL::Bbox_3 bbox()' in mesh/Point.h if "template" in signature: signature = ">".join(signature.split(">")[1:]).lstrip() # Only handle functions or enums, i.e. signatures that contain # '(' (or ')'). This is to avoid picking up data members. # NOTE, KBO: Should we also skip private functions? if not "(" in signature and not "enum" in signature: # Reset comment and signature comment = None signature = None continue # Skip destructors (not handled by Sphinx) destructor = "~" in signature # Get function name #function = signature.split("(")[0].split(" ")[-1] # Store documentation if len(documentation) > 0 and not destructor: documentation[-1][-1].append((signature, comment)) elif not destructor: documentation = [(None, None, None, [(signature, comment)])] # Reset comment and signature comment = None signature = None # Close file f.close() # Sort documentation alphabetically within each class # for (classname, parent, comment, function_documentation) in documentation: # function_documentation.sort() return documentation, classnames def extract_doc_representation(dolfin_dir): # Extract modules from dolfin.h modules = [] f = open(os.path.join(dolfin_dir, "dolfin", "dolfin.h")) for line in f: if line.startswith("#include <dolfin/"): module = line.split("/")[1] modules += [module] f.close() # Iterate over modules documentation = {} classnames = [] for module in modules: # if not module == "la": # continue # Extract header files from dolfin_foo.h f = open(os.path.join(dolfin_dir, "dolfin", module, "dolfin_%s.h" % module)) documentation[module] = [] for line in f: # Generate documentation for header file if line.startswith("#include <dolfin/"): header = line.split("/")[2].split(">")[0] # Skip version.h (generated from version.h.in via CMake) if header == "version.h": continue # if not header == "GenericTensor.h": # continue doc, cls = extract_documentation(dolfin_dir, header, module) documentation[module].append((header, doc)) classnames += cls return documentation, classnames if __name__ == "__main__": docs, classes = extract_doc_representation() # for c in classes: # print c # for key, doc in docs.items(): # for header, cont in doc: # print cont
lgpl-3.0
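The extractor above walks header files line by line, accumulating '///' comments until it reaches the class or function signature they document. A toy version of that accumulate-and-attach loop, run on an inline hypothetical header and far less complete than the real extract_documentation:

HEADER = """\
/// A point in 3D space.
class Point
{
public:
  /// Return distance to origin.
  double norm() const;
};
"""

docs, comment = [], []
for line in HEADER.splitlines():
    stripped = line.strip()
    if stripped.startswith('///'):
        # keep collecting comment lines until a declaration appears
        comment.append(stripped[3:].strip())
    elif comment:
        # attach the accumulated comment to this declaration
        docs.append((stripped.rstrip('{;').strip(), ' '.join(comment)))
        comment = []

for signature, doc in docs:
    print('%s -- %s' % (signature, doc))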
opennode/nodeconductor-openstack
src/waldur_openstack/openstack_tenant/tests/unittests/test_handlers.py
1
18087
from __future__ import unicode_literals from django.test import TestCase from django.contrib.contenttypes.models import ContentType from waldur_core.core.models import StateMixin from waldur_core.cost_tracking import models as cost_tracking_models from waldur_core.structure import models as structure_models from waldur_core.structure.tests import factories as structure_factories from waldur_openstack.openstack.tests import factories as openstack_factories from .. import factories from ... import models, apps, PriceItemTypes class BaseServicePropertyTest(TestCase): def setUp(self): self.tenant = openstack_factories.TenantFactory() self.service_settings = structure_models.ServiceSettings.objects.get( scope=self.tenant, type=apps.OpenStackTenantConfig.service_name) class SecurityGroupHandlerTest(BaseServicePropertyTest): def setUp(self): super(SecurityGroupHandlerTest, self).setUp() def test_security_group_create(self): openstack_security_group = openstack_factories.SecurityGroupFactory( tenant=self.tenant, state=StateMixin.States.CREATING ) openstack_security_rule = openstack_factories.SecurityGroupRuleFactory(security_group=openstack_security_group) self.assertEqual(models.SecurityGroup.objects.count(), 0) openstack_security_group.set_ok() openstack_security_group.save() self.assertEqual(models.SecurityGroup.objects.count(), 1) self.assertTrue(models.SecurityGroup.objects.filter( settings=self.service_settings, backend_id=openstack_security_group.backend_id ).exists()) security_group_property = models.SecurityGroup.objects.get(settings=self.service_settings, backend_id=openstack_security_group.backend_id) self.assertTrue(security_group_property.rules.filter(backend_id=openstack_security_rule.backend_id).exists()) def test_security_group_update(self): openstack_security_group = openstack_factories.SecurityGroupFactory( tenant=self.tenant, name='New name', description='New description', state=StateMixin.States.UPDATING ) security_group = factories.SecurityGroupFactory( settings=self.service_settings, backend_id=openstack_security_group.backend_id ) openstack_security_group.set_ok() openstack_security_group.save() security_group.refresh_from_db() self.assertIn(openstack_security_group.name, security_group.name) self.assertIn(openstack_security_group.description, security_group.description) def test_security_group_rules_are_updated_when_one_more_rule_is_added(self): openstack_security_group = openstack_factories.SecurityGroupFactory( tenant=self.tenant, state=StateMixin.States.UPDATING ) openstack_factories.SecurityGroupRuleFactory(security_group=openstack_security_group) security_group = factories.SecurityGroupFactory( settings=self.service_settings, backend_id=openstack_security_group.backend_id ) openstack_security_group.set_ok() openstack_security_group.save() self.assertEqual(security_group.rules.count(), 1, 'Security group rule has not been added') self.assertEqual(security_group.rules.first().protocol, openstack_security_group.rules.first().protocol) self.assertEqual(security_group.rules.first().from_port, openstack_security_group.rules.first().from_port) self.assertEqual(security_group.rules.first().to_port, openstack_security_group.rules.first().to_port) def test_security_group_is_deleted_when_openstack_security_group_is_deleted(self): openstack_security_group = openstack_factories.SecurityGroupFactory(tenant=self.tenant) factories.SecurityGroupFactory(settings=self.service_settings, backend_id=openstack_security_group.backend_id) openstack_security_group.delete() 
self.assertEqual(models.SecurityGroup.objects.count(), 0) def test_if_security_group_already_exists_duplicate_is_not_created(self): """ Consider the following case: there are two objects: security group as a property and security group as a resource. Property has been created by pull_security_groups method. When resource switches state, property should be created too via signal handler. But as security group already exists as a property it should not be created twice, because otherwise it violates uniqueness constraint. """ security_group = factories.SecurityGroupFactory( settings=self.service_settings, backend_id='backend_id', ) openstack_security_group = openstack_factories.SecurityGroupFactory( tenant=self.tenant, state=StateMixin.States.CREATING, backend_id=security_group.backend_id, ) openstack_security_group.set_ok() openstack_security_group.save() self.assertEqual(models.SecurityGroup.objects.count(), 1) class FloatingIPHandlerTest(BaseServicePropertyTest): def setUp(self): super(FloatingIPHandlerTest, self).setUp() def test_floating_ip_create(self): openstack_floating_ip = openstack_factories.FloatingIPFactory( tenant=self.tenant, state=StateMixin.States.CREATING ) self.assertEqual(models.FloatingIP.objects.count(), 0) openstack_floating_ip.set_ok() openstack_floating_ip.save() self.assertEqual(models.FloatingIP.objects.count(), 1) def test_floating_ip_is_not_created_if_it_already_exists(self): factories.FloatingIPFactory( settings=self.service_settings, backend_id='VALID_BACKEND_ID' ) openstack_floating_ip = openstack_factories.FloatingIPFactory( tenant=self.tenant, state=StateMixin.States.CREATING, backend_id='VALID_BACKEND_ID', ) self.assertEqual(models.FloatingIP.objects.count(), 1) openstack_floating_ip.set_ok() openstack_floating_ip.save() self.assertEqual(models.FloatingIP.objects.count(), 1) def test_floating_ip_update(self): openstack_floating_ip = openstack_factories.FloatingIPFactory( tenant=self.tenant, name='New name', state=StateMixin.States.UPDATING ) floating_ip = factories.FloatingIPFactory( settings=self.service_settings, backend_id=openstack_floating_ip.backend_id, ) openstack_floating_ip.set_ok() openstack_floating_ip.save() floating_ip.refresh_from_db() self.assertEqual(openstack_floating_ip.name, floating_ip.name) self.assertEqual(openstack_floating_ip.address, floating_ip.address) self.assertEqual(openstack_floating_ip.runtime_state, floating_ip.runtime_state) self.assertEqual(openstack_floating_ip.backend_network_id, floating_ip.backend_network_id) def test_floating_ip_delete(self): openstack_floating_ip = openstack_factories.FloatingIPFactory(tenant=self.tenant) factories.FloatingIPFactory(settings=self.service_settings, backend_id=openstack_floating_ip.backend_id) openstack_floating_ip.delete() self.assertEqual(models.FloatingIP.objects.count(), 0) class TenantChangeCredentialsTest(TestCase): def test_service_settings_password_and_username_are_updated_when_tenant_user_password_changes(self): tenant = openstack_factories.TenantFactory() service_settings = structure_models.ServiceSettings.objects.first() service_settings.scope = tenant service_settings.password = tenant.user_password service_settings.save() new_password = 'new_password' new_username = 'new_username' tenant.user_password = new_password tenant.user_username = new_username tenant.save() service_settings.refresh_from_db() self.assertEqual(service_settings.password, new_password) self.assertEqual(service_settings.username, new_username) class NetworkHandlerTest(BaseServicePropertyTest): def 
setUp(self): super(NetworkHandlerTest, self).setUp() def test_network_create(self): openstack_network = openstack_factories.NetworkFactory( tenant=self.tenant, state=StateMixin.States.CREATING) self.assertEqual(models.Network.objects.count(), 0) openstack_network.set_ok() openstack_network.save() self.assertTrue(models.Network.objects.filter(backend_id=openstack_network.backend_id).exists()) def test_network_update(self): openstack_network = openstack_factories.NetworkFactory( tenant=self.tenant, name='New network name', state=StateMixin.States.UPDATING ) network = factories.NetworkFactory( settings=self.service_settings, backend_id=openstack_network.backend_id, ) openstack_network.set_ok() openstack_network.save() network.refresh_from_db() self.assertEqual(openstack_network.name, network.name) self.assertEqual(openstack_network.is_external, network.is_external) self.assertEqual(openstack_network.type, network.type) self.assertEqual(openstack_network.segmentation_id, network.segmentation_id) self.assertEqual(openstack_network.backend_id, network.backend_id) def test_network_delete(self): openstack_network = openstack_factories.NetworkFactory(tenant=self.tenant) factories.NetworkFactory(settings=self.service_settings, backend_id=openstack_network.backend_id) openstack_network.delete() self.assertEqual(models.Network.objects.count(), 0) class SubNetHandlerTest(BaseServicePropertyTest): def setUp(self): super(SubNetHandlerTest, self).setUp() self.openstack_network = openstack_factories.NetworkFactory(tenant=self.tenant) self.network = factories.NetworkFactory( settings=self.service_settings, backend_id=self.openstack_network.backend_id ) def test_subnet_create(self): openstack_subnet = openstack_factories.SubNetFactory( network=self.openstack_network, state=StateMixin.States.CREATING ) self.assertEqual(models.SubNet.objects.count(), 0) openstack_subnet.set_ok() openstack_subnet.save() self.assertTrue(models.SubNet.objects.filter(backend_id=openstack_subnet.backend_id).exists()) def test_subnet_update(self): openstack_subnet = openstack_factories.SubNetFactory( network=self.openstack_network, name='New subnet name', state=StateMixin.States.UPDATING ) subnet = factories.SubNetFactory( network=self.network, settings=self.service_settings, backend_id=openstack_subnet.backend_id, ) openstack_subnet.set_ok() openstack_subnet.save() subnet.refresh_from_db() self.assertEqual(openstack_subnet.name, subnet.name) self.assertEqual(openstack_subnet.cidr, subnet.cidr) self.assertEqual(openstack_subnet.gateway_ip, subnet.gateway_ip) self.assertEqual(openstack_subnet.allocation_pools, subnet.allocation_pools) self.assertEqual(openstack_subnet.ip_version, subnet.ip_version) self.assertEqual(openstack_subnet.enable_dhcp, subnet.enable_dhcp) self.assertEqual(openstack_subnet.dns_nameservers, subnet.dns_nameservers) def test_subnet_delete(self): openstack_subnet = openstack_factories.SubNetFactory(network__tenant=self.tenant) factories.SubNetFactory(settings=self.service_settings, backend_id=openstack_subnet.backend_id) openstack_subnet.delete() self.assertEqual(models.SubNet.objects.count(), 0) class ServiceSettingsCertificationHandlerTest(TestCase): def test_openstack_tenant_service_certifications_are_updated_when_tenant_settings_certifications_are_added(self): tenant = openstack_factories.TenantFactory() tenant_service1 = factories.OpenStackTenantServiceFactory(settings__scope=tenant) tenant_service2 = factories.OpenStackTenantServiceFactory(settings__scope=tenant)
self.assertEqual(tenant_service1.settings.certifications.count(), 0) self.assertEqual(tenant_service2.settings.certifications.count(), 0) new_certification = structure_factories.ServiceCertificationFactory() tenant.service_project_link.service.settings.certifications.add(new_certification) self.assertTrue(tenant_service1.settings.certifications.filter(pk__in=[new_certification.pk]).exists()) self.assertTrue(tenant_service2.settings.certifications.filter(pk__in=[new_certification.pk]).exists()) def test_openstack_tenant_service_certifications_are_removed_if_tenant_settings_certifications_are_removed(self): tenant = openstack_factories.TenantFactory() tenant_service = factories.OpenStackTenantServiceFactory(settings__scope=tenant) new_certification = structure_factories.ServiceCertificationFactory() tenant.service_project_link.service.settings.certifications.add(new_certification) self.assertEqual(tenant_service.settings.certifications.count(), 1) tenant.service_project_link.service.settings.certifications.clear() self.assertEqual(tenant.service_project_link.service.settings.certifications.count(), 0) self.assertEqual(tenant_service.settings.certifications.count(), 0) class CopyCertificationsTest(TestCase): def test_openstack_tenant_settings_certifications_are_copied_from_openstack_settings(self): tenant = openstack_factories.TenantFactory() certifications = structure_factories.ServiceCertificationFactory.create_batch(2) tenant.service_project_link.service.settings.certifications.add(*certifications) settings = factories.OpenStackTenantServiceSettingsFactory(scope=tenant) certifications_pk = [c.pk for c in certifications] self.assertEqual(settings.certifications.filter(pk__in=certifications_pk).count(), 2) def test_openstack_tenant_settings_certifications_are_not_copied_on_update(self): tenant = openstack_factories.TenantFactory() certification = structure_factories.ServiceCertificationFactory() tenant.service_project_link.service.settings.certifications.add(certification) settings = factories.OpenStackTenantServiceSettingsFactory(scope=tenant) self.assertEqual(settings.certifications.count(), 1) settings.name = 'new_name' settings.save() self.assertEqual(settings.certifications.count(), 1) self.assertEqual(settings.certifications.first().pk, certification.pk) def test_openstack_tenant_settings_certifications_are_not_copied_if_scope_is_not_tenant(self): instance = factories.InstanceFactory() certification = structure_factories.ServiceCertificationFactory() instance.service_project_link.service.settings.certifications.add(certification) settings = factories.OpenStackTenantServiceSettingsFactory(scope=instance) self.assertFalse(settings.certifications.exists()) def test_openstack_tenant_settings_certifications_are_not_copied_if_scope_is_None(self): settings = factories.OpenStackTenantServiceSettingsFactory(scope=None) self.assertFalse(settings.certifications.exists()) class CreateServiceFromTenantTest(TestCase): def test_service_is_created_on_tenant_creation(self): tenant = openstack_factories.TenantFactory() self.assertTrue(structure_models.ServiceSettings.objects.filter(scope=tenant).exists()) service_settings = structure_models.ServiceSettings.objects.get( scope=tenant, type=apps.OpenStackTenantConfig.service_name, ) self.assertEqual(service_settings.name, tenant.name) self.assertEqual(service_settings.customer, tenant.service_project_link.project.customer) self.assertEqual(service_settings.username, tenant.user_username) self.assertEqual(service_settings.password, tenant.user_password) self.assertEqual(service_settings.domain, tenant.service_project_link.service.settings.domain) self.assertEqual(service_settings.backend_url, tenant.service_project_link.service.settings.backend_url) self.assertEqual(service_settings.type, apps.OpenStackTenantConfig.service_name) self.assertEqual(service_settings.options['tenant_id'], tenant.backend_id) self.assertEqual(service_settings.options['availability_zone'], tenant.availability_zone) self.assertTrue(models.OpenStackTenantService.objects.filter( settings=service_settings, customer=tenant.service_project_link.project.customer ).exists()) service = models.OpenStackTenantService.objects.get( settings=service_settings, customer=tenant.service_project_link.project.customer, ) self.assertTrue(models.OpenStackTenantServiceProjectLink.objects.filter( service=service, project=tenant.service_project_link.project, ).exists()) class FlavorPriceListItemTest(TestCase): def setUp(self): self.flavor = factories.FlavorFactory() self.content_type = ContentType.objects.get_for_model(models.Instance) def test_price_list_item_is_created_on_flavor_creation(self): cost_tracking_models.DefaultPriceListItem.objects.get( resource_content_type=self.content_type, item_type=PriceItemTypes.FLAVOR, key=self.flavor.name, )
mit
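The duplicate-guard contract the tests above pin down can be sketched without Django at all; every name below (properties, on_state_ok) is invented for illustration and is not part of waldur:

# Django-free sketch of the handler behaviour under test: when a resource
# reaches the OK state, a mirror "property" record is created exactly once,
# keyed by backend_id.
properties = {}

def on_state_ok(resource):
    # Idempotent: a property pre-created by pull_security_groups must not
    # be duplicated, or the uniqueness constraint would be violated.
    properties.setdefault(resource['backend_id'], {'name': resource['name']})

on_state_ok({'backend_id': 'sg-1', 'name': 'default'})
on_state_ok({'backend_id': 'sg-1', 'name': 'default'})
assert len(properties) == 1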
MayankGo/ec2-api
ec2api/config.py
1
2017
# Copyright 2014 # The Cloudscaling Group, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_db import options from oslo_log import log from ec2api import paths from ec2api import version CONF = cfg.CONF _DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('ec2api.sqlite') _DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'boto=WARN', 'qpid=WARN', 'sqlalchemy=WARN', 'suds=INFO', 'oslo.messaging=INFO', 'iso8601=WARN', 'requests.packages.urllib3.connectionpool=WARN', 'urllib3.connectionpool=WARN', 'websocket=WARN', 'keystonemiddleware=WARN', 'routes.middleware=WARN', 'stevedore=WARN', 'glanceclient=WARN'] _DEFAULT_LOGGING_CONTEXT_FORMAT = ('%(asctime)s.%(msecs)03d %(process)d ' '%(levelname)s %(name)s [%(request_id)s ' '%(user_identity)s] %(instance)s' '%(message)s') def parse_args(argv, default_config_files=None): log.set_defaults(_DEFAULT_LOGGING_CONTEXT_FORMAT, _DEFAULT_LOG_LEVELS) log.register_options(CONF) options.set_defaults(CONF, connection=_DEFAULT_SQL_CONNECTION, sqlite_db='ec2api.sqlite') cfg.CONF(argv[1:], project='ec2api', version=version.version_info.version_string(), default_config_files=default_config_files)
apache-2.0
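For readers unfamiliar with oslo.config, the CONF object that parse_args() populates is used roughly as below. The bind_host option is invented here purely for illustration; it is not claimed to be an option ec2api actually defines.

from oslo_config import cfg

CONF = cfg.CONF
# Register an illustrative option; a real service registers many of these.
CONF.register_opts([cfg.StrOpt('bind_host', default='0.0.0.0')])
# Parse an (empty) command line, as parse_args() does after setting defaults.
CONF([], project='ec2api')
print(CONF.bind_host)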
kockamester/diliproject
vendor/doctrine/orm/docs/en/conf.py
2448
6497
# -*- coding: utf-8 -*- # # Doctrine 2 ORM documentation build configuration file, created by # sphinx-quickstart on Fri Dec 3 18:10:24 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.append(os.path.abspath('_exts')) # -- General configuration ----------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['configurationblock'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Doctrine 2 ORM' copyright = u'2010-12, Doctrine Project Team' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '2' # The full version, including alpha/beta/rc tags. release = '2' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. language = 'en' # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. #unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'doctrine' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['_theme'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. 
Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_use_modindex = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'Doctrine2ORMdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'Doctrine2ORM.tex', u'Doctrine 2 ORM Documentation', u'Doctrine Project Team', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_use_modindex = True primary_domain = "dcorm" def linkcode_resolve(domain, info): if domain == 'dcorm': return 'http://' return None
mit
jaredkoontz/leetcode
Python/design-snake-game.py
3
2231
# Time:  O(1) per move
# Space: O(s), s is the current length of the snake.

from collections import defaultdict, deque


class SnakeGame(object):

    def __init__(self, width, height, food):
        """
        Initialize your data structure here.
        @param width - screen width
        @param height - screen height
        @param food - A list of food positions
        E.g food = [[1,1], [1,0]] means the first food is positioned at [1,1],
        the second is at [1,0].
        :type width: int
        :type height: int
        :type food: List[List[int]]
        """
        self.__width = width
        self.__height = height
        self.__score = 0
        self.__food = deque(food)
        self.__snake = deque([(0, 0)])
        self.__direction = {"U": (-1, 0), "L": (0, -1), "R": (0, 1), "D": (1, 0)}
        self.__lookup = defaultdict(int)
        self.__lookup[(0, 0)] += 1

    def move(self, direction):
        """
        Moves the snake.
        @param direction - 'U' = Up, 'L' = Left, 'R' = Right, 'D' = Down
        @return The game's score after the move. Return -1 if game over.
        Game over when snake crosses the screen boundary or bites its body.
        :type direction: str
        :rtype: int
        """
        def valid(x, y):
            return 0 <= x < self.__height and \
                   0 <= y < self.__width and \
                   (x, y) not in self.__lookup

        d = self.__direction[direction]
        x, y = self.__snake[-1][0] + d[0], self.__snake[-1][1] + d[1]

        # Pop the tail first so the head may move into the vacated cell;
        # keep it around so the snake can grow back if it eats.
        tail = self.__snake[0]
        self.__lookup[self.__snake[0]] -= 1
        if self.__lookup[self.__snake[0]] == 0:
            self.__lookup.pop(self.__snake[0])
        self.__snake.popleft()

        if not valid(x, y):
            return -1
        elif self.__food and (self.__food[0][0], self.__food[0][1]) == (x, y):
            self.__score += 1
            self.__food.popleft()
            self.__snake.appendleft(tail)
            self.__lookup[tail] += 1
        self.__snake += (x, y),
        self.__lookup[(x, y)] += 1
        return self.__score


# Your SnakeGame object will be instantiated and called as such:
# obj = SnakeGame(width, height, food)
# param_1 = obj.move(direction)
mit
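A quick driver for the class above (board and food layout taken from the classic LeetCode example; assumes SnakeGame is in scope):

# 3x2 board, food at (1, 2) then (0, 1); the snake starts at (0, 0).
game = SnakeGame(3, 2, [[1, 2], [0, 1]])
print(game.move("R"))  # 0 - (0, 1) holds the *second* food; food is eaten in order
print(game.move("D"))  # 0
print(game.move("R"))  # 1 - the head reaches the first food at (1, 2)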
mikebenfield/scipy
scipy/fftpack/tests/test_import.py
49
1352
"""Test possibility of patching fftpack with pyfftw. No module source outside of scipy.fftpack should contain an import of the form `from scipy.fftpack import ...`, so that a simple replacement of scipy.fftpack by the corresponding fftw interface completely swaps the two FFT implementations. Because this simply inspects source files, we only need to run the test on one version of Python. """ import sys if sys.version_info >= (3, 4): from pathlib import Path import re import tokenize from numpy.testing import TestCase, assert_, run_module_suite import scipy class TestFFTPackImport(TestCase): def test_fftpack_import(self): base = Path(scipy.__file__).parent regexp = r"\s*from.+\.fftpack import .*\n" for path in base.rglob("*.py"): if base / "fftpack" in path.parents: continue # use tokenize to auto-detect encoding on systems where no # default encoding is defined (e.g. LANG='C') with tokenize.open(str(path)) as file: assert_(all(not re.fullmatch(regexp, line) for line in file), "{0} contains an import from fftpack".format(path)) if __name__ == "__main__": run_module_suite(argv=sys.argv)
bsd-3-clause
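The same source-scanning idea generalizes to any package; a standalone sketch (the function name and default pattern below are illustrative, not part of the scipy test):

import re
from pathlib import Path

def find_forbidden_imports(root, pattern=r"\s*from.+\.fftpack import .*\n?"):
    """Return source files under `root` whose lines match the forbidden-import regex."""
    regexp = re.compile(pattern)
    hits = []
    for path in Path(root).rglob("*.py"):
        with open(str(path), errors="replace") as fh:
            if any(regexp.fullmatch(line) for line in fh):
                hits.append(path)
    return hits

print(find_forbidden_imports("."))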
watermelo/libcloud
libcloud/test/dns/test_godaddy.py
14
7283
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import unittest from libcloud.utils.py3 import httplib from libcloud.dns.drivers.godaddy import GoDaddyDNSDriver from libcloud.test import MockHttp from libcloud.test.file_fixtures import DNSFileFixtures from libcloud.test.secrets import DNS_PARAMS_GODADDY from libcloud.dns.base import Zone, RecordType class GoDaddyTests(unittest.TestCase): def setUp(self): GoDaddyMockHttp.type = None GoDaddyDNSDriver.connectionCls.conn_classes = ( None, GoDaddyMockHttp) self.driver = GoDaddyDNSDriver(*DNS_PARAMS_GODADDY) def assertHasKeys(self, dictionary, keys): for key in keys: self.assertTrue(key in dictionary, 'key "%s" not in dictionary' % (key)) def test_list_zones(self): zones = self.driver.list_zones() self.assertEqual(len(zones), 5) self.assertEqual(zones[0].id, '177184419') self.assertEqual(zones[0].domain, 'aperture-platform.com') def test_ex_check_availability(self): check = self.driver.ex_check_availability("wazzlewobbleflooble.com") self.assertEqual(check.available, True) self.assertEqual(check.price, 14.99) def test_ex_list_tlds(self): tlds = self.driver.ex_list_tlds() self.assertEqual(len(tlds), 331) self.assertEqual(tlds[0].name, 'academy') self.assertEqual(tlds[0].type, 'GENERIC') def test_ex_get_purchase_schema(self): schema = self.driver.ex_get_purchase_schema('com') self.assertEqual(schema['id'], 'https://api.godaddy.com/DomainPurchase#') def test_ex_get_agreements(self): ags = self.driver.ex_get_agreements('com') self.assertEqual(len(ags), 1) self.assertEqual(ags[0].title, 'Domain Name Registration Agreement') def test_ex_purchase_domain(self): fixtures = DNSFileFixtures('godaddy') document = fixtures.load('purchase_request.json') order = self.driver.ex_purchase_domain(document) self.assertEqual(order.order_id, 1) def test_list_records(self): zone = Zone(id='177184419', domain='aperture-platform.com', type='master', ttl=None, driver=self.driver) records = self.driver.list_records(zone) self.assertEqual(len(records), 14) self.assertEqual(records[0].type, RecordType.A) self.assertEqual(records[0].name, '@') self.assertEqual(records[0].data, '50.63.202.42') self.assertEqual(records[0].id, '@:A') def test_get_record(self): record = self.driver.get_record( 'aperture-platform.com', 'www:A') self.assertEqual(record.id, 'www:A') self.assertEqual(record.name, 'www') self.assertEqual(record.type, RecordType.A) self.assertEqual(record.data, '50.63.202.42') def test_create_record(self): zone = Zone(id='177184419', domain='aperture-platform.com', type='master', ttl=None, driver=self.driver) record = self.driver.create_record( zone=zone, name='www', type=RecordType.A, data='50.63.202.42' ) self.assertEqual(record.id, 'www:A') self.assertEqual(record.name, 'www') self.assertEqual(record.type, RecordType.A) 
self.assertEqual(record.data, '50.63.202.42') def test_update_record(self): record = self.driver.get_record( 'aperture-platform.com', 'www:A') record = self.driver.update_record( record=record, name='www', type=RecordType.A, data='50.63.202.22' ) self.assertEqual(record.id, 'www:A') self.assertEqual(record.name, 'www') self.assertEqual(record.type, RecordType.A) self.assertEqual(record.data, '50.63.202.22') def test_get_zone(self): zone = self.driver.get_zone('aperture-platform.com') self.assertEqual(zone.id, '177184419') self.assertEqual(zone.domain, 'aperture-platform.com') def test_delete_zone(self): zone = Zone(id='177184419', domain='aperture-platform.com', type='master', ttl=None, driver=self.driver) self.driver.delete_zone(zone) class GoDaddyMockHttp(MockHttp): fixtures = DNSFileFixtures('godaddy') def _v1_domains(self, method, url, body, headers): body = self.fixtures.load('v1_domains.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v1_domains_aperture_platform_com(self, method, url, body, headers): body = self.fixtures.load('v1_domains_aperture_platform_com.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v1_domains_aperture_platform_com_records(self, method, url, body, headers): body = self.fixtures.load('v1_domains_aperture_platform_com_records.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v1_domains_available(self, method, url, body, headers): body = self.fixtures.load('v1_domains_available.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v1_domains_tlds(self, method, url, body, headers): body = self.fixtures.load('v1_domains_tlds.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v1_domains_aperture_platform_com_records_A_www(self, method, url, body, headers): body = self.fixtures.load('v1_domains_aperture_platform_com_records_A_www.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v1_domains_purchase_schema_com(self, method, url, body, headers): body = self.fixtures.load('v1_domains_purchase_schema_com.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v1_domains_agreements(self, method, url, body, headers): body = self.fixtures.load('v1_domains_agreements.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _v1_domains_purchase(self, method, url, body, headers): body = self.fixtures.load('v1_domains_purchase.json') return (httplib.OK, body, {}, httplib.responses[httplib.OK]) if __name__ == '__main__': sys.exit(unittest.main())
apache-2.0
Bachaco-ve/odoo
addons/account/company.py
384
2814
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv class res_company(osv.osv): _inherit = "res.company" _columns = { 'expects_chart_of_accounts': fields.boolean('Expects a Chart of Accounts'), 'tax_calculation_rounding_method': fields.selection([ ('round_per_line', 'Round per Line'), ('round_globally', 'Round Globally'), ], 'Tax Calculation Rounding Method', help="If you select 'Round per Line' : for each tax, the tax amount will first be computed and rounded for each PO/SO/invoice line and then these rounded amounts will be summed, leading to the total amount for that tax. If you select 'Round Globally': for each tax, the tax amount will be computed for each PO/SO/invoice line, then these amounts will be summed and eventually this total tax amount will be rounded. If you sell with tax included, you should choose 'Round per line' because you certainly want the sum of your tax-included line subtotals to be equal to the total amount with taxes."), 'paypal_account': fields.char("Paypal Account", size=128, help="Paypal username (usually email) for receiving online payments."), 'overdue_msg': fields.text('Overdue Payments Message', translate=True), } _defaults = { 'expects_chart_of_accounts': True, 'tax_calculation_rounding_method': 'round_per_line', 'overdue_msg': '''Dear Sir/Madam, Our records indicate that some payments on your account are still due. Please find details below. If the amount has already been paid, please disregard this notice. Otherwise, please forward us the total amount stated below. If you have any queries regarding your account, Please contact us. Thank you in advance for your cooperation. Best Regards,''' } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
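The difference between the two rounding methods described in the help text above can be seen with a toy computation (all figures invented):

# Three invoice lines, each carrying 0.333 of tax.
line_taxes = [0.333, 0.333, 0.333]
round_per_line = sum(round(t, 2) for t in line_taxes)  # 0.33 * 3 = 0.99
round_globally = round(sum(line_taxes), 2)             # round(0.999) = 1.0
print(round_per_line, round_globally)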
helinwang/Paddle
paddle/api/test/util.py
20
1752
# Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import numpy as np from py_paddle import swig_paddle def doubleEqual(a, b): return abs(a - b) < 1e-5 def __readFromFile(): for i in xrange(10002): label = np.random.randint(0, 9) sample = np.random.rand(784) + 0.1 * label yield sample, label def loadMNISTTrainData(batch_size=100): if not hasattr(loadMNISTTrainData, "gen"): generator = __readFromFile() loadMNISTTrainData.gen = generator else: generator = loadMNISTTrainData.gen args = swig_paddle.Arguments.createArguments(2) # batch_size = 100 dense_slot = [] id_slot = [] atEnd = False for _ in xrange(batch_size): try: result = generator.next() dense_slot.extend(result[0]) id_slot.append(result[1]) except StopIteration: atEnd = True del loadMNISTTrainData.gen break dense_slot = swig_paddle.Matrix.createDense(dense_slot, batch_size, 784) id_slot = swig_paddle.IVector.create(id_slot) args.setSlotValue(0, dense_slot) args.setSlotIds(1, id_slot) return args, atEnd
apache-2.0
hmendozap/master-arbeit-projects
autosk_dev_test/component/RegDeepNet.py
1
19049
import numpy as np import scipy.sparse as sp from HPOlibConfigSpace.configuration_space import ConfigurationSpace from HPOlibConfigSpace.conditions import EqualsCondition, InCondition from HPOlibConfigSpace.hyperparameters import UniformFloatHyperparameter, \ UniformIntegerHyperparameter, CategoricalHyperparameter, Constant from autosklearn.pipeline.components.base import AutoSklearnRegressionAlgorithm from autosklearn.pipeline.constants import * class RegDeepNet(AutoSklearnRegressionAlgorithm): def __init__(self, number_epochs, batch_size, num_layers, dropout_output, learning_rate, solver, lambda2, random_state=None, **kwargs): self.number_epochs = number_epochs self.batch_size = batch_size self.num_layers = ord(num_layers) - ord('a') self.dropout_output = dropout_output self.learning_rate = learning_rate self.lambda2 = lambda2 self.solver = solver # Also taken from **kwargs. Because the assigned # arguments are the minimum parameters to run # the iterative net. IMO. self.lr_policy = kwargs.get("lr_policy", "fixed") self.momentum = kwargs.get("momentum", 0.99) self.beta1 = 1 - kwargs.get("beta1", 0.1) self.beta2 = 1 - kwargs.get("beta2", 0.01) self.rho = kwargs.get("rho", 0.95) self.gamma = kwargs.get("gamma", 0.01) self.power = kwargs.get("power", 1.0) self.epoch_step = kwargs.get("epoch_step", 1) # Empty features and shape self.n_features = None self.input_shape = None self.m_issparse = False self.m_isbinary = False self.m_ismultilabel = False self.m_isregression = True # TODO: Should one add a try-except here? self.num_units_per_layer = [] self.dropout_per_layer = [] self.activation_per_layer = [] self.weight_init_layer = [] self.std_per_layer = [] self.leakiness_per_layer = [] self.tanh_alpha_per_layer = [] self.tanh_beta_per_layer = [] for i in range(1, self.num_layers): self.num_units_per_layer.append(int(kwargs.get("num_units_layer_" + str(i), 128))) self.dropout_per_layer.append(float(kwargs.get("dropout_layer_" + str(i), 0.5))) self.activation_per_layer.append(kwargs.get("activation_layer_" + str(i), 'relu')) self.weight_init_layer.append(kwargs.get("weight_init_" + str(i), 'he_normal')) self.std_per_layer.append(float(kwargs.get("std_layer_" + str(i), 0.005))) self.leakiness_per_layer.append(float(kwargs.get("leakiness_layer_" + str(i), 1. / 3.))) self.tanh_alpha_per_layer.append(float(kwargs.get("tanh_alpha_layer_" + str(i), 2. 
/ 3.))) self.tanh_beta_per_layer.append(float(kwargs.get("tanh_beta_layer_" + str(i), 1.7159))) self.estimator = None self.random_state = random_state def _prefit(self, X, y): self.batch_size = int(self.batch_size) self.n_features = X.shape[1] self.input_shape = (self.batch_size, self.n_features) assert len(self.num_units_per_layer) == self.num_layers - 1,\ "Number of created layers is different than actual layers" assert len(self.dropout_per_layer) == self.num_layers - 1,\ "Number of created layers is different than actual layers" self.num_output_units = 1 # Regression # Normalize the output self.mean_y = np.mean(y) self.std_y = np.std(y) y = (y - self.mean_y) / self.std_y if len(y.shape) == 1: y = y[:, np.newaxis] self.m_issparse = sp.issparse(X) return X, y def fit(self, X, y): Xf, yf = self._prefit(X, y) from implementation import FeedForwardNet self.estimator = FeedForwardNet.FeedForwardNet(batch_size=self.batch_size, input_shape=self.input_shape, num_layers=self.num_layers, num_units_per_layer=self.num_units_per_layer, dropout_per_layer=self.dropout_per_layer, activation_per_layer=self.activation_per_layer, weight_init_per_layer=self.weight_init_layer, std_per_layer=self.std_per_layer, leakiness_per_layer=self.leakiness_per_layer, tanh_alpha_per_layer=self.tanh_alpha_per_layer, tanh_beta_per_layer=self.tanh_beta_per_layer, num_output_units=self.num_output_units, dropout_output=self.dropout_output, learning_rate=self.learning_rate, lr_policy=self.lr_policy, lambda2=self.lambda2, momentum=self.momentum, beta1=self.beta1, beta2=self.beta2, rho=self.rho, solver=self.solver, num_epochs=self.number_epochs, gamma=self.gamma, power=self.power, epoch_step=self.epoch_step, is_sparse=self.m_issparse, is_binary=self.m_isbinary, is_multilabel=self.m_ismultilabel, is_regression=self.m_isregression, random_state=self.random_state) self.estimator.fit(Xf, yf) return self def predict(self, X): if self.estimator is None: raise NotImplementedError preds = self.estimator.predict(X, self.m_issparse) return preds * self.std_y + self.mean_y def predict_proba(self, X): if self.estimator is None: raise NotImplementedError() return self.estimator.predict_proba(X, self.m_issparse) @staticmethod def get_properties(dataset_properties=None): return {'shortname': 'reg_feed_nn', 'name': 'Regression Feed Forward Neural Network', 'handles_regression': True, 'handles_classification': False, 'handles_multiclass': False, 'handles_multilabel': False, 'is_deterministic': True, 'input': (DENSE, SPARSE, UNSIGNED_DATA), 'output': (PREDICTIONS,)} @staticmethod def get_hyperparameter_search_space(dataset_properties=None): max_num_layers = 7 # Maximum number of layers coded # Hacky way to condition layers params based on the number of layers # 'c'=1, 'd'=2, 'e'=3 ,'f'=4', g ='5', h='6' + output_layer layer_choices = [chr(i) for i in range(ord('c'), ord('b') + max_num_layers)] batch_size = UniformIntegerHyperparameter("batch_size", 32, 4096, log=True, default=32) number_epochs = UniformIntegerHyperparameter("number_epochs", 2, 80, default=5) num_layers = CategoricalHyperparameter("num_layers", choices=layer_choices, default='c') lr = UniformFloatHyperparameter("learning_rate", 1e-6, 1.0, log=True, default=0.01) l2 = UniformFloatHyperparameter("lambda2", 1e-7, 1e-2, log=True, default=1e-4) dropout_output = UniformFloatHyperparameter("dropout_output", 0.0, 0.99, default=0.5) # Define basic hyperparameters and define the config space # basic means that are independent from the number of layers cs = ConfigurationSpace() 
cs.add_hyperparameter(number_epochs) cs.add_hyperparameter(batch_size) cs.add_hyperparameter(num_layers) cs.add_hyperparameter(lr) cs.add_hyperparameter(l2) cs.add_hyperparameter(dropout_output) # Define parameters with different child parameters and conditions solver_choices = ["adam", "adadelta", "adagrad", "sgd", "momentum", "nesterov", "smorm3s"] solver = CategoricalHyperparameter(name="solver", choices=solver_choices, default="smorm3s") beta1 = UniformFloatHyperparameter("beta1", 1e-4, 0.1, log=True, default=0.1) beta2 = UniformFloatHyperparameter("beta2", 1e-4, 0.1, log=True, default=0.01) rho = UniformFloatHyperparameter("rho", 0.05, 0.99, log=True, default=0.95) momentum = UniformFloatHyperparameter("momentum", 0.3, 0.999, default=0.9) # TODO: Add policy based on this sklearn sgd policy_choices = ['fixed', 'inv', 'exp', 'step'] lr_policy = CategoricalHyperparameter(name="lr_policy", choices=policy_choices, default='fixed') gamma = UniformFloatHyperparameter(name="gamma", lower=1e-3, upper=1e-1, default=1e-2) power = UniformFloatHyperparameter("power", 0.0, 1.0, default=0.5) epoch_step = UniformIntegerHyperparameter("epoch_step", 2, 20, default=5) cs.add_hyperparameter(solver) cs.add_hyperparameter(beta1) cs.add_hyperparameter(beta2) cs.add_hyperparameter(momentum) cs.add_hyperparameter(rho) cs.add_hyperparameter(lr_policy) cs.add_hyperparameter(gamma) cs.add_hyperparameter(power) cs.add_hyperparameter(epoch_step) # Define parameters that are needed it for each layer output_activation_choices = ['softmax', 'sigmoid', 'softplus', 'tanh'] activations_choices = ['sigmoid', 'tanh', 'scaledTanh', 'elu', 'relu', 'leaky', 'linear'] weight_choices = ['constant', 'normal', 'uniform', 'glorot_normal', 'glorot_uniform', 'he_normal', 'he_uniform', 'ortogonal', 'sparse'] # Iterate over parameters that are used in each layer for i in range(1, max_num_layers): layer_units = UniformIntegerHyperparameter("num_units_layer_" + str(i), 64, 4096, log=True, default=128) cs.add_hyperparameter(layer_units) layer_dropout = UniformFloatHyperparameter("dropout_layer_" + str(i), 0.0, 0.99, default=0.5) cs.add_hyperparameter(layer_dropout) weight_initialization = CategoricalHyperparameter('weight_init_' + str(i), choices=weight_choices, default='he_normal') cs.add_hyperparameter(weight_initialization) layer_std = UniformFloatHyperparameter("std_layer_" + str(i), 1e-6, 0.1, log=True, default=0.005) cs.add_hyperparameter(layer_std) layer_activation = CategoricalHyperparameter("activation_layer_" + str(i), choices=activations_choices, default="relu") cs.add_hyperparameter(layer_activation) layer_leakiness = UniformFloatHyperparameter('leakiness_layer_' + str(i), 0.01, 0.99, default=0.3) cs.add_hyperparameter(layer_leakiness) layer_tanh_alpha = UniformFloatHyperparameter('tanh_alpha_layer_' + str(i), 0.5, 1.0, default=2. / 3.) 
cs.add_hyperparameter(layer_tanh_alpha) layer_tanh_beta = UniformFloatHyperparameter('tanh_beta_layer_' + str(i), 1.1, 3.0, log=True, default=1.7159) cs.add_hyperparameter(layer_tanh_beta) # TODO: Could be in a function in a new module for i in range(2, max_num_layers): # Condition layers parameter on layer choice layer_unit_param = cs.get_hyperparameter("num_units_layer_" + str(i)) layer_cond = InCondition(child=layer_unit_param, parent=num_layers, values=[l for l in layer_choices[i - 1:]]) cs.add_condition(layer_cond) # Condition dropout parameter on layer choice layer_dropout_param = cs.get_hyperparameter("dropout_layer_" + str(i)) layer_cond = InCondition(child=layer_dropout_param, parent=num_layers, values=[l for l in layer_choices[i - 1:]]) cs.add_condition(layer_cond) # Condition weight initialization on layer choice layer_weight_param = cs.get_hyperparameter("weight_init_" + str(i)) layer_cond = InCondition(child=layer_weight_param, parent=num_layers, values=[l for l in layer_choices[i - 1:]]) cs.add_condition(layer_cond) # Condition std parameter on weight layer initialization choice layer_std_param = cs.get_hyperparameter("std_layer_" + str(i)) weight_cond = EqualsCondition(child=layer_std_param, parent=layer_weight_param, value='normal') cs.add_condition(weight_cond) # Condition activation parameter on layer choice layer_activation_param = cs.get_hyperparameter("activation_layer_" + str(i)) layer_cond = InCondition(child=layer_activation_param, parent=num_layers, values=[l for l in layer_choices[i - 1:]]) cs.add_condition(layer_cond) # Condition leakiness on activation choice layer_leakiness_param = cs.get_hyperparameter("leakiness_layer_" + str(i)) activation_cond = EqualsCondition(child=layer_leakiness_param, parent=layer_activation_param, value='leaky') cs.add_condition(activation_cond) # Condition tanh on activation choice layer_tanh_alpha_param = cs.get_hyperparameter("tanh_alpha_layer_" + str(i)) activation_cond = EqualsCondition(child=layer_tanh_alpha_param, parent=layer_activation_param, value='scaledTanh') cs.add_condition(activation_cond) layer_tanh_beta_param = cs.get_hyperparameter("tanh_beta_layer_" + str(i)) activation_cond = EqualsCondition(child=layer_tanh_beta_param, parent=layer_activation_param, value='scaledTanh') cs.add_condition(activation_cond) # Conditioning on solver momentum_depends_on_solver = InCondition(momentum, solver, values=["momentum", "nesterov"]) beta1_depends_on_solver = EqualsCondition(beta1, solver, "adam") beta2_depends_on_solver = EqualsCondition(beta2, solver, "adam") rho_depends_on_solver = EqualsCondition(rho, solver, "adadelta") cs.add_condition(momentum_depends_on_solver) cs.add_condition(beta1_depends_on_solver) cs.add_condition(beta2_depends_on_solver) cs.add_condition(rho_depends_on_solver) # Conditioning on learning rate policy lr_policy_depends_on_solver = InCondition(lr_policy, solver, ["adadelta", "adagrad", "sgd", "momentum", "nesterov"]) gamma_depends_on_policy = InCondition(child=gamma, parent=lr_policy, values=["inv", "exp", "step"]) power_depends_on_policy = EqualsCondition(power, lr_policy, "inv") epoch_step_depends_on_policy = EqualsCondition(epoch_step, lr_policy, "step") cs.add_condition(lr_policy_depends_on_solver) cs.add_condition(gamma_depends_on_policy) cs.add_condition(power_depends_on_policy) cs.add_condition(epoch_step_depends_on_policy) return cs
mit
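The conditional-space pattern used above, reduced to a single hyperparameter and built from the same HPOlibConfigSpace calls the class relies on; this is a sketch, not autosklearn's actual search space:

from HPOlibConfigSpace.configuration_space import ConfigurationSpace
from HPOlibConfigSpace.conditions import EqualsCondition
from HPOlibConfigSpace.hyperparameters import CategoricalHyperparameter, \
    UniformFloatHyperparameter

cs = ConfigurationSpace()
solver = CategoricalHyperparameter("solver", choices=["adam", "sgd"], default="adam")
beta1 = UniformFloatHyperparameter("beta1", 1e-4, 0.1, log=True, default=0.1)
cs.add_hyperparameter(solver)
cs.add_hyperparameter(beta1)
# beta1 is only active when the categorical solver equals "adam"
cs.add_condition(EqualsCondition(beta1, solver, "adam"))
print(cs)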
RohitDas/cubeproject
lib/django/utils/ipv6.py
26
7971
# This code was mostly based on ipaddr-py # Copyright 2007 Google Inc. https://github.com/google/ipaddr-py # Licensed under the Apache License, Version 2.0 (the "License"). from django.core.exceptions import ValidationError from django.utils.six.moves import range from django.utils.translation import ugettext_lazy as _ def clean_ipv6_address(ip_str, unpack_ipv4=False, error_message=_("This is not a valid IPv6 address.")): """ Cleans an IPv6 address string. Validity is checked by calling is_valid_ipv6_address() - if an invalid address is passed, ValidationError is raised. Replaces the longest continuous zero-sequence with "::" and removes leading zeroes and makes sure all hextets are lowercase. Args: ip_str: A valid IPv6 address. unpack_ipv4: if an IPv4-mapped address is found, return the plain IPv4 address (default=False). error_message: An error message used in the ValidationError. Returns: A compressed IPv6 address, or the same value """ best_doublecolon_start = -1 best_doublecolon_len = 0 doublecolon_start = -1 doublecolon_len = 0 if not is_valid_ipv6_address(ip_str): raise ValidationError(error_message, code='invalid') # This algorithm can only handle fully exploded # IP strings ip_str = _explode_shorthand_ip_string(ip_str) ip_str = _sanitize_ipv4_mapping(ip_str) # If needed, unpack the IPv4 and return straight away # - no need in running the rest of the algorithm if unpack_ipv4: ipv4_unpacked = _unpack_ipv4(ip_str) if ipv4_unpacked: return ipv4_unpacked hextets = ip_str.split(":") for index in range(len(hextets)): # Remove leading zeroes hextets[index] = hextets[index].lstrip('0') if not hextets[index]: hextets[index] = '0' # Determine best hextet to compress if hextets[index] == '0': doublecolon_len += 1 if doublecolon_start == -1: # Start of a sequence of zeros. doublecolon_start = index if doublecolon_len > best_doublecolon_len: # This is the longest sequence of zeros so far. best_doublecolon_len = doublecolon_len best_doublecolon_start = doublecolon_start else: doublecolon_len = 0 doublecolon_start = -1 # Compress the most suitable hextet if best_doublecolon_len > 1: best_doublecolon_end = (best_doublecolon_start + best_doublecolon_len) # For zeros at the end of the address. if best_doublecolon_end == len(hextets): hextets += [''] hextets[best_doublecolon_start:best_doublecolon_end] = [''] # For zeros at the beginning of the address. if best_doublecolon_start == 0: hextets = [''] + hextets result = ":".join(hextets) return result.lower() def _sanitize_ipv4_mapping(ip_str): """ Sanitize IPv4 mapping in an expanded IPv6 address. This converts ::ffff:0a0a:0a0a to ::ffff:10.10.10.10. If there is nothing to sanitize, returns an unchanged string. Args: ip_str: A string, the expanded IPv6 address. Returns: The sanitized output string, if applicable. """ if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'): # not an ipv4 mapping return ip_str hextets = ip_str.split(':') if '.' in hextets[-1]: # already sanitized return ip_str ipv4_address = "%d.%d.%d.%d" % ( int(hextets[6][0:2], 16), int(hextets[6][2:4], 16), int(hextets[7][0:2], 16), int(hextets[7][2:4], 16), ) result = ':'.join(hextets[0:6]) result += ':' + ipv4_address return result def _unpack_ipv4(ip_str): """ Unpack an IPv4 address that was mapped in a compressed IPv6 address. This converts 0000:0000:0000:0000:0000:ffff:10.10.10.10 to 10.10.10.10. If there is nothing to sanitize, returns None. Args: ip_str: A string, the expanded IPv6 address. Returns: The unpacked IPv4 address, or None if there was nothing to unpack. 
""" if not ip_str.lower().startswith('0000:0000:0000:0000:0000:ffff:'): return None return ip_str.rsplit(':', 1)[1] def is_valid_ipv6_address(ip_str): """ Ensure we have a valid IPv6 address. Args: ip_str: A string, the IPv6 address. Returns: A boolean, True if this is a valid IPv6 address. """ from django.core.validators import validate_ipv4_address # We need to have at least one ':'. if ':' not in ip_str: return False # We can only have one '::' shortener. if ip_str.count('::') > 1: return False # '::' should be encompassed by start, digits or end. if ':::' in ip_str: return False # A single colon can neither start nor end an address. if ((ip_str.startswith(':') and not ip_str.startswith('::')) or (ip_str.endswith(':') and not ip_str.endswith('::'))): return False # We can never have more than 7 ':' (1::2:3:4:5:6:7:8 is invalid) if ip_str.count(':') > 7: return False # If we have no concatenation, we need to have 8 fields with 7 ':'. if '::' not in ip_str and ip_str.count(':') != 7: # We might have an IPv4 mapped address. if ip_str.count('.') != 3: return False ip_str = _explode_shorthand_ip_string(ip_str) # Now that we have that all squared away, let's check that each of the # hextets are between 0x0 and 0xFFFF. for hextet in ip_str.split(':'): if hextet.count('.') == 3: # If we have an IPv4 mapped address, the IPv4 portion has to # be at the end of the IPv6 portion. if not ip_str.split(':')[-1] == hextet: return False try: validate_ipv4_address(hextet) except ValidationError: return False else: try: # a value error here means that we got a bad hextet, # something like 0xzzzz if int(hextet, 16) < 0x0 or int(hextet, 16) > 0xFFFF: return False except ValueError: return False return True def _explode_shorthand_ip_string(ip_str): """ Expand a shortened IPv6 address. Args: ip_str: A string, the IPv6 address. Returns: A string, the expanded IPv6 address. """ if not _is_shorthand_ip(ip_str): # We've already got a longhand ip_str. return ip_str new_ip = [] hextet = ip_str.split('::') # If there is a ::, we need to expand it with zeroes # to get to 8 hextets - unless there is a dot in the last hextet, # meaning we're doing v4-mapping if '.' in ip_str.split(':')[-1]: fill_to = 7 else: fill_to = 8 if len(hextet) > 1: sep = len(hextet[0].split(':')) + len(hextet[1].split(':')) new_ip = hextet[0].split(':') for __ in range(fill_to - sep): new_ip.append('0000') new_ip += hextet[1].split(':') else: new_ip = ip_str.split(':') # Now need to make sure every hextet is 4 lower case characters. # If a hextet is < 4 characters, we've got missing leading 0's. ret_ip = [] for hextet in new_ip: ret_ip.append(('0' * (4 - len(hextet)) + hextet).lower()) return ':'.join(ret_ip) def _is_shorthand_ip(ip_str): """Determine if the address is shortened. Args: ip_str: A string, the IPv6 address. Returns: A boolean, True if the address is shortened. """ if ip_str.count('::') == 1: return True if any(len(x) < 4 for x in ip_str.split(':')): return True return False
bsd-3-clause
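A quick way to cross-check the compression rules without Django is the stdlib ipaddress module, which applies the same normalization (longest zero run collapsed, lowercase hextets) and gives the same result for this input:

import ipaddress

print(ipaddress.ip_address("2001:0DB8:0000:0000:0000:0000:0000:0001"))
# -> 2001:db8::1, matching clean_ipv6_address for the same address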
simplyvikram/google-chartwrapper
templating/djangoproj/views.py
9
1670
# -*- coding: utf-8 -*- from django.shortcuts import render_to_response def example(request): greek_elections = [ { 'type': 'Pie3D', 'title': 'Greek Elections 2009', 'data': [43.92, 33.48, 7.54, 5.63, 4.60, 2.53, 2.3], 'labels': 'ΠΑΣΟΚ|ΝΔ|ΚΚΕ|ΛΑΟΣ|ΣΥΡΙΖΑ|Οικολόγοι Πράσινοι|Λοιποί', 'colors': '0ab927|005ac0|ff0000|100077|ffd000|99cc33|888888' }, { 'type': 'Pie3D', 'title': 'Greek Elections 2007', 'data': [41.83, 38.10, 8.15, 5.04, 3.80, 1.05, 2.03], 'labels': 'ΝΔ|ΠΑΣΟΚ|ΚΚΕ|ΣΥΡΙΖΑ|ΛΑΟΣ|Οικολόγοι Πράσινοι|Λοιποί', 'colors': '005ac0|0ab927|ff0000|ffd000|100077|99cc33|888888' }, { 'type': 'Pie3D', 'title': 'Greek Elections 2004', 'data': [45.4, 40.5, 5.9, 3.3, 2.2, 1.8, 0.9], 'labels': 'ΝΔ|ΠΑΣΟΚ|ΚΚΕ|ΣΥΡΙΖΑ|ΛΑΟΣ|ΔΗΚΚΙ|Λοιποί', 'colors': '005ac0|0ab927|ff0000|ffd000|100077|ff7f00|888888' } ] for g in greek_elections: g['legend'] = map(unicode, g['data']) return render_to_response('example.html',{ 'venndata': [100,80,60,30,30,30,10], 'piedata':[60,40], 'bhgdata':['el','or'], '20q': ['Animals','Vegetables','Minerals'], 'qrstr':'''To the human eye QR Codes look like hieroglyphics, but they can be read by any device that has the appropriate software installed.''', 'temps':'max 25°|min 15°', 'elections': greek_elections })
bsd-3-clause
tiagocardosos/stoq
stoqlib/domain/parameter.py
2
2126
# -*- coding: utf-8 -*- # vi:si:et:sw=4:sts=4:ts=4 ## ## Copyright (C) 2006-2007 Async Open Source <http://www.async.com.br> ## All rights reserved ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU Lesser General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU Lesser General Public License for more details. ## ## You should have received a copy of the GNU Lesser General Public License ## along with this program; if not, write to the Free Software ## Foundation, http://www.gnu.org/ ## ## Author(s): Stoq Team <[email protected]> ## ## """ Domain classes for handling parameters """ # pylint: enable=E1101 from stoqlib.database.properties import BoolCol, UnicodeCol from stoqlib.domain.base import Domain from stoqlib.lib.translation import stoqlib_gettext as _ class ParameterData(Domain): """ Class to store system parameters. See also: `schema <http://doc.stoq.com.br/schema/tables/parameter_data.html>`__ """ __storm_table__ = 'parameter_data' #: name of the parameter we want to query on field_name = UnicodeCol() #: current result(or value) of this parameter field_value = UnicodeCol() #: the item can't be edited through an editor. is_editable = BoolCol() def get_group(self): from stoqlib.lib.parameters import sysparam return sysparam.get_detail_by_name(self.field_name).group def get_short_description(self): from stoqlib.lib.parameters import sysparam return sysparam.get_detail_by_name(self.field_name).short_desc def get_field_value(self): # FIXME: This is a workaround to handle some parameters which are # locale specific. if self.field_value: return _(self.field_value) return self.field_value
gpl-2.0
develru/InformationDevicePy
modules/weatherdata.py
1
4214
""" Copyright (C) 2016 Richard Schwalk This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ from enum import Enum, unique from PyQt5.QtCore import Qt, QAbstractListModel, QObject, QVariant, QDateTime @unique class RoleNames(Enum): TempRole = Qt.UserRole DescriptionRole = Qt.UserRole + 1 TimeRole = Qt.UserRole + 2 IconRole = Qt.UserRole + 3 class ForecastDataModel(QAbstractListModel, QObject): """Docstring for ForecastDataModel. """ def __init__(self, parent=None): super(ForecastDataModel, self).__init__(parent) self._role_names = { RoleNames.TempRole.value: b'temp', RoleNames.DescriptionRole.value: b'description', RoleNames.TimeRole.value: b'time', RoleNames.IconRole.value: b'icon', } self._data = [] def rowCount(self, parent=None, *args, **kwargs): return len(self._data) def data(self, QModelIndex, role=None): row = QModelIndex.row() if row < 0 or row >= len(self._data): return QVariant() if role == RoleNames.IconRole.value: return self._data[row].icon elif role == RoleNames.TempRole.value: return ForecastDataModel.format_temp(self._data[row]) elif role == RoleNames.DescriptionRole.value: return self._data[row].description elif role == RoleNames.TimeRole.value: return ForecastDataModel.format_time(self._data[row]) return QVariant() def set_all_data(self, data): self.beginResetModel() self._data.clear() self._data = data self.endResetModel() @staticmethod def format_temp(weather): return '{0} °C / {1} °C'.format(weather.temp_max, weather.temp_min) @staticmethod def format_time(weather): dt = QDateTime.fromTime_t(weather.time) return dt.toString('dddd') def roleNames(self): return self._role_names class BaseWeatherData: def __init__(self): self._description = '' self._icon = '' @property def description(self): return self._description @description.setter def description(self, value): self._description = value @property def icon(self): return self._icon @icon.setter def icon(self, value): self._icon = value class CurrentWeatherData(BaseWeatherData): def __init__(self): super(CurrentWeatherData, self).__init__() self._temperature = 0 self._location = '' @property def temperature(self): return self._temperature @temperature.setter def temperature(self, value): self._temperature = value @property def location_name(self): return self._location @location_name.setter def location_name(self, value): self._location = value class WeatherForecastData(BaseWeatherData): """Docstring for WeatherForecastData. """ def __init__(self): """TODO: to be defined1. """ super(WeatherForecastData, self).__init__() self._temp_min = 0 self._temp_max = 0 self._time = 0 @property def temp_min(self): return self._temp_min @temp_min.setter def temp_min(self, value): self._temp_min = value @property def temp_max(self): return self._temp_max @temp_max.setter def temp_max(self, value): self._temp_max = value @property def time(self): return self._time @time.setter def time(self, value): self._time = value
gpl-3.0
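The plain data holders can be exercised without a Qt event loop (PyQt5 still has to be importable for the module to load); a minimal illustration using the model's own temperature formatter:

w = WeatherForecastData()
w.temp_max = 21
w.temp_min = 12
w.description = 'light rain'
print(ForecastDataModel.format_temp(w))  # -> '21 °C / 12 °C'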
tdubourg/downsizing-game
transactions.py
1
10019
from enum import Enum
from utils import i, d

Resources = Enum("CASH", "VOTE", "TRUST")


class AbstractTransaction(object):
    """Transaction interface"""
    last_id = -1
    player_1 = None
    player_2 = None

    @staticmethod
    def next_id():
        # @TODO thread safety?
        AbstractTransaction.last_id += 1
        return AbstractTransaction.last_id

    def __init__(self):
        super(AbstractTransaction, self).__init__()
        self._id = AbstractTransaction.next_id()

    def is_valid(self, judge):
        """
        Is the transaction valid?

        :return bool: True if the transaction is valid, False otherwise

        Validate a clone() of the transaction if you need to guard against the
        transaction being modified between validation and application.

        This is a non-abstract method
        """
        return self._is_valid(judge)

    def apply(self, judge):
        """
        Apply the transaction to the players' resources

        Abstract method. Has to be overridden by children
        """
        raise NotImplementedError()

    def clone(self):
        """
        Returns a clone of current object. A clone is a 1-to-1 copy of the current object.

        Abstract method
        """
        raise NotImplementedError()

    def _is_valid(self, judge):
        """
        Internal use only. Validates in-place the current transaction

        This is not a private method, but a protected abstract one. It has to be implemented
        by children, but will be called by the parent's is_valid() method unless is_valid()
        is overridden
        """
        raise NotImplementedError()

    def __str__(self):
        return "Transaction, id=" + str(self._id)

    def get_data(self):
        return self.__dict__


class UnidirectionalTransaction(AbstractTransaction):
    """
    A UnidirectionalTransaction is IMMEDIATE and unidirectional (the transfer goes from one
    player to the other, with no payback).
    """

    def __init__(self, player_from, player_to, resource_type, amount):
        """
        :param player_from: int
        :param player_to: int
        :param resource_type: Resource
        :param amount: int
        """
        super(UnidirectionalTransaction, self).__init__()
        self.player_from = player_from
        self.player_to = player_to
        # Just so that we respect the interface:
        self.player_1 = self.player_from
        self.player_2 = self.player_to
        self.resource_type = resource_type
        try:
            self.amount = int(amount)
        except ValueError:
            self.amount = -1  # So that the transaction is invalid

    def _is_valid(self, judge):
        if self.resource_type not in Resources:
            d("Invalid resource type")
            return False
        if self.amount < 0:
            d("Invalid amount")
            return False
        if not judge.is_valid_player(self.player_from) or not judge.is_valid_player(self.player_to):
            d("Invalid player_from or player_to")
            return False
        if not judge.has_enough_resource(self.player_from, self.resource_type, self.amount):
            d("player_from does not have enough resources to pay.")
            return False
        return True

    def apply(self, judge):
        i("Transaction", self._id, "is being applied.")
        if not judge.clock.has_still(1):
            raise Exception("Not enough remaining rounds")
        players_resources = judge.game.players_resources
        players_resources[self.player_from][self.resource_type] -= self.amount
        players_resources[self.player_to][self.resource_type] += self.amount
        judge.current_player_transactions += 1
        judge.clock.tick()

    def clone(self):
        return UnidirectionalTransaction(
            self.player_from,
            self.player_to,
            self.resource_type,
            self.amount
        )

    def __str__(self):
        return \
            super(UnidirectionalTransaction, self).__str__() \
            + "\n\t\t\tdirection=Unidirectional" \
            + "\n\t\t\tplayer_from=" + str(self.player_from) \
            + "\n\t\t\tplayer_to=" + str(self.player_to) \
            + "\n\t\t\tresource_type=" + str(self.resource_type) \
            + "\n\t\t\tamount=" + str(self.amount)


class BidirectionalTransaction(AbstractTransaction):
    """
    A BidirectionalTransaction is an immediate bidirectional transaction. It models a "trade":
    resources are transferred from one player to the other, and the receiving player pays for
    them with another resource, making a transfer in return.
    """

    def __init__(self, player_1, player_2, rtype_1to2, amount_1to2, rtype_2to1, amount_2to1):
        super(BidirectionalTransaction, self).__init__()
        self.transaction_1to2 = UnidirectionalTransaction(player_1, player_2, rtype_1to2, amount_1to2)
        self.transaction_2to1 = UnidirectionalTransaction(player_2, player_1, rtype_2to1, amount_2to1)
        # To respect the interface
        self.player_1 = player_1
        self.player_2 = player_2

    def _is_valid(self, judge):
        # Note: We already recreated the unidirectional internal transactions so we use the
        # no-copy/in-place validation method
        return self.transaction_1to2._is_valid(judge) and self.transaction_2to1._is_valid(judge)

    def apply(self, judge):
        if not judge.clock.has_still(1):
            raise Exception("Not enough remaining rounds")
        self.transaction_1to2.apply(judge)
        self.transaction_2to1.apply(judge)

    def clone(self):
        return BidirectionalTransaction(
            self.transaction_1to2.player_from,
            self.transaction_1to2.player_to,
            self.transaction_1to2.resource_type,
            self.transaction_1to2.amount,
            self.transaction_2to1.resource_type,
            self.transaction_2to1.amount
        )

    def __str__(self):
        return \
            super(BidirectionalTransaction, self).__str__() \
            + "\n\t\tdirection=Bidirectional" \
            + "\n\t\ttransaction_1to2=" + str(self.transaction_1to2) \
            + "\n\t\ttransaction_2to1=" + str(self.transaction_2to1)

    def get_data(self):
        data = dict(self.__dict__)
        data['transaction_1to2'] = self.transaction_1to2.get_data()
        data['transaction_2to1'] = self.transaction_2to1.get_data()
        return data


class ScheduledUnidirectionalTransaction(UnidirectionalTransaction):
    """
    A ScheduledUnidirectionalTransaction is a scheduled transaction, that is unidirectional...
    """

    def __init__(self, player_from, player_to, resource_type, amount, deadline):
        self._deadline = deadline
        super(ScheduledUnidirectionalTransaction, self).__init__(player_from, player_to,
                                                                 resource_type, amount)

    def is_valid(self, judge):
        # First, execute parent's checks
        if not super(ScheduledUnidirectionalTransaction, self).is_valid(judge):
            return False
        # If nothing went wrong, execute additional checks
        # We are going to check that the player can indeed play before the round it specified
        return judge.is_valid_delay()

    def clone(self):
        return ScheduledUnidirectionalTransaction(
            self.player_from,
            self.player_to,
            self.resource_type,
            self.amount,
            self._deadline
        )

    def __str__(self):
        return \
            super(ScheduledUnidirectionalTransaction, self).__str__() \
            + "\n\t\t\tdeadline=" + str(self._deadline)


class ScheduledBidirectionalTransaction(BidirectionalTransaction):
    """
    A ScheduledBidirectionalTransaction is a transaction that contains at least one
    ScheduledUnidirectionalTransaction
    """

    def __init__(self, player_1, player_2, rtype_1to2, amount_1to2, deadline_1to2,
                 rtype_2to1, amount_2to1, deadline_2to1):
        if deadline_1to2 is None and deadline_2to1 is None:
            raise ValueError("At least one of the deadlines should not be None. "
                             "At least one of the transactions has to be scheduled")
        super(ScheduledBidirectionalTransaction, self).__init__(player_1, player_2,
                                                                rtype_1to2, amount_1to2,
                                                                rtype_2to1, amount_2to1)
        if deadline_1to2 is not None:
            self.transaction_1to2 = ScheduledUnidirectionalTransaction(player_1, player_2,
                                                                       rtype_1to2, amount_1to2,
                                                                       deadline_1to2)
        else:
            self.transaction_1to2 = UnidirectionalTransaction(player_1, player_2,
                                                              rtype_1to2, amount_1to2)
        if deadline_2to1 is not None:
            self.transaction_2to1 = ScheduledUnidirectionalTransaction(player_2, player_1,
                                                                       rtype_2to1, amount_2to1,
                                                                       deadline_2to1)
        else:
            self.transaction_2to1 = UnidirectionalTransaction(player_2, player_1,
                                                              rtype_2to1, amount_2to1)

    def is_valid(self, judge):
        # First, execute parent's checks
        if not super(ScheduledBidirectionalTransaction, self).is_valid(judge):
            return False
        return True

    def clone(self):
        return ScheduledBidirectionalTransaction(
            self.transaction_1to2.player_from,
            self.transaction_1to2.player_to,
            self.transaction_1to2.resource_type,
            self.transaction_1to2.amount,
            self.transaction_1to2._deadline
            if isinstance(self.transaction_1to2, ScheduledUnidirectionalTransaction)
            else None,
            self.transaction_2to1.resource_type,
            self.transaction_2to1.amount,
            self.transaction_2to1._deadline
            if isinstance(self.transaction_2to1, ScheduledUnidirectionalTransaction)
            else None,
        )
lgpl-3.0
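A minimal sketch of the validate/apply cycle for the transaction classes above, assuming the module is importable as `transactions` (and that its `utils` dependency is available); the stub judge, clock, and game objects below are hypothetical and implement only the attributes the module actually touches.

# Hypothetical harness; StubJudge/StubClock/StubGame are assumptions, not part
# of the original module.
from transactions import Resources, UnidirectionalTransaction


class StubClock(object):
    def __init__(self, rounds):
        self.rounds = rounds

    def has_still(self, n):
        # True while at least n rounds remain
        return self.rounds >= n

    def tick(self):
        self.rounds -= 1


class StubGame(object):
    def __init__(self, players_resources):
        self.players_resources = players_resources


class StubJudge(object):
    def __init__(self, players_resources, rounds=10):
        self.game = StubGame(players_resources)
        self.clock = StubClock(rounds)
        self.current_player_transactions = 0

    def is_valid_player(self, player):
        return player in self.game.players_resources

    def has_enough_resource(self, player, resource_type, amount):
        return self.game.players_resources[player][resource_type] >= amount


judge = StubJudge({0: {Resources.CASH: 5}, 1: {Resources.CASH: 0}})
tx = UnidirectionalTransaction(0, 1, Resources.CASH, 3)
if tx.is_valid(judge):
    tx.clone().apply(judge)  # apply a copy, as the is_valid() docstring advises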
derekgreene/topic-stability
unsupervised/hungarian.py
5
19460
#!/usr/bin/python """ Implementation of the Hungarian (Munkres) Algorithm using Python and NumPy References: http://www.ams.jhu.edu/~castello/362/Handouts/hungarian.pdf http://weber.ucsd.edu/~vcrawfor/hungar.pdf http://en.wikipedia.org/wiki/Hungarian_algorithm http://www.public.iastate.edu/~ddoty/HungarianAlgorithm.html http://www.clapper.org/software/python/munkres/ """ # Module Information. __version__ = "1.1.1" __author__ = "Thom Dedecko" __url__ = "http://github.com/tdedecko/hungarian-algorithm" __copyright__ = "(c) 2010 Thom Dedecko" __license__ = "MIT License" class HungarianError(Exception): pass # Import numpy. Error if fails try: import numpy as np except ImportError: raise HungarianError("NumPy is not installed.") class Hungarian: """ Implementation of the Hungarian (Munkres) Algorithm using np. Usage: hungarian = Hungarian(cost_matrix) hungarian.calculate() or hungarian = Hungarian() hungarian.calculate(cost_matrix) Handle Profit matrix: hungarian = Hungarian(profit_matrix, is_profit_matrix=True) or cost_matrix = Hungarian.make_cost_matrix(profit_matrix) The matrix will be automatically padded if it is not square. For that numpy's resize function is used, which automatically adds 0's to any row/column that is added Get results and total potential after calculation: hungarian.get_results() hungarian.get_total_potential() """ def __init__(self, input_matrix=None, is_profit_matrix=False): """ input_matrix is a List of Lists. input_matrix is assumed to be a cost matrix unless is_profit_matrix is True. """ if input_matrix is not None: # Save input my_matrix = np.array(input_matrix) self._input_matrix = np.array(input_matrix) self._maxColumn = my_matrix.shape[1] self._maxRow = my_matrix.shape[0] # Adds 0s if any columns/rows are added. Otherwise stays unaltered matrix_size = max(self._maxColumn, self._maxRow) my_matrix.resize(matrix_size, matrix_size) # Convert matrix to profit matrix if necessary if is_profit_matrix: my_matrix = self.make_cost_matrix(my_matrix) self._cost_matrix = my_matrix self._size = len(my_matrix) self._shape = my_matrix.shape # Results from algorithm. self._results = [] self._totalPotential = 0 else: self._cost_matrix = None def get_results(self): """Get results after calculation.""" return self._results def get_total_potential(self): """Returns expected value after calculation.""" return self._totalPotential def calculate(self, input_matrix=None, is_profit_matrix=False): """ Implementation of the Hungarian (Munkres) Algorithm. input_matrix is a List of Lists. input_matrix is assumed to be a cost matrix unless is_profit_matrix is True. """ # Handle invalid and new matrix inputs. if input_matrix is None and self._cost_matrix is None: raise HungarianError("Invalid input") elif input_matrix is not None: self.__init__(input_matrix, is_profit_matrix) result_matrix = self._cost_matrix.copy() # Step 1: Subtract row mins from each row. for index, row in enumerate(result_matrix): result_matrix[index] -= row.min() # Step 2: Subtract column mins from each column. for index, column in enumerate(result_matrix.T): result_matrix[:, index] -= column.min() # Step 3: Use minimum number of lines to cover all zeros in the matrix. # If the total covered rows+columns is not equal to the matrix size then adjust matrix and repeat. total_covered = 0 while total_covered < self._size: # Find minimum number of lines to cover all zeros in the matrix and find total covered rows and columns. 
cover_zeros = CoverZeros(result_matrix) covered_rows = cover_zeros.get_covered_rows() covered_columns = cover_zeros.get_covered_columns() total_covered = len(covered_rows) + len(covered_columns) # if the total covered rows+columns is not equal to the matrix size then adjust it by min uncovered num (m). if total_covered < self._size: result_matrix = self._adjust_matrix_by_min_uncovered_num(result_matrix, covered_rows, covered_columns) # Step 4: Starting with the top row, work your way downwards as you make assignments. # Find single zeros in rows or columns. # Add them to final result and remove them and their associated row/column from the matrix. expected_results = min(self._maxColumn, self._maxRow) zero_locations = (result_matrix == 0) while len(self._results) != expected_results: # If number of zeros in the matrix is zero before finding all the results then an error has occurred. if not zero_locations.any(): raise HungarianError("Unable to find results. Algorithm has failed.") # Find results and mark rows and columns for deletion matched_rows, matched_columns = self.__find_matches(zero_locations) # Make arbitrary selection total_matched = len(matched_rows) + len(matched_columns) if total_matched == 0: matched_rows, matched_columns = self.select_arbitrary_match(zero_locations) # Delete rows and columns for row in matched_rows: zero_locations[row] = False for column in matched_columns: zero_locations[:, column] = False # Save Results self.__set_results(zip(matched_rows, matched_columns)) # Calculate total potential value = 0 for row, column in self._results: value += self._input_matrix[row, column] self._totalPotential = value @staticmethod def make_cost_matrix(profit_matrix): """ Converts a profit matrix into a cost matrix. Expects NumPy objects as input. 
""" # subtract profit matrix from a matrix made of the max value of the profit matrix matrix_shape = profit_matrix.shape offset_matrix = np.ones(matrix_shape) * profit_matrix.max() cost_matrix = offset_matrix - profit_matrix return cost_matrix def _adjust_matrix_by_min_uncovered_num(self, result_matrix, covered_rows, covered_columns): """Subtract m from every uncovered number and add m to every element covered with two lines.""" # Calculate minimum uncovered number (m) elements = [] for row_index, row in enumerate(result_matrix): if row_index not in covered_rows: for index, element in enumerate(row): if index not in covered_columns: elements.append(element) min_uncovered_num = min(elements) # Add m to every covered element adjusted_matrix = result_matrix for row in covered_rows: adjusted_matrix[row] += min_uncovered_num for column in covered_columns: adjusted_matrix[:, column] += min_uncovered_num # Subtract m from every element m_matrix = np.ones(self._shape) * min_uncovered_num adjusted_matrix -= m_matrix return adjusted_matrix def __find_matches(self, zero_locations): """Returns rows and columns with matches in them.""" marked_rows = np.array([], dtype=int) marked_columns = np.array([], dtype=int) # Mark rows and columns with matches # Iterate over rows for index, row in enumerate(zero_locations): row_index = np.array([index]) if np.sum(row) == 1: column_index, = np.where(row) marked_rows, marked_columns = self.__mark_rows_and_columns(marked_rows, marked_columns, row_index, column_index) # Iterate over columns for index, column in enumerate(zero_locations.T): column_index = np.array([index]) if np.sum(column) == 1: row_index, = np.where(column) marked_rows, marked_columns = self.__mark_rows_and_columns(marked_rows, marked_columns, row_index, column_index) return marked_rows, marked_columns @staticmethod def __mark_rows_and_columns(marked_rows, marked_columns, row_index, column_index): """Check if column or row is marked. If not marked then mark it.""" new_marked_rows = marked_rows new_marked_columns = marked_columns if not (marked_rows == row_index).any() and not (marked_columns == column_index).any(): new_marked_rows = np.insert(marked_rows, len(marked_rows), row_index) new_marked_columns = np.insert(marked_columns, len(marked_columns), column_index) return new_marked_rows, new_marked_columns @staticmethod def select_arbitrary_match(zero_locations): """Selects row column combination with minimum number of zeros in it.""" # Count number of zeros in row and column combinations rows, columns = np.where(zero_locations) zero_count = [] for index, row in enumerate(rows): total_zeros = np.sum(zero_locations[row]) + np.sum(zero_locations[:, columns[index]]) zero_count.append(total_zeros) # Get the row column combination with the minimum number of zeros. indices = zero_count.index(min(zero_count)) row = np.array([rows[indices]]) column = np.array([columns[indices]]) return row, column def __set_results(self, result_lists): """Set results during calculation.""" # Check if results values are out of bound from input matrix (because of matrix being padded). # Add results to results list. for result in result_lists: row, column = result if row < self._maxRow and column < self._maxColumn: new_result = (int(row), int(column)) self._results.append(new_result) class CoverZeros: """ Use minimum number of lines to cover all zeros in the matrix. 
Algorithm based on: http://weber.ucsd.edu/~vcrawfor/hungar.pdf """ def __init__(self, matrix): """ Input a matrix and save it as a boolean matrix to designate zero locations. Run calculation procedure to generate results. """ # Find zeros in matrix self._zero_locations = (matrix == 0) self._shape = matrix.shape # Choices starts without any choices made. self._choices = np.zeros(self._shape, dtype=bool) self._marked_rows = [] self._marked_columns = [] # marks rows and columns self.__calculate() # Draw lines through all unmarked rows and all marked columns. self._covered_rows = list(set(range(self._shape[0])) - set(self._marked_rows)) self._covered_columns = self._marked_columns def get_covered_rows(self): """Return list of covered rows.""" return self._covered_rows def get_covered_columns(self): """Return list of covered columns.""" return self._covered_columns def __calculate(self): """ Calculates minimum number of lines necessary to cover all zeros in a matrix. Algorithm based on: http://weber.ucsd.edu/~vcrawfor/hungar.pdf """ while True: # Erase all marks. self._marked_rows = [] self._marked_columns = [] # Mark all rows in which no choice has been made. for index, row in enumerate(self._choices): if not row.any(): self._marked_rows.append(index) # If no marked rows then finish. if not self._marked_rows: return True # Mark all columns not already marked which have zeros in marked rows. num_marked_columns = self.__mark_new_columns_with_zeros_in_marked_rows() # If no new marked columns then finish. if num_marked_columns == 0: return True # While there is some choice in every marked column. while self.__choice_in_all_marked_columns(): # Some Choice in every marked column. # Mark all rows not already marked which have choices in marked columns. num_marked_rows = self.__mark_new_rows_with_choices_in_marked_columns() # If no new marks then Finish. if num_marked_rows == 0: return True # Mark all columns not already marked which have zeros in marked rows. num_marked_columns = self.__mark_new_columns_with_zeros_in_marked_rows() # If no new marked columns then finish. if num_marked_columns == 0: return True # No choice in one or more marked columns. # Find a marked column that does not have a choice. choice_column_index = self.__find_marked_column_without_choice() while choice_column_index is not None: # Find a zero in the column indexed that does not have a row with a choice. choice_row_index = self.__find_row_without_choice(choice_column_index) # Check if an available row was found. new_choice_column_index = None if choice_row_index is None: # Find a good row to accomodate swap. Find its column pair. choice_row_index, new_choice_column_index = \ self.__find_best_choice_row_and_new_column(choice_column_index) # Delete old choice. self._choices[choice_row_index, new_choice_column_index] = False # Set zero to choice. self._choices[choice_row_index, choice_column_index] = True # Loop again if choice is added to a row with a choice already in it. 
choice_column_index = new_choice_column_index def __mark_new_columns_with_zeros_in_marked_rows(self): """Mark all columns not already marked which have zeros in marked rows.""" num_marked_columns = 0 for index, column in enumerate(self._zero_locations.T): if index not in self._marked_columns: if column.any(): row_indices, = np.where(column) zeros_in_marked_rows = (set(self._marked_rows) & set(row_indices)) != set([]) if zeros_in_marked_rows: self._marked_columns.append(index) num_marked_columns += 1 return num_marked_columns def __mark_new_rows_with_choices_in_marked_columns(self): """Mark all rows not already marked which have choices in marked columns.""" num_marked_rows = 0 for index, row in enumerate(self._choices): if index not in self._marked_rows: if row.any(): column_index, = np.where(row) if column_index in self._marked_columns: self._marked_rows.append(index) num_marked_rows += 1 return num_marked_rows def __choice_in_all_marked_columns(self): """Return Boolean True if there is a choice in all marked columns. Returns boolean False otherwise.""" for column_index in self._marked_columns: if not self._choices[:, column_index].any(): return False return True def __find_marked_column_without_choice(self): """Find a marked column that does not have a choice.""" for column_index in self._marked_columns: if not self._choices[:, column_index].any(): return column_index raise HungarianError( "Could not find a column without a choice. Failed to cover matrix zeros. Algorithm has failed.") def __find_row_without_choice(self, choice_column_index): """Find a row without a choice in it for the column indexed. If a row does not exist then return None.""" row_indices, = np.where(self._zero_locations[:, choice_column_index]) for row_index in row_indices: if not self._choices[row_index].any(): return row_index # All rows have choices. Return None. return None def __find_best_choice_row_and_new_column(self, choice_column_index): """ Find a row index to use for the choice so that the column that needs to be changed is optimal. Return a random row and column if unable to find an optimal selection. """ row_indices, = np.where(self._zero_locations[:, choice_column_index]) for row_index in row_indices: column_indices, = np.where(self._choices[row_index]) column_index = column_indices[0] if self.__find_row_without_choice(column_index) is not None: return row_index, column_index # Cannot find optimal row and column. Return a random row and column. 
from random import shuffle shuffle(row_indices) column_index, = np.where(self._choices[row_indices[0]]) return row_indices[0], column_index[0] if __name__ == '__main__': profit_matrix = [ [62, 75, 80, 93, 95, 97], [75, 80, 82, 85, 71, 97], [80, 75, 81, 98, 90, 97], [78, 82, 84, 80, 50, 98], [90, 85, 85, 80, 85, 99], [65, 75, 80, 75, 68, 96]] hungarian = Hungarian(profit_matrix, is_profit_matrix=True) hungarian.calculate() print("Expected value:\t\t543") print("Calculated value:\t", hungarian.get_total_potential()) # = 543 print("Expected results:\n\t[(0, 4), (2, 3), (5, 5), (4, 0), (1, 1), (3, 2)]") print("Results:\n\t", hungarian.get_results()) print("-" * 80) cost_matrix = [ [4, 2, 8], [4, 3, 7], [3, 1, 6]] hungarian = Hungarian(cost_matrix) print('calculating...') hungarian.calculate() print("Expected value:\t\t12") print("Calculated value:\t", hungarian.get_total_potential()) # = 12 print("Expected results:\n\t[(0, 1), (1, 0), (2, 2)]") print("Results:\n\t", hungarian.get_results()) print("-" * 80) profit_matrix = [ [62, 75, 80, 93, 0, 97], [75, 0, 82, 85, 71, 97], [80, 75, 81, 0, 90, 97], [78, 82, 0, 80, 50, 98], [0, 85, 85, 80, 85, 99], [65, 75, 80, 75, 68, 0]] hungarian = Hungarian() hungarian.calculate(profit_matrix, is_profit_matrix=True) print("Expected value:\t\t523") print("Calculated value:\t", hungarian.get_total_potential()) # = 523 print("Expected results:\n\t[(0, 3), (2, 4), (3, 0), (5, 2), (1, 5), (4, 1)]") print("Results:\n\t", hungarian.get_results()) print("-" * 80)
apache-2.0
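For reference, a compact run of the solver above on the 3x3 cost matrix from its own __main__ block; the import path mirrors the file's location in this repository and may differ in other layouts.

from unsupervised.hungarian import Hungarian

cost_matrix = [
    [4, 2, 8],
    [4, 3, 7],
    [3, 1, 6]]
hungarian = Hungarian(cost_matrix)
hungarian.calculate()
print(hungarian.get_results())          # [(0, 1), (1, 0), (2, 2)]
print(hungarian.get_total_potential())  # 12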
openai/baselines
baselines/gail/dataset/mujoco_dset.py
1
4448
'''
Data structure of the input .npz:
the data is saved in python dictionary format with keys: 'acs', 'ep_rets', 'rews', 'obs'
the value of each item is a list storing the expert trajectory sequentially
a transition can be: (data['obs'][t], data['acs'][t], data['obs'][t+1]) and get reward data['rews'][t]
'''
from baselines import logger
import numpy as np


class Dset(object):
    def __init__(self, inputs, labels, randomize):
        self.inputs = inputs
        self.labels = labels
        assert len(self.inputs) == len(self.labels)
        self.randomize = randomize
        self.num_pairs = len(inputs)
        self.init_pointer()

    def init_pointer(self):
        self.pointer = 0
        if self.randomize:
            idx = np.arange(self.num_pairs)
            np.random.shuffle(idx)
            self.inputs = self.inputs[idx, :]
            self.labels = self.labels[idx, :]

    def get_next_batch(self, batch_size):
        # if batch_size is negative -> return all
        if batch_size < 0:
            return self.inputs, self.labels
        if self.pointer + batch_size >= self.num_pairs:
            self.init_pointer()
        end = self.pointer + batch_size
        inputs = self.inputs[self.pointer:end, :]
        labels = self.labels[self.pointer:end, :]
        self.pointer = end
        return inputs, labels


class Mujoco_Dset(object):
    def __init__(self, expert_path, train_fraction=0.7, traj_limitation=-1, randomize=True):
        traj_data = np.load(expert_path)
        if traj_limitation < 0:
            traj_limitation = len(traj_data['obs'])
        obs = traj_data['obs'][:traj_limitation]
        acs = traj_data['acs'][:traj_limitation]

        # obs, acs: shape (N, L, ) + S where N = # episodes, L = episode length
        # and S is the environment observation/action space.
        # Flatten to (N * L, prod(S))
        if len(obs.shape) > 2:
            self.obs = np.reshape(obs, [-1, np.prod(obs.shape[2:])])
            self.acs = np.reshape(acs, [-1, np.prod(acs.shape[2:])])
        else:
            self.obs = np.vstack(obs)
            self.acs = np.vstack(acs)

        self.rets = traj_data['ep_rets'][:traj_limitation]
        self.avg_ret = sum(self.rets) / len(self.rets)
        self.std_ret = np.std(np.array(self.rets))
        if len(self.acs) > 2:
            self.acs = np.squeeze(self.acs)
        assert len(self.obs) == len(self.acs)
        self.num_traj = min(traj_limitation, len(traj_data['obs']))
        self.num_transition = len(self.obs)
        self.randomize = randomize
        self.dset = Dset(self.obs, self.acs, self.randomize)
        # for behavior cloning
        self.train_set = Dset(self.obs[:int(self.num_transition * train_fraction), :],
                              self.acs[:int(self.num_transition * train_fraction), :],
                              self.randomize)
        self.val_set = Dset(self.obs[int(self.num_transition * train_fraction):, :],
                            self.acs[int(self.num_transition * train_fraction):, :],
                            self.randomize)
        self.log_info()

    def log_info(self):
        logger.log("Total trajectories: %d" % self.num_traj)
        logger.log("Total transitions: %d" % self.num_transition)
        logger.log("Average returns: %f" % self.avg_ret)
        logger.log("Std for returns: %f" % self.std_ret)

    def get_next_batch(self, batch_size, split=None):
        if split is None:
            return self.dset.get_next_batch(batch_size)
        elif split == 'train':
            return self.train_set.get_next_batch(batch_size)
        elif split == 'val':
            return self.val_set.get_next_batch(batch_size)
        else:
            raise NotImplementedError

    def plot(self):
        import matplotlib.pyplot as plt
        plt.hist(self.rets)
        plt.savefig("histogram_rets.png")
        plt.close()


def test(expert_path, traj_limitation, plot):
    dset = Mujoco_Dset(expert_path, traj_limitation=traj_limitation)
    if plot:
        dset.plot()


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--expert_path", type=str, default="../data/deterministic.trpo.Hopper.0.00.npz")
    # NOTE: default=None only works on Python 2, where None < 0 is True inside Mujoco_Dset
    parser.add_argument("--traj_limitation", type=int, default=None)
    # NOTE: argparse's type=bool treats any non-empty string as True
    parser.add_argument("--plot", type=bool, default=False)
    args = parser.parse_args()
    test(args.expert_path, args.traj_limitation, args.plot)
mit
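A short sketch of consuming the dataset class above; "expert.npz" is a placeholder path and must contain the 'obs'/'acs'/'rews'/'ep_rets' keys described in the module docstring.

from baselines.gail.dataset.mujoco_dset import Mujoco_Dset

# Placeholder path; any .npz with the documented keys works here.
dset = Mujoco_Dset("expert.npz", train_fraction=0.7, traj_limitation=-1)
obs, acs = dset.get_next_batch(128, split='train')        # one BC minibatch
val_obs, val_acs = dset.get_next_batch(-1, split='val')   # whole validation split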
rhelmer/socorro-lib
socorro/lib/context_tools.py
10
1083
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import os
from contextlib import contextmanager

from socorro.lib.util import FakeLogger


#--------------------------------------------------------------------------
@contextmanager
def temp_file_context(raw_dump_path, logger=None):
    """this contextmanager implements conditionally deleting a pathname
    at the end of a context if the pathname indicates that it is a temp
    file by having the word 'TEMPORARY' embedded in it."""
    try:
        yield raw_dump_path
    finally:
        if 'TEMPORARY' in raw_dump_path:
            try:
                os.unlink(raw_dump_path)
            except OSError:
                if logger is None:
                    logger = FakeLogger()
                logger.warning(
                    'unable to delete %s. manual deletion is required.',
                    raw_dump_path,
                    exc_info=True
                )
mpl-2.0
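A minimal sketch of the context manager above; the path and the consuming function are illustrative only, and deletion is triggered solely by the 'TEMPORARY' marker in the name.

from socorro.lib.context_tools import temp_file_context


def consume_dump(path):  # hypothetical consumer of the dump file
    print("processing", path)


with temp_file_context('/tmp/TEMPORARY.20150101.dump') as dump_path:
    consume_dump(dump_path)
# dump_path is unlinked on exit because its name contains 'TEMPORARY'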
gbbr/superdesk
server/app.py
8
1307
#!/usr/bin/env python
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license

import os
import settings
from superdesk.factory import get_app as superdesk_app

if os.environ.get('NEW_RELIC_LICENSE_KEY'):
    try:
        import newrelic.agent
        newrelic.agent.initialize(os.path.abspath(os.path.join(os.path.dirname(__file__), 'newrelic.ini')))
    except ImportError:
        pass


def get_app(config=None):
    """App factory.

    :param config: configuration that can override config from `settings.py`
    :return: a new SuperdeskEve app instance
    """
    if config is None:
        config = {}

    config['APP_ABSPATH'] = os.path.abspath(os.path.dirname(__file__))

    for key in dir(settings):
        if key.isupper():
            config.setdefault(key, getattr(settings, key))

    app = superdesk_app(config)
    return app


if __name__ == '__main__':
    debug = True
    host = '0.0.0.0'
    port = int(os.environ.get('PORT', '5000'))
    app = get_app()
    app.run(host=host, port=port, debug=debug, use_reloader=debug)
agpl-3.0
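A sketch of the factory above with an ad-hoc override; SUPERDESK_URL is used here purely as an example of a key that would otherwise come from settings.py.

from app import get_app

# Any key passed here takes precedence over the same key in settings.py.
app = get_app(config={'SUPERDESK_URL': 'http://localhost:5000/api'})
app.run(host='0.0.0.0', port=5000)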
murali-munna/scikit-learn
sklearn/learning_curve.py
110
13467
"""Utilities to evaluate models with respect to a variable """ # Author: Alexander Fabisch <[email protected]> # # License: BSD 3 clause import warnings import numpy as np from .base import is_classifier, clone from .cross_validation import check_cv from .externals.joblib import Parallel, delayed from .cross_validation import _safe_split, _score, _fit_and_score from .metrics.scorer import check_scoring from .utils import indexable from .utils.fixes import astype __all__ = ['learning_curve', 'validation_curve'] def learning_curve(estimator, X, y, train_sizes=np.linspace(0.1, 1.0, 5), cv=None, scoring=None, exploit_incremental_learning=False, n_jobs=1, pre_dispatch="all", verbose=0): """Learning curve. Determines cross-validated training and test scores for different training set sizes. A cross-validation generator splits the whole dataset k times in training and test data. Subsets of the training set with varying sizes will be used to train the estimator and a score for each training subset size and the test set will be computed. Afterwards, the scores will be averaged over all k runs for each training subset size. Read more in the :ref:`User Guide <learning_curves>`. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods An object of that type which is cloned for each validation. X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples) or (n_samples, n_features), optional Target relative to X for classification or regression; None for unsupervised learning. train_sizes : array-like, shape (n_ticks,), dtype float or int Relative or absolute numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of the maximum size of the training set (that is determined by the selected validation method), i.e. it has to be within (0, 1]. Otherwise it is interpreted as absolute sizes of the training sets. Note that for classification the number of samples usually have to be big enough to contain at least one sample from each class. (default: np.linspace(0.1, 1.0, 5)) cv : integer, cross-validation generator, optional If an integer is passed, it is the number of folds (defaults to 3). Specific cross-validation objects can be passed, see sklearn.cross_validation module for the list of possible objects scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. exploit_incremental_learning : boolean, optional, default: False If the estimator supports incremental learning, this will be used to speed up fitting for different training set sizes. n_jobs : integer, optional Number of jobs to run in parallel (default 1). pre_dispatch : integer or string, optional Number of predispatched jobs for parallel execution (default is all). The option can reduce the allocated memory. The string can be an expression like '2*n_jobs'. verbose : integer, optional Controls the verbosity: the higher, the more messages. Returns ------- train_sizes_abs : array, shape = (n_unique_ticks,), dtype int Numbers of training examples that has been used to generate the learning curve. Note that the number of ticks might be less than n_ticks because duplicate entries will be removed. train_scores : array, shape (n_ticks, n_cv_folds) Scores on training sets. 
test_scores : array, shape (n_ticks, n_cv_folds) Scores on test set. Notes ----- See :ref:`examples/model_selection/plot_learning_curve.py <example_model_selection_plot_learning_curve.py>` """ if exploit_incremental_learning and not hasattr(estimator, "partial_fit"): raise ValueError("An estimator must support the partial_fit interface " "to exploit incremental learning") X, y = indexable(X, y) # Make a list since we will be iterating multiple times over the folds cv = list(check_cv(cv, X, y, classifier=is_classifier(estimator))) scorer = check_scoring(estimator, scoring=scoring) # HACK as long as boolean indices are allowed in cv generators if cv[0][0].dtype == bool: new_cv = [] for i in range(len(cv)): new_cv.append((np.nonzero(cv[i][0])[0], np.nonzero(cv[i][1])[0])) cv = new_cv n_max_training_samples = len(cv[0][0]) # Because the lengths of folds can be significantly different, it is # not guaranteed that we use all of the available training data when we # use the first 'n_max_training_samples' samples. train_sizes_abs = _translate_train_sizes(train_sizes, n_max_training_samples) n_unique_ticks = train_sizes_abs.shape[0] if verbose > 0: print("[learning_curve] Training set sizes: " + str(train_sizes_abs)) parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) if exploit_incremental_learning: classes = np.unique(y) if is_classifier(estimator) else None out = parallel(delayed(_incremental_fit_estimator)( clone(estimator), X, y, classes, train, test, train_sizes_abs, scorer, verbose) for train, test in cv) else: out = parallel(delayed(_fit_and_score)( clone(estimator), X, y, scorer, train[:n_train_samples], test, verbose, parameters=None, fit_params=None, return_train_score=True) for train, test in cv for n_train_samples in train_sizes_abs) out = np.array(out)[:, :2] n_cv_folds = out.shape[0] // n_unique_ticks out = out.reshape(n_cv_folds, n_unique_ticks, 2) out = np.asarray(out).transpose((2, 1, 0)) return train_sizes_abs, out[0], out[1] def _translate_train_sizes(train_sizes, n_max_training_samples): """Determine absolute sizes of training subsets and validate 'train_sizes'. Examples: _translate_train_sizes([0.5, 1.0], 10) -> [5, 10] _translate_train_sizes([5, 10], 10) -> [5, 10] Parameters ---------- train_sizes : array-like, shape (n_ticks,), dtype float or int Numbers of training examples that will be used to generate the learning curve. If the dtype is float, it is regarded as a fraction of 'n_max_training_samples', i.e. it has to be within (0, 1]. n_max_training_samples : int Maximum number of training samples (upper bound of 'train_sizes'). Returns ------- train_sizes_abs : array, shape (n_unique_ticks,), dtype int Numbers of training examples that will be used to generate the learning curve. Note that the number of ticks might be less than n_ticks because duplicate entries will be removed. """ train_sizes_abs = np.asarray(train_sizes) n_ticks = train_sizes_abs.shape[0] n_min_required_samples = np.min(train_sizes_abs) n_max_required_samples = np.max(train_sizes_abs) if np.issubdtype(train_sizes_abs.dtype, np.float): if n_min_required_samples <= 0.0 or n_max_required_samples > 1.0: raise ValueError("train_sizes has been interpreted as fractions " "of the maximum number of training samples and " "must be within (0, 1], but is within [%f, %f]." 
% (n_min_required_samples, n_max_required_samples)) train_sizes_abs = astype(train_sizes_abs * n_max_training_samples, dtype=np.int, copy=False) train_sizes_abs = np.clip(train_sizes_abs, 1, n_max_training_samples) else: if (n_min_required_samples <= 0 or n_max_required_samples > n_max_training_samples): raise ValueError("train_sizes has been interpreted as absolute " "numbers of training samples and must be within " "(0, %d], but is within [%d, %d]." % (n_max_training_samples, n_min_required_samples, n_max_required_samples)) train_sizes_abs = np.unique(train_sizes_abs) if n_ticks > train_sizes_abs.shape[0]: warnings.warn("Removed duplicate entries from 'train_sizes'. Number " "of ticks will be less than than the size of " "'train_sizes' %d instead of %d)." % (train_sizes_abs.shape[0], n_ticks), RuntimeWarning) return train_sizes_abs def _incremental_fit_estimator(estimator, X, y, classes, train, test, train_sizes, scorer, verbose): """Train estimator on training subsets incrementally and compute scores.""" train_scores, test_scores = [], [] partitions = zip(train_sizes, np.split(train, train_sizes)[:-1]) for n_train_samples, partial_train in partitions: train_subset = train[:n_train_samples] X_train, y_train = _safe_split(estimator, X, y, train_subset) X_partial_train, y_partial_train = _safe_split(estimator, X, y, partial_train) X_test, y_test = _safe_split(estimator, X, y, test, train_subset) if y_partial_train is None: estimator.partial_fit(X_partial_train, classes=classes) else: estimator.partial_fit(X_partial_train, y_partial_train, classes=classes) train_scores.append(_score(estimator, X_train, y_train, scorer)) test_scores.append(_score(estimator, X_test, y_test, scorer)) return np.array((train_scores, test_scores)).T def validation_curve(estimator, X, y, param_name, param_range, cv=None, scoring=None, n_jobs=1, pre_dispatch="all", verbose=0): """Validation curve. Determine training and test scores for varying parameter values. Compute scores for an estimator with different values of a specified parameter. This is similar to grid search with one parameter. However, this will also compute training scores and is merely a utility for plotting the results. Read more in the :ref:`User Guide <validation_curve>`. Parameters ---------- estimator : object type that implements the "fit" and "predict" methods An object of that type which is cloned for each validation. X : array-like, shape (n_samples, n_features) Training vector, where n_samples is the number of samples and n_features is the number of features. y : array-like, shape (n_samples) or (n_samples, n_features), optional Target relative to X for classification or regression; None for unsupervised learning. param_name : string Name of the parameter that will be varied. param_range : array-like, shape (n_values,) The values of the parameter that will be evaluated. cv : integer, cross-validation generator, optional If an integer is passed, it is the number of folds (defaults to 3). Specific cross-validation objects can be passed, see sklearn.cross_validation module for the list of possible objects scoring : string, callable or None, optional, default: None A string (see model evaluation documentation) or a scorer callable object / function with signature ``scorer(estimator, X, y)``. n_jobs : integer, optional Number of jobs to run in parallel (default 1). pre_dispatch : integer or string, optional Number of predispatched jobs for parallel execution (default is all). The option can reduce the allocated memory. 
The string can be an expression like '2*n_jobs'. verbose : integer, optional Controls the verbosity: the higher, the more messages. Returns ------- train_scores : array, shape (n_ticks, n_cv_folds) Scores on training sets. test_scores : array, shape (n_ticks, n_cv_folds) Scores on test set. Notes ----- See :ref:`examples/model_selection/plot_validation_curve.py <example_model_selection_plot_validation_curve.py>` """ X, y = indexable(X, y) cv = check_cv(cv, X, y, classifier=is_classifier(estimator)) scorer = check_scoring(estimator, scoring=scoring) parallel = Parallel(n_jobs=n_jobs, pre_dispatch=pre_dispatch, verbose=verbose) out = parallel(delayed(_fit_and_score)( estimator, X, y, scorer, train, test, verbose, parameters={param_name: v}, fit_params=None, return_train_score=True) for train, test in cv for v in param_range) out = np.asarray(out)[:, :2] n_params = len(param_range) n_cv_folds = out.shape[0] // n_params out = out.reshape(n_cv_folds, n_params, 2).transpose((2, 1, 0)) return out[0], out[1]
bsd-3-clause
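A small, self-contained run of learning_curve following the usage its docstring describes; the dataset and estimator are chosen here only for illustration.

import numpy as np
from sklearn.datasets import load_digits
from sklearn.naive_bayes import GaussianNB
from sklearn.learning_curve import learning_curve

digits = load_digits()
train_sizes, train_scores, test_scores = learning_curve(
    GaussianNB(), digits.data, digits.target,
    train_sizes=np.linspace(0.1, 1.0, 5), cv=5)
# train_scores/test_scores: one row per training-set size, one column per CV fold.
print(train_sizes)
print(train_scores.mean(axis=1))
print(test_scores.mean(axis=1))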
saisai/phantomjs
src/qt/qtbase/util/local_database/enumdata.py
102
38654
#!/usr/bin/env python ############################################################################# ## ## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies). ## Contact: http://www.qt-project.org/legal ## ## This file is part of the test suite of the Qt Toolkit. ## ## $QT_BEGIN_LICENSE:LGPL$ ## Commercial License Usage ## Licensees holding valid commercial Qt licenses may use this file in ## accordance with the commercial license agreement provided with the ## Software or, alternatively, in accordance with the terms contained in ## a written agreement between you and Digia. For licensing terms and ## conditions see http://qt.digia.com/licensing. For further information ## use the contact form at http://qt.digia.com/contact-us. ## ## GNU Lesser General Public License Usage ## Alternatively, this file may be used under the terms of the GNU Lesser ## General Public License version 2.1 as published by the Free Software ## Foundation and appearing in the file LICENSE.LGPL included in the ## packaging of this file. Please review the following information to ## ensure the GNU Lesser General Public License version 2.1 requirements ## will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html. ## ## In addition, as a special exception, Digia gives you certain additional ## rights. These rights are described in the Digia Qt LGPL Exception ## version 1.1, included in the file LGPL_EXCEPTION.txt in this package. ## ## GNU General Public License Usage ## Alternatively, this file may be used under the terms of the GNU ## General Public License version 3.0 as published by the Free Software ## Foundation and appearing in the file LICENSE.GPL included in the ## packaging of this file. Please review the following information to ## ensure the GNU General Public License version 3.0 requirements will be ## met: http://www.gnu.org/copyleft/gpl.html. ## ## ## $QT_END_LICENSE$ ## ############################################################################# # language_list and country_list reflect the current values of enums in qlocale.h # If new xml language files are available in CLDR, these languages and countries # need to be *appended* to this list. 
language_list = { 0 : [ "AnyLanguage", " " ], 1 : [ "C", " " ], 2 : [ "Abkhazian", "ab" ], 3 : [ "Oromo", "om" ], # macrolanguage 4 : [ "Afar", "aa" ], 5 : [ "Afrikaans", "af" ], 6 : [ "Albanian", "sq" ], # macrolanguage 7 : [ "Amharic", "am" ], 8 : [ "Arabic", "ar" ], # macrolanguage 9 : [ "Armenian", "hy" ], 10 : [ "Assamese", "as" ], 11 : [ "Aymara", "ay" ], # macrolanguage 12 : [ "Azerbaijani", "az" ], # macrolanguage 13 : [ "Bashkir", "ba" ], 14 : [ "Basque", "eu" ], 15 : [ "Bengali", "bn" ], 16 : [ "Dzongkha", "dz" ], 17 : [ "Bihari", "bh" ], 18 : [ "Bislama", "bi" ], 19 : [ "Breton", "br" ], 20 : [ "Bulgarian", "bg" ], 21 : [ "Burmese", "my" ], 22 : [ "Belarusian", "be" ], 23 : [ "Khmer", "km" ], 24 : [ "Catalan", "ca" ], 25 : [ "Chinese", "zh" ], # macrolanguage 26 : [ "Corsican", "co" ], 27 : [ "Croatian", "hr" ], 28 : [ "Czech", "cs" ], 29 : [ "Danish", "da" ], 30 : [ "Dutch", "nl" ], 31 : [ "English", "en" ], 32 : [ "Esperanto", "eo" ], 33 : [ "Estonian", "et" ], # macrolanguage 34 : [ "Faroese", "fo" ], 35 : [ "Fijian", "fj" ], 36 : [ "Finnish", "fi" ], 37 : [ "French", "fr" ], 38 : [ "Western Frisian", "fy" ], 39 : [ "Gaelic", "gd" ], 40 : [ "Galician", "gl" ], 41 : [ "Georgian", "ka" ], 42 : [ "German", "de" ], 43 : [ "Greek", "el" ], 44 : [ "Greenlandic", "kl" ], 45 : [ "Guarani", "gn" ], # macrolanguage 46 : [ "Gujarati", "gu" ], 47 : [ "Hausa", "ha" ], 48 : [ "Hebrew", "he" ], 49 : [ "Hindi", "hi" ], 50 : [ "Hungarian", "hu" ], 51 : [ "Icelandic", "is" ], 52 : [ "Indonesian", "id" ], 53 : [ "Interlingua", "ia" ], 54 : [ "Interlingue", "ie" ], 55 : [ "Inuktitut", "iu" ], # macrolanguage 56 : [ "Inupiak", "ik" ], # macrolanguage 57 : [ "Irish", "ga" ], 58 : [ "Italian", "it" ], 59 : [ "Japanese", "ja" ], 60 : [ "Javanese", "jv" ], 61 : [ "Kannada", "kn" ], 62 : [ "Kashmiri", "ks" ], 63 : [ "Kazakh", "kk" ], 64 : [ "Kinyarwanda", "rw" ], 65 : [ "Kirghiz", "ky" ], 66 : [ "Korean", "ko" ], 67 : [ "Kurdish", "ku" ], # macrolanguage 68 : [ "Rundi", "rn" ], 69 : [ "Lao", "lo" ], 70 : [ "Latin", "la" ], 71 : [ "Latvian", "lv" ], # macrolanguage 72 : [ "Lingala", "ln" ], 73 : [ "Lithuanian", "lt" ], 74 : [ "Macedonian", "mk" ], 75 : [ "Malagasy", "mg" ], # macrolanguage 76 : [ "Malay", "ms" ], # macrolanguage 77 : [ "Malayalam", "ml" ], 78 : [ "Maltese", "mt" ], 79 : [ "Maori", "mi" ], 80 : [ "Marathi", "mr" ], 81 : [ "Marshallese", "mh" ], 82 : [ "Mongolian", "mn" ], # macrolanguage 83 : [ "Nauru", "na" ], 84 : [ "Nepali", "ne" ], # macrolanguage 85 : [ "NorwegianBokmal", "nb" ], 86 : [ "Occitan", "oc" ], 87 : [ "Oriya", "or" ], # macrolanguage 88 : [ "Pashto", "ps" ], # macrolanguage 89 : [ "Persian", "fa" ], # macrolanguage 90 : [ "Polish", "pl" ], 91 : [ "Portuguese", "pt" ], 92 : [ "Punjabi", "pa" ], 93 : [ "Quechua", "qu" ], # macrolanguage 94 : [ "Romansh", "rm" ], 95 : [ "Romanian", "ro" ], 96 : [ "Russian", "ru" ], 97 : [ "Samoan", "sm" ], 98 : [ "Sango", "sg" ], 99 : [ "Sanskrit", "sa" ], 100 : [ "Serbian", "sr" ], 101 : [ "Ossetic", "os" ], 102 : [ "Southern Sotho", "st" ], 103 : [ "Tswana", "tn" ], 104 : [ "Shona", "sn" ], 105 : [ "Sindhi", "sd" ], 106 : [ "Sinhala", "si" ], 107 : [ "Swati", "ss" ], 108 : [ "Slovak", "sk" ], 109 : [ "Slovenian", "sl" ], 110 : [ "Somali", "so" ], 111 : [ "Spanish", "es" ], 112 : [ "Sundanese", "su" ], 113 : [ "Swahili", "sw" ], # macrolanguage 114 : [ "Swedish", "sv" ], 115 : [ "Sardinian", "sc" ], # macrolanguage 116 : [ "Tajik", "tg" ], 117 : [ "Tamil", "ta" ], 118 : [ "Tatar", "tt" ], 119 : [ "Telugu", "te" ], 120 : [ "Thai", "th" ], 
121 : [ "Tibetan", "bo" ], 122 : [ "Tigrinya", "ti" ], 123 : [ "Tongan", "to" ], 124 : [ "Tsonga", "ts" ], 125 : [ "Turkish", "tr" ], 126 : [ "Turkmen", "tk" ], 127 : [ "Tahitian", "ty" ], 128 : [ "Uighur", "ug" ], 129 : [ "Ukrainian", "uk" ], 130 : [ "Urdu", "ur" ], 131 : [ "Uzbek", "uz" ], # macrolanguage 132 : [ "Vietnamese", "vi" ], 133 : [ "Volapuk", "vo" ], 134 : [ "Welsh", "cy" ], 135 : [ "Wolof", "wo" ], 136 : [ "Xhosa", "xh" ], 137 : [ "Yiddish", "yi" ], # macrolanguage 138 : [ "Yoruba", "yo" ], 139 : [ "Zhuang", "za" ], # macrolanguage 140 : [ "Zulu", "zu" ], 141 : [ "NorwegianNynorsk", "nn" ], 142 : [ "Bosnian", "bs" ], 143 : [ "Divehi", "dv" ], 144 : [ "Manx", "gv" ], 145 : [ "Cornish", "kw" ], 146 : [ "Akan", "ak" ], # macrolanguage 147 : [ "Konkani", "kok" ], 148 : [ "Ga", "gaa" ], 149 : [ "Igbo", "ig" ], 150 : [ "Kamba", "kam" ], 151 : [ "Syriac", "syr" ], 152 : [ "Blin", "byn" ], 153 : [ "Geez", "gez" ], 154 : [ "Koro", "kfo" ], 155 : [ "Sidamo", "sid" ], 156 : [ "Atsam", "cch" ], 157 : [ "Tigre", "tig" ], 158 : [ "Jju", "kaj" ], 159 : [ "Friulian", "fur" ], 160 : [ "Venda", "ve" ], 161 : [ "Ewe", "ee" ], 162 : [ "Walamo", "wal" ], 163 : [ "Hawaiian", "haw" ], 164 : [ "Tyap", "kcg" ], 165 : [ "Nyanja", "ny" ], 166 : [ "Filipino", "fil" ], 167 : [ "Swiss German", "gsw" ], 168 : [ "Sichuan Yi", "ii" ], 169 : [ "Kpelle", "kpe" ], 170 : [ "Low German", "nds" ], 171 : [ "South Ndebele", "nr" ], 172 : [ "Northern Sotho", "nso" ], 173 : [ "Northern Sami", "se" ], 174 : [ "Taroko", "trv" ], 175 : [ "Gusii", "guz" ], 176 : [ "Taita", "dav" ], 177 : [ "Fulah", "ff" ], # macrolanguage 178 : [ "Kikuyu", "ki" ], 179 : [ "Samburu", "saq" ], 180 : [ "Sena", "seh" ], 181 : [ "North Ndebele", "nd" ], 182 : [ "Rombo", "rof" ], 183 : [ "Tachelhit", "shi" ], 184 : [ "Kabyle", "kab" ], 185 : [ "Nyankole", "nyn" ], 186 : [ "Bena", "bez" ], 187 : [ "Vunjo", "vun" ], 188 : [ "Bambara", "bm" ], 189 : [ "Embu", "ebu" ], 190 : [ "Cherokee", "chr" ], 191 : [ "Morisyen", "mfe" ], 192 : [ "Makonde", "kde" ], 193 : [ "Langi", "lag" ], 194 : [ "Ganda", "lg" ], 195 : [ "Bemba", "bem" ], 196 : [ "Kabuverdianu", "kea" ], 197 : [ "Meru", "mer" ], 198 : [ "Kalenjin", "kln" ], 199 : [ "Nama", "naq" ], 200 : [ "Machame", "jmc" ], 201 : [ "Colognian", "ksh" ], 202 : [ "Masai", "mas" ], 203 : [ "Soga", "xog" ], 204 : [ "Luyia", "luy" ], 205 : [ "Asu", "asa" ], 206 : [ "Teso", "teo" ], 207 : [ "Saho", "ssy" ], 208 : [ "Koyra Chiini", "khq" ], 209 : [ "Rwa", "rwk" ], 210 : [ "Luo", "luo" ], 211 : [ "Chiga", "cgg" ], 212 : [ "Central Morocco Tamazight", "tzm" ], 213 : [ "Koyraboro Senni", "ses" ], 214 : [ "Shambala", "ksb" ], 215 : [ "Bodo", "brx" ], 216 : [ "Avaric", "av" ], 217 : [ "Chamorro", "ch" ], 218 : [ "Chechen", "ce" ], 219 : [ "Church", "cu" ], # macrolanguage 220 : [ "Chuvash", "cv" ], 221 : [ "Cree", "cr" ], # macrolanguage 222 : [ "Haitian", "ht" ], 223 : [ "Herero", "hz" ], 224 : [ "Hiri Motu", "ho" ], 225 : [ "Kanuri", "kr" ], # macrolanguage 226 : [ "Komi", "kv" ], # macrolanguage 227 : [ "Kongo", "kg" ], # macrolanguage 228 : [ "Kwanyama", "kj" ], 229 : [ "Limburgish", "li" ], 230 : [ "LubaKatanga", "lu" ], 231 : [ "Luxembourgish", "lb" ], 232 : [ "Navaho", "nv" ], 233 : [ "Ndonga", "ng" ], 234 : [ "Ojibwa", "oj" ], # macrolanguage 235 : [ "Pali", "pi" ], # macrolanguage 236 : [ "Walloon", "wa" ], 237 : [ "Aghem", "agq" ], 238 : [ "Basaa", "bas" ], 239 : [ "Zarma", "dje" ], 240 : [ "Duala", "dua" ], 241 : [ "JolaFonyi", "dyo" ], 242 : [ "Ewondo", "ewo" ], 243 : [ "Bafia", "ksf" ], 244 : [ 
"MakhuwaMeetto", "mgh" ], 245 : [ "Mundang", "mua" ], 246 : [ "Kwasio", "nmg" ], 247 : [ "Nuer", "nus" ], 248 : [ "Sakha", "sah" ], 249 : [ "Sangu", "sbp" ], 250 : [ "Congo Swahili", "swc" ], 251 : [ "Tasawaq", "twq" ], 252 : [ "Vai", "vai" ], 253 : [ "Walser", "wae" ], 254 : [ "Yangben", "yav" ], 255 : [ "Avestan", "ae" ], 256 : [ "Asturian", "ast" ], 257 : [ "Ngomba", "jgo" ], 258 : [ "Kako", "kkj" ], 259 : [ "Meta", "mgo" ], 260 : [ "Ngiemboon", "nnh" ], 261 : [ "Aragonese", "an" ], 262 : [ "Akkadian", "akk" ], 263 : [ "AncientEgyptian", "egy" ], 264 : [ "AncientGreek", "grc" ], 265 : [ "Aramaic", "arc" ], 266 : [ "Balinese", "ban" ], 267 : [ "Bamun", "bax" ], 268 : [ "BatakToba", "bbc" ], 269 : [ "Buginese", "bug" ], 270 : [ "Buhid", "bku" ], 271 : [ "Carian", "xcr" ], 272 : [ "Chakma", "ccp" ], 273 : [ "ClassicalMandaic", "myz" ], 274 : [ "Coptic", "cop" ], 275 : [ "Dogri", "doi" ], # macrolanguage 276 : [ "EasternCham", "cjm" ], 277 : [ "EasternKayah", "eky" ], 278 : [ "Etruscan", "ett" ], 279 : [ "Gothic", "got" ], 280 : [ "Hanunoo", "hnn" ], 281 : [ "Ingush", "inh" ], 282 : [ "LargeFloweryMiao", "hmd" ], 283 : [ "Lepcha", "lep" ], 284 : [ "Limbu", "lif" ], 285 : [ "Lisu", "lis" ], 286 : [ "Lu", "khb" ], 287 : [ "Lycian", "xlc" ], 288 : [ "Lydian", "xld" ], 289 : [ "Mandingo", "man" ], # macrolanguage 290 : [ "Manipuri", "mni" ], 291 : [ "Meroitic", "xmr" ], 292 : [ "NorthernThai", "nod" ], 293 : [ "OldIrish", "sga" ], 294 : [ "OldNorse", "non" ], 295 : [ "OldPersian", "peo" ], 296 : [ "OldTurkish", "otk" ], 297 : [ "Pahlavi", "pal" ], 298 : [ "Parthian", "xpr" ], 299 : [ "Phoenician", "phn" ], 300 : [ "PrakritLanguage", "pra" ], 301 : [ "Rejang", "rej" ], 302 : [ "Sabaean", "xsa" ], 303 : [ "Samaritan", "smp" ], 304 : [ "Santali", "sat" ], 305 : [ "Saurashtra", "saz" ], 306 : [ "Sora", "srb" ], 307 : [ "Sylheti", "syl" ], 308 : [ "Tagbanwa", "tbw" ], 309 : [ "TaiDam", "blt" ], 310 : [ "TaiNua", "tdd" ], 311 : [ "Ugaritic", "uga" ], 312 : [ "Akoose", "bss" ], 313 : [ "Lakota", "lkt" ], 314 : [ "Standard Moroccan Tamazight", "zgh" ] } country_list = { 0 : [ "AnyCountry", "ZZ" ], 1 : [ "Afghanistan", "AF" ], 2 : [ "Albania", "AL" ], 3 : [ "Algeria", "DZ" ], 4 : [ "AmericanSamoa", "AS" ], 5 : [ "Andorra", "AD" ], 6 : [ "Angola", "AO" ], 7 : [ "Anguilla", "AI" ], 8 : [ "Antarctica", "AQ" ], 9 : [ "AntiguaAndBarbuda", "AG" ], 10 : [ "Argentina", "AR" ], 11 : [ "Armenia", "AM" ], 12 : [ "Aruba", "AW" ], 13 : [ "Australia", "AU" ], 14 : [ "Austria", "AT" ], 15 : [ "Azerbaijan", "AZ" ], 16 : [ "Bahamas", "BS" ], 17 : [ "Bahrain", "BH" ], 18 : [ "Bangladesh", "BD" ], 19 : [ "Barbados", "BB" ], 20 : [ "Belarus", "BY" ], 21 : [ "Belgium", "BE" ], 22 : [ "Belize", "BZ" ], 23 : [ "Benin", "BJ" ], 24 : [ "Bermuda", "BM" ], 25 : [ "Bhutan", "BT" ], 26 : [ "Bolivia", "BO" ], 27 : [ "BosniaAndHerzegowina", "BA" ], 28 : [ "Botswana", "BW" ], 29 : [ "BouvetIsland", "BV" ], 30 : [ "Brazil", "BR" ], 31 : [ "BritishIndianOceanTerritory", "IO" ], 32 : [ "Brunei", "BN" ], 33 : [ "Bulgaria", "BG" ], 34 : [ "BurkinaFaso", "BF" ], 35 : [ "Burundi", "BI" ], 36 : [ "Cambodia", "KH" ], 37 : [ "Cameroon", "CM" ], 38 : [ "Canada", "CA" ], 39 : [ "CapeVerde", "CV" ], 40 : [ "CaymanIslands", "KY" ], 41 : [ "CentralAfricanRepublic", "CF" ], 42 : [ "Chad", "TD" ], 43 : [ "Chile", "CL" ], 44 : [ "China", "CN" ], 45 : [ "ChristmasIsland", "CX" ], 46 : [ "CocosIslands", "CC" ], 47 : [ "Colombia", "CO" ], 48 : [ "Comoros", "KM" ], 49 : [ "CongoKinshasa", "CD" ], 50 : [ "CongoBrazzaville", "CG" ], 51 : [ "CookIslands", 
"CK" ], 52 : [ "CostaRica", "CR" ], 53 : [ "IvoryCoast", "CI" ], 54 : [ "Croatia", "HR" ], 55 : [ "Cuba", "CU" ], 56 : [ "Cyprus", "CY" ], 57 : [ "CzechRepublic", "CZ" ], 58 : [ "Denmark", "DK" ], 59 : [ "Djibouti", "DJ" ], 60 : [ "Dominica", "DM" ], 61 : [ "DominicanRepublic", "DO" ], 62 : [ "EastTimor", "TL" ], 63 : [ "Ecuador", "EC" ], 64 : [ "Egypt", "EG" ], 65 : [ "ElSalvador", "SV" ], 66 : [ "EquatorialGuinea", "GQ" ], 67 : [ "Eritrea", "ER" ], 68 : [ "Estonia", "EE" ], 69 : [ "Ethiopia", "ET" ], 70 : [ "FalklandIslands", "FK" ], 71 : [ "FaroeIslands", "FO" ], 72 : [ "Fiji", "FJ" ], 73 : [ "Finland", "FI" ], 74 : [ "France", "FR" ], 75 : [ "Guernsey", "GG" ], 76 : [ "FrenchGuiana", "GF" ], 77 : [ "FrenchPolynesia", "PF" ], 78 : [ "FrenchSouthernTerritories", "TF" ], 79 : [ "Gabon", "GA" ], 80 : [ "Gambia", "GM" ], 81 : [ "Georgia", "GE" ], 82 : [ "Germany", "DE" ], 83 : [ "Ghana", "GH" ], 84 : [ "Gibraltar", "GI" ], 85 : [ "Greece", "GR" ], 86 : [ "Greenland", "GL" ], 87 : [ "Grenada", "GD" ], 88 : [ "Guadeloupe", "GP" ], 89 : [ "Guam", "GU" ], 90 : [ "Guatemala", "GT" ], 91 : [ "Guinea", "GN" ], 92 : [ "GuineaBissau", "GW" ], 93 : [ "Guyana", "GY" ], 94 : [ "Haiti", "HT" ], 95 : [ "HeardAndMcDonaldIslands", "HM" ], 96 : [ "Honduras", "HN" ], 97 : [ "HongKong", "HK" ], 98 : [ "Hungary", "HU" ], 99 : [ "Iceland", "IS" ], 100 : [ "India", "IN" ], 101 : [ "Indonesia", "ID" ], 102 : [ "Iran", "IR" ], 103 : [ "Iraq", "IQ" ], 104 : [ "Ireland", "IE" ], 105 : [ "Israel", "IL" ], 106 : [ "Italy", "IT" ], 107 : [ "Jamaica", "JM" ], 108 : [ "Japan", "JP" ], 109 : [ "Jordan", "JO" ], 110 : [ "Kazakhstan", "KZ" ], 111 : [ "Kenya", "KE" ], 112 : [ "Kiribati", "KI" ], 113 : [ "NorthKorea", "KP" ], 114 : [ "SouthKorea", "KR" ], 115 : [ "Kuwait", "KW" ], 116 : [ "Kyrgyzstan", "KG" ], 117 : [ "Laos", "LA" ], 118 : [ "Latvia", "LV" ], 119 : [ "Lebanon", "LB" ], 120 : [ "Lesotho", "LS" ], 121 : [ "Liberia", "LR" ], 122 : [ "Libya", "LY" ], 123 : [ "Liechtenstein", "LI" ], 124 : [ "Lithuania", "LT" ], 125 : [ "Luxembourg", "LU" ], 126 : [ "Macau", "MO" ], 127 : [ "Macedonia", "MK" ], 128 : [ "Madagascar", "MG" ], 129 : [ "Malawi", "MW" ], 130 : [ "Malaysia", "MY" ], 131 : [ "Maldives", "MV" ], 132 : [ "Mali", "ML" ], 133 : [ "Malta", "MT" ], 134 : [ "MarshallIslands", "MH" ], 135 : [ "Martinique", "MQ" ], 136 : [ "Mauritania", "MR" ], 137 : [ "Mauritius", "MU" ], 138 : [ "Mayotte", "YT" ], 139 : [ "Mexico", "MX" ], 140 : [ "Micronesia", "FM" ], 141 : [ "Moldova", "MD" ], 142 : [ "Monaco", "MC" ], 143 : [ "Mongolia", "MN" ], 144 : [ "Montserrat", "MS" ], 145 : [ "Morocco", "MA" ], 146 : [ "Mozambique", "MZ" ], 147 : [ "Myanmar", "MM" ], 148 : [ "Namibia", "NA" ], 149 : [ "Nauru", "NR" ], 150 : [ "Nepal", "NP" ], 151 : [ "Netherlands", "NL" ], 152 : [ "CuraSao", "CW" ], 153 : [ "NewCaledonia", "NC" ], 154 : [ "NewZealand", "NZ" ], 155 : [ "Nicaragua", "NI" ], 156 : [ "Niger", "NE" ], 157 : [ "Nigeria", "NG" ], 158 : [ "Niue", "NU" ], 159 : [ "NorfolkIsland", "NF" ], 160 : [ "NorthernMarianaIslands", "MP" ], 161 : [ "Norway", "NO" ], 162 : [ "Oman", "OM" ], 163 : [ "Pakistan", "PK" ], 164 : [ "Palau", "PW" ], 165 : [ "PalestinianTerritories", "PS" ], 166 : [ "Panama", "PA" ], 167 : [ "PapuaNewGuinea", "PG" ], 168 : [ "Paraguay", "PY" ], 169 : [ "Peru", "PE" ], 170 : [ "Philippines", "PH" ], 171 : [ "Pitcairn", "PN" ], 172 : [ "Poland", "PL" ], 173 : [ "Portugal", "PT" ], 174 : [ "PuertoRico", "PR" ], 175 : [ "Qatar", "QA" ], 176 : [ "Reunion", "RE" ], 177 : [ "Romania", "RO" ], 178 : [ "Russia", "RU" ], 
179 : [ "Rwanda", "RW" ], 180 : [ "SaintKittsAndNevis", "KN" ], 181 : [ "SaintLucia", "LC" ], 182 : [ "SaintVincentAndTheGrenadines", "VC" ], 183 : [ "Samoa", "WS" ], 184 : [ "SanMarino", "SM" ], 185 : [ "SaoTomeAndPrincipe", "ST" ], 186 : [ "SaudiArabia", "SA" ], 187 : [ "Senegal", "SN" ], 188 : [ "Seychelles", "SC" ], 189 : [ "SierraLeone", "SL" ], 190 : [ "Singapore", "SG" ], 191 : [ "Slovakia", "SK" ], 192 : [ "Slovenia", "SI" ], 193 : [ "SolomonIslands", "SB" ], 194 : [ "Somalia", "SO" ], 195 : [ "SouthAfrica", "ZA" ], 196 : [ "SouthGeorgiaAndTheSouthSandwichIslands", "GS" ], 197 : [ "Spain", "ES" ], 198 : [ "SriLanka", "LK" ], 199 : [ "SaintHelena", "SH" ], 200 : [ "SaintPierreAndMiquelon", "PM" ], 201 : [ "Sudan", "SD" ], 202 : [ "Suriname", "SR" ], 203 : [ "SvalbardAndJanMayenIslands", "SJ" ], 204 : [ "Swaziland", "SZ" ], 205 : [ "Sweden", "SE" ], 206 : [ "Switzerland", "CH" ], 207 : [ "Syria", "SY" ], 208 : [ "Taiwan", "TW" ], 209 : [ "Tajikistan", "TJ" ], 210 : [ "Tanzania", "TZ" ], 211 : [ "Thailand", "TH" ], 212 : [ "Togo", "TG" ], 213 : [ "Tokelau", "TK" ], 214 : [ "Tonga", "TO" ], 215 : [ "TrinidadAndTobago", "TT" ], 216 : [ "Tunisia", "TN" ], 217 : [ "Turkey", "TR" ], 218 : [ "Turkmenistan", "TM" ], 219 : [ "TurksAndCaicosIslands", "TC" ], 220 : [ "Tuvalu", "TV" ], 221 : [ "Uganda", "UG" ], 222 : [ "Ukraine", "UA" ], 223 : [ "UnitedArabEmirates", "AE" ], 224 : [ "UnitedKingdom", "GB" ], 225 : [ "UnitedStates", "US" ], 226 : [ "UnitedStatesMinorOutlyingIslands", "UM" ], 227 : [ "Uruguay", "UY" ], 228 : [ "Uzbekistan", "UZ" ], 229 : [ "Vanuatu", "VU" ], 230 : [ "VaticanCityState", "VA" ], 231 : [ "Venezuela", "VE" ], 232 : [ "Vietnam", "VN" ], 233 : [ "BritishVirginIslands", "VG" ], 234 : [ "UnitedStatesVirginIslands", "VI" ], 235 : [ "WallisAndFutunaIslands", "WF" ], 236 : [ "WesternSahara", "EH" ], 237 : [ "Yemen", "YE" ], 238 : [ "CanaryIslands", "IC" ], 239 : [ "Zambia", "ZM" ], 240 : [ "Zimbabwe", "ZW" ], 241 : [ "ClippertonIsland", "CP" ], 242 : [ "Montenegro", "ME" ], 243 : [ "Serbia", "RS" ], 244 : [ "Saint Barthelemy", "BL" ], 245 : [ "Saint Martin", "MF" ], 246 : [ "LatinAmericaAndTheCaribbean", "419" ], 247 : [ "AscensionIsland", "AC" ], 248 : [ "AlandIslands", "AX" ], 249 : [ "DiegoGarcia", "DG" ], 250 : [ "CeutaAndMelilla", "EA" ], 251 : [ "IsleOfMan", "IM" ], 252 : [ "Jersey", "JE" ], 253 : [ "TristanDaCunha", "TA" ], 254 : [ "SouthSudan", "SS" ], 255 : [ "Bonaire", "BQ" ], 256 : [ "SintMaarten", "SX" ], 257 : [ "Kosovo", "XK" ] } script_list = { 0 : [ "AnyScript", "Zzzz" ], 1 : [ "Arabic", "Arab" ], 2 : [ "Cyrillic", "Cyrl" ], 3 : [ "Deseret", "Dsrt" ], 4 : [ "Gurmukhi", "Guru" ], 5 : [ "Simplified Han", "Hans" ], 6 : [ "Traditional Han", "Hant" ], 7 : [ "Latin", "Latn" ], 8 : [ "Mongolian", "Mong" ], 9 : [ "Tifinagh", "Tfng" ], 10 : [ "Armenian", "Armn" ], 11 : [ "Bengali", "Beng" ], 12 : [ "Cherokee", "Cher" ], 13 : [ "Devanagari", "Deva" ], 14 : [ "Ethiopic", "Ethi" ], 15 : [ "Georgian", "Geor" ], 16 : [ "Greek", "Grek" ], 17 : [ "Gujarati", "Gujr" ], 18 : [ "Hebrew", "Hebr" ], 19 : [ "Japanese", "Jpan" ], 20 : [ "Khmer", "Khmr" ], 21 : [ "Kannada", "Knda" ], 22 : [ "Korean", "Kore" ], 23 : [ "Lao", "Laoo" ], 24 : [ "Malayalam", "Mlym" ], 25 : [ "Myanmar", "Mymr" ], 26 : [ "Oriya", "Orya" ], 27 : [ "Tamil", "Taml" ], 28 : [ "Telugu", "Telu" ], 29 : [ "Thaana", "Thaa" ], 30 : [ "Thai", "Thai" ], 31 : [ "Tibetan", "Tibt" ], 32 : [ "Sinhala", "Sinh" ], 33 : [ "Syriac", "Syrc" ], 34 : [ "Yi", "Yiii" ], 35 : [ "Vai", "Vaii" ], 36 : [ "Avestan", "Avst" ], 37 : [ 
"Balinese", "Bali" ], 38 : [ "Bamum", "Bamu" ], 39 : [ "Batak", "Batk" ], 40 : [ "Bopomofo", "Bopo" ], 41 : [ "Brahmi", "Brah" ], 42 : [ "Buginese", "Bugi" ], 43 : [ "Buhid", "Buhd" ], 44 : [ "CanadianAboriginal", "Cans" ], 45 : [ "Carian", "Cari" ], 46 : [ "Chakma", "Cakm" ], 47 : [ "Cham", "Cham" ], 48 : [ "Coptic", "Copt" ], 49 : [ "Cypriot", "Cprt" ], 50 : [ "Egyptian Hieroglyphs", "Egyp" ], 51 : [ "Fraser", "Lisu" ], 52 : [ "Glagolitic", "Glag" ], 53 : [ "Gothic", "Goth" ], 54 : [ "Han", "Hani" ], 55 : [ "Hangul", "Hang" ], 56 : [ "Hanunoo", "Hano" ], 57 : [ "Imperial Aramaic", "Armi" ], 58 : [ "Inscriptional Pahlavi", "Phli" ], 59 : [ "Inscriptional Parthian", "Prti" ], 60 : [ "Javanese", "Java" ], 61 : [ "Kaithi", "Kthi" ], 62 : [ "Katakana", "Kana" ], 63 : [ "Kayah Li", "Kali" ], 64 : [ "Kharoshthi", "Khar" ], 65 : [ "Lanna", "Lana" ], 66 : [ "Lepcha", "Lepc" ], 67 : [ "Limbu", "Limb" ], 68 : [ "Linear B", "Linb" ], 69 : [ "Lycian", "Lyci" ], 70 : [ "Lydian", "Lydi" ], 71 : [ "Mandaean", "Mand" ], 72 : [ "Meitei Mayek", "Mtei" ], 73 : [ "Meroitic", "Mero" ], 74 : [ "Meroitic Cursive", "Merc" ], 75 : [ "Nko", "Nkoo" ], 76 : [ "New Tai Lue", "Talu" ], 77 : [ "Ogham", "Ogam" ], 78 : [ "Ol Chiki", "Olck" ], 79 : [ "Old Italic", "Ital" ], 80 : [ "Old Persian", "Xpeo" ], 81 : [ "Old South Arabian", "Sarb" ], 82 : [ "Orkhon", "Orkh" ], 83 : [ "Osmanya", "Osma" ], 84 : [ "Phags Pa", "Phag" ], 85 : [ "Phoenician", "Phnx" ], 86 : [ "Pollard Phonetic", "Plrd" ], 87 : [ "Rejang", "Rjng" ], 88 : [ "Runic", "Runr" ], 89 : [ "Samaritan", "Samr" ], 90 : [ "Saurashtra", "Saur" ], 91 : [ "Sharada", "Shrd" ], 92 : [ "Shavian", "Shaw" ], 93 : [ "Sora Sompeng", "Sora" ], 94 : [ "Cuneiform", "Xsux" ], 95 : [ "Sundanese", "Sund" ], 96 : [ "Syloti Nagri", "Sylo" ], 97 : [ "Tagalog", "Tglg" ], 98 : [ "Tagbanwa", "Tagb" ], 99 : [ "Tai Le", "Tale" ], 100 : [ "Tai Viet", "Tavt" ], 101 : [ "Takri", "Takr" ], 102 : [ "Ugaritic", "Ugar" ], 103 : [ "Braille", "Brai" ], 104 : [ "Hiragana", "Hira" ] # ### : [ "Blissymbols", "Blis" ], # ### : [ "Linear A", "Lina" ], # ### : [ "Naxi Geba", "Nkgb" ], # ### : [ "Pahawh Hmong", "Hmng" ], # ### : [ "Varang Kshiti", "Wara" ], } def countryCodeToId(code): if not code: return 0 for country_id in country_list: if country_list[country_id][1] == code: return country_id return -1 def languageCodeToId(code): if not code: return 0 for language_id in language_list: if language_list[language_id][1] == code: return language_id return -1 def scriptCodeToId(code): if not code: return 0 for script_id in script_list: if script_list[script_id][1] == code: return script_id return -1
bsd-3-clause
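The three *CodeToId helpers at the end of the record above do a linear scan over their tables on every call. A minimal standalone sketch of the same lookup with a reversed dictionary, assuming the same table shape and the same sentinel convention (0 for an empty code, -1 for an unknown one); the two-entry country_list below is an illustrative excerpt, not the full table:

# Build the reverse index once, then look codes up in O(1).
country_list = {74: ["France", "FR"], 82: ["Germany", "DE"]}  # excerpt only

_code_to_country_id = {entry[1]: cid for cid, entry in country_list.items()}

def country_code_to_id(code):
    # Same contract as countryCodeToId() above: falsy -> 0, unknown -> -1.
    if not code:
        return 0
    return _code_to_country_id.get(code, -1)

assert country_code_to_id("FR") == 74
assert country_code_to_id("") == 0
assert country_code_to_id("XX") == -1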
ryfeus/lambda-packs
Sklearn_scipy_numpy/source/pip/utils/glibc.py
350
2939
from __future__ import absolute_import import re import ctypes import platform import warnings def glibc_version_string(): "Returns glibc version string, or None if not using glibc." # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen # manpage says, "If filename is NULL, then the returned handle is for the # main program". This way we can let the linker do the work to figure out # which libc our process is actually using. process_namespace = ctypes.CDLL(None) try: gnu_get_libc_version = process_namespace.gnu_get_libc_version except AttributeError: # Symbol doesn't exist -> therefore, we are not linked to # glibc. return None # Call gnu_get_libc_version, which returns a string like "2.5" gnu_get_libc_version.restype = ctypes.c_char_p version_str = gnu_get_libc_version() # py2 / py3 compatibility: if not isinstance(version_str, str): version_str = version_str.decode("ascii") return version_str # Separated out from have_compatible_glibc for easier unit testing def check_glibc_version(version_str, required_major, minimum_minor): # Parse string and check against requested version. # # We use a regexp instead of str.split because we want to discard any # random junk that might come after the minor version -- this might happen # in patched/forked versions of glibc (e.g. Linaro's version of glibc # uses version strings like "2.20-2014.11"). See gh-3588. m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str) if not m: warnings.warn("Expected glibc version with 2 components major.minor," " got: %s" % version_str, RuntimeWarning) return False return (int(m.group("major")) == required_major and int(m.group("minor")) >= minimum_minor) def have_compatible_glibc(required_major, minimum_minor): version_str = glibc_version_string() if version_str is None: return False return check_glibc_version(version_str, required_major, minimum_minor) # platform.libc_ver regularly returns completely nonsensical glibc # versions. E.g. on my computer, platform says: # # ~$ python2.7 -c 'import platform; print(platform.libc_ver())' # ('glibc', '2.7') # ~$ python3.5 -c 'import platform; print(platform.libc_ver())' # ('glibc', '2.9') # # But the truth is: # # ~$ ldd --version # ldd (Debian GLIBC 2.22-11) 2.22 # # This is unfortunate, because it means that the linehaul data on libc # versions that was generated by pip 8.1.2 and earlier is useless and # misleading. Solution: instead of using platform, use our code that actually # works. def libc_ver(): glibc_version = glibc_version_string() if glibc_version is None: # For non-glibc platforms, fall back on platform.libc_ver return platform.libc_ver() else: return ("glibc", glibc_version)
mit
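A short usage sketch for the record above, assuming the module is saved locally and importable as glibc.py; it exercises the suffix-tolerant parser (note how a Linaro-style version string passes) and the libc_ver() fallback:

from glibc import check_glibc_version, glibc_version_string, libc_ver

# Vendor suffixes after the minor version are discarded by the regexp.
assert check_glibc_version("2.20-2014.11", 2, 17) is True
assert check_glibc_version("2.5", 2, 17) is False       # minor too old
assert check_glibc_version("nonsense", 2, 17) is False  # warns, returns False

print(glibc_version_string())  # e.g. "2.22", or None on non-glibc systems
print(libc_ver())              # ("glibc", "2.22"), or platform.libc_ver()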
shubhangiKishore/pattern
examples/08-server/03-wiki/wiki.py
3
4618
from __future__ import print_function import os, sys; sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", "..")) from pattern.server import App, template, threadsafe from codecs import open # This example demonstrates a simple wiki served by pattern.server. # A wiki is a web app where each page can be edited (e.g, Wikipedia). # We will store the contents of each page as a file in /data. app = App(name="wiki") # Our wiki app has a single URL handler listening at the root ("/"). # It takes any combination of positional and keyword arguments. # This means that any URL will be routed to the index() function. # For example, http://127.0.0.1:8080/pages/bio.html?edit calls index() # with path=("pages", "bio.html") and data={"edit": ""}. @app.route("/") def index(*path, **data): #print "path:", path #print "data:", data # Construct a file name in /data from the URL path. # For example, path=("pages", "bio.html") # is mapped to "/data/pages/bio.html.txt". page = "/".join(path) page = page if page else "index.html" page = page.replace(" ", "-") page = page + ".txt" page = os.path.join(app.path, "data", page) # Absolute paths are safer. #print "page:", page # If the URL ends in "?save", update the page content. if "save" in data and "content" in data: return save(page, src=data["content"]) # If the URL ends in "?edit", show the page editor. if "edit" in data: return edit(page) # If the page does not exist, show the page editor. if not os.path.exists(page): return edit(page) # Show the page. else: return view(page) # The pattern.server module has a simple template() function # that takes a file path or a string and optional parameters. # Placeholders in the template source (e.g., "$name") # are replaced with the parameter values. # Below is a template with placeholders for page name and content. # The page content is loaded from a file stored in /data. # The page name is parsed from the filename, # e.g., "/data/index.html.txt" => "index.html". wiki = """ <!doctype html> <html> <head> <title>$name</title> <meta charset="utf-8"> </head> <body> <h3>$name</h3> $content <br> <a href="?edit">edit</a> </body> </html> """ # The name() function takes a file path (e.g., "/data/index.html.txt") # and returns the page name ("index.html"). def name(page): name = os.path.basename(page) # "/data/index.html.txt" => "index.html.txt" name = os.path.splitext(name)[0] # ("index.html", ".txt") => "index.html" return name # We could also have a function for a *display* name (e.g., "Index"). # Something like: def displayname(page): return name(name(page)).replace("-", " ").title() # The view() function is called when a page needs to be displayed. # Our template has two placeholders: the page $name and $content. # We load the $content from the contents of the given file path. # We load the $name using the name() function above. def view(page): print(displayname(page)) return template(wiki, name=name(page), content=open(page).read()) # The edit() function is called when a URL ends in "?edit", # e.g., http://127.0.0.1:8080/index.html?edit. # In this case, we don't show the contents of "/data/index.html.txt" directly, # but wrapped inside a <textarea> for editing instead. # Once the user is done editing and clicks "Submit", # the browser redirects to http://127.0.0.1:8080/index.html?save, # posting the data inside the <textarea> to the server. # We can catch it as the optional "content" parameter of the index() function # (since the name of the <textarea> is "content"). 
def edit(page): s = open(page).read() if os.path.exists(page) else "" s = '<form method="post" action="?save">' \ '<textarea name="content" rows="10" cols="80">%s</textarea><br>' \ '<input type="submit">' \ '</form>' % s return template(wiki, name=name(page), content=s) # The save() function is called when edited content is posted to the server. # It creates a file in /data and stores the content. @threadsafe def save(page, src): f = open(page, "w") f.write(src.encode("utf-8")) f.close() return view(page) # Writing HTML by hand in the <textarea> becomes tedious after a while, # so we could for example extend save() with a parser for Markdown syntax: # http://en.wikipedia.org/wiki/Markdown, # http://pythonhosted.org/Markdown/, # or replace the <textarea> with a visual TinyMCE editor: # http://www.tinymce.com. app.run("127.0.0.1", port=8080)
bsd-3-clause
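The wiki above leans on pattern.server's template() doing $placeholder substitution. pattern's implementation is richer, but the stdlib's string.Template gives a rough stand-in for what the $name/$content replacement amounts to:

from string import Template

wiki = """<!doctype html>
<html>
<head><title>$name</title></head>
<body><h3>$name</h3>$content</body>
</html>"""

html = Template(wiki).safe_substitute(name="index.html",
                                      content="<p>Hello, wiki.</p>")
print(html)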
apanju/GMIO_Odoo
odoo.py
257
5618
#!/usr/bin/env python #---------------------------------------------------------- # odoo cli # # To install your odoo development environement type: # # wget -O- https://raw.githubusercontent.com/odoo/odoo/8.0/odoo.py | python # # The setup_* subcommands used to boostrap odoo are defined here inline and may # only depends on the python 2.7 stdlib # # The rest of subcommands are defined in odoo/cli or in <module>/cli by # subclassing the Command object # #---------------------------------------------------------- import os import re import sys import subprocess GIT_HOOKS_PRE_PUSH = """ #!/usr/bin/env python2 import re import sys if re.search('github.com[:/]odoo/odoo.git$', sys.argv[2]): print "Pushing to /odoo/odoo.git is forbidden, please push to odoo-dev, use --no-verify to override" sys.exit(1) """ def printf(f,*l): print "odoo:" + f % l def run(*l): if isinstance(l[0], list): l = l[0] printf("running %s", " ".join(l)) subprocess.check_call(l) def git_locate(): # Locate git dir # TODO add support for os.environ.get('GIT_DIR') # check for an odoo child if os.path.isfile('odoo/.git/config'): os.chdir('odoo') path = os.getcwd() while path != os.path.abspath(os.sep): gitconfig_path = os.path.join(path, '.git/config') if os.path.isfile(gitconfig_path): release_py = os.path.join(path, 'openerp/release.py') if os.path.isfile(release_py): break path = os.path.dirname(path) if path == os.path.abspath(os.sep): path = None return path def cmd_setup_git(): git_dir = git_locate() if git_dir: printf('git repo found at %s',git_dir) else: run("git", "init", "odoo") os.chdir('odoo') git_dir = os.getcwd() if git_dir: # push sane config for git < 2.0, and hooks #run('git','config','push.default','simple') # alias run('git','config','alias.st','status') # merge bzr style run('git','config','merge.commit','no') # pull let me choose between merge or rebase only works in git > 2.0, use an alias for 1 run('git','config','pull.ff','only') run('git','config','alias.pl','pull --ff-only') pre_push_path = os.path.join(git_dir, '.git/hooks/pre-push') open(pre_push_path,'w').write(GIT_HOOKS_PRE_PUSH.strip()) os.chmod(pre_push_path, 0755) # setup odoo remote run('git','config','remote.odoo.url','https://github.com/odoo/odoo.git') run('git','config','remote.odoo.pushurl','[email protected]:odoo/odoo.git') run('git','config','--add','remote.odoo.fetch','dummy') run('git','config','--unset-all','remote.odoo.fetch') run('git','config','--add','remote.odoo.fetch','+refs/heads/*:refs/remotes/odoo/*') # setup odoo-dev remote run('git','config','remote.odoo-dev.url','https://github.com/odoo-dev/odoo.git') run('git','config','remote.odoo-dev.pushurl','[email protected]:odoo-dev/odoo.git') run('git','remote','update') # setup 8.0 branch run('git','config','branch.8.0.remote','odoo') run('git','config','branch.8.0.merge','refs/heads/8.0') run('git','checkout','8.0') else: printf('no git repo found') def cmd_setup_git_dev(): git_dir = git_locate() if git_dir: # setup odoo-dev remote run('git','config','--add','remote.odoo-dev.fetch','dummy') run('git','config','--unset-all','remote.odoo-dev.fetch') run('git','config','--add','remote.odoo-dev.fetch','+refs/heads/*:refs/remotes/odoo-dev/*') run('git','config','--add','remote.odoo-dev.fetch','+refs/pull/*:refs/remotes/odoo-dev/pull/*') run('git','remote','update') def cmd_setup_git_review(): git_dir = git_locate() if git_dir: # setup odoo-dev remote run('git','config','--add','remote.odoo.fetch','dummy') run('git','config','--unset-all','remote.odoo.fetch') 
run('git','config','--add','remote.odoo.fetch','+refs/heads/*:refs/remotes/odoo/*') run('git','config','--add','remote.odoo.fetch','+refs/tags/*:refs/remotes/odoo/tags/*') run('git','config','--add','remote.odoo.fetch','+refs/pull/*:refs/remotes/odoo/pull/*') def setup_deps_debian(git_dir): debian_control_path = os.path.join(git_dir, 'debian/control') debian_control = open(debian_control_path).read() debs = re.findall('python-[0-9a-z]+',debian_control) debs += ["postgresql"] proc = subprocess.Popen(['sudo','apt-get','install'] + debs, stdin=open('/dev/tty')) proc.communicate() def cmd_setup_deps(): git_dir = git_locate() if git_dir: if os.path.isfile('/etc/debian_version'): setup_deps_debian(git_dir) def setup_pg_debian(git_dir): cmd = ['sudo','su','-','postgres','-c','createuser -s %s' % os.environ['USER']] subprocess.call(cmd) def cmd_setup_pg(): git_dir = git_locate() if git_dir: if os.path.isfile('/etc/debian_version'): setup_pg_debian(git_dir) def cmd_setup(): cmd_setup_git() cmd_setup_deps() cmd_setup_pg() def main(): # registry of commands g = globals() cmds = dict([(i[4:],g[i]) for i in g if i.startswith('cmd_')]) # if curl URL | python2 then use command setup if len(sys.argv) == 1 and __file__ == '<stdin>': cmd_setup() elif len(sys.argv) == 2 and sys.argv[1] in cmds: cmds[sys.argv[1]]() else: import openerp openerp.cli.main() if __name__ == "__main__": main()
agpl-3.0
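The main() in the record above discovers subcommands by scanning globals() for cmd_* functions. A standalone sketch of that registry idiom; the command names and bodies here are placeholders:

import sys

def cmd_hello():
    print("hello")

def cmd_version():
    print("1.0")

def main():
    g = globals()
    # "cmd_hello" is registered under the key "hello", and so on.
    cmds = dict((name[4:], g[name]) for name in g if name.startswith('cmd_'))
    if len(sys.argv) == 2 and sys.argv[1] in cmds:
        cmds[sys.argv[1]]()
    else:
        print("usage: %s [%s]" % (sys.argv[0], "|".join(sorted(cmds))))

if __name__ == "__main__":
    main()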
Kiiv/CouchPotatoServer
libs/tornado/util.py
102
12256
"""Miscellaneous utility functions and classes. This module is used internally by Tornado. It is not necessarily expected that the functions and classes defined here will be useful to other applications, but they are documented here in case they are. The one public-facing part of this module is the `Configurable` class and its `~Configurable.configure` method, which becomes a part of the interface of its subclasses, including `.AsyncHTTPClient`, `.IOLoop`, and `.Resolver`. """ from __future__ import absolute_import, division, print_function, with_statement import array import inspect import os import sys import zlib try: xrange # py2 except NameError: xrange = range # py3 class ObjectDict(dict): """Makes a dictionary behave like an object, with attribute-style access. """ def __getattr__(self, name): try: return self[name] except KeyError: raise AttributeError(name) def __setattr__(self, name, value): self[name] = value class GzipDecompressor(object): """Streaming gzip decompressor. The interface is like that of `zlib.decompressobj` (without some of the optional arguments, but it understands gzip headers and checksums. """ def __init__(self): # Magic parameter makes zlib module understand gzip header # http://stackoverflow.com/questions/1838699/how-can-i-decompress-a-gzip-stream-with-zlib # This works on cpython and pypy, but not jython. self.decompressobj = zlib.decompressobj(16 + zlib.MAX_WBITS) def decompress(self, value, max_length=None): """Decompress a chunk, returning newly-available data. Some data may be buffered for later processing; `flush` must be called when there is no more input data to ensure that all data was processed. If ``max_length`` is given, some input data may be left over in ``unconsumed_tail``; you must retrieve this value and pass it back to a future call to `decompress` if it is not empty. """ return self.decompressobj.decompress(value, max_length) @property def unconsumed_tail(self): """Returns the unconsumed portion left over """ return self.decompressobj.unconsumed_tail def flush(self): """Return any remaining buffered data not yet returned by decompress. Also checks for errors such as truncated input. No other methods may be called on this object after `flush`. """ return self.decompressobj.flush() def import_object(name): """Imports an object by name. import_object('x') is equivalent to 'import x'. import_object('x.y.z') is equivalent to 'from x.y import z'. >>> import tornado.escape >>> import_object('tornado.escape') is tornado.escape True >>> import_object('tornado.escape.utf8') is tornado.escape.utf8 True >>> import_object('tornado') is tornado True >>> import_object('tornado.missing_module') Traceback (most recent call last): ... ImportError: No module named missing_module """ if name.count('.') == 0: return __import__(name, None, None) parts = name.split('.') obj = __import__('.'.join(parts[:-1]), None, None, [parts[-1]], 0) try: return getattr(obj, parts[-1]) except AttributeError: raise ImportError("No module named %s" % parts[-1]) # Fake unicode literal support: Python 3.2 doesn't have the u'' marker for # literal strings, and alternative solutions like "from __future__ import # unicode_literals" have other problems (see PEP 414). u() can be applied # to ascii strings that include \u escapes (but they must not contain # literal non-ascii characters). 
if type('') is not type(b''): def u(s): return s unicode_type = str basestring_type = str else: def u(s): return s.decode('unicode_escape') unicode_type = unicode basestring_type = basestring # Deprecated alias that was used before we dropped py25 support. # Left here in case anyone outside Tornado is using it. bytes_type = bytes if sys.version_info > (3,): exec(""" def raise_exc_info(exc_info): raise exc_info[1].with_traceback(exc_info[2]) def exec_in(code, glob, loc=None): if isinstance(code, str): code = compile(code, '<string>', 'exec', dont_inherit=True) exec(code, glob, loc) """) else: exec(""" def raise_exc_info(exc_info): raise exc_info[0], exc_info[1], exc_info[2] def exec_in(code, glob, loc=None): if isinstance(code, basestring): # exec(string) inherits the caller's future imports; compile # the string first to prevent that. code = compile(code, '<string>', 'exec', dont_inherit=True) exec code in glob, loc """) def errno_from_exception(e): """Provides the errno from an Exception object. There are cases that the errno attribute was not set so we pull the errno out of the args but if someone instantiates an Exception without any args you will get a tuple error. So this function abstracts all that behavior to give you a safe way to get the errno. """ if hasattr(e, 'errno'): return e.errno elif e.args: return e.args[0] else: return None class Configurable(object): """Base class for configurable interfaces. A configurable interface is an (abstract) class whose constructor acts as a factory function for one of its implementation subclasses. The implementation subclass as well as optional keyword arguments to its initializer can be set globally at runtime with `configure`. By using the constructor as the factory method, the interface looks like a normal class, `isinstance` works as usual, etc. This pattern is most useful when the choice of implementation is likely to be a global decision (e.g. when `~select.epoll` is available, always use it instead of `~select.select`), or when a previously-monolithic class has been split into specialized subclasses. Configurable subclasses must define the class methods `configurable_base` and `configurable_default`, and use the instance method `initialize` instead of ``__init__``. """ __impl_class = None __impl_kwargs = None def __new__(cls, **kwargs): base = cls.configurable_base() args = {} if cls is base: impl = cls.configured_class() if base.__impl_kwargs: args.update(base.__impl_kwargs) else: impl = cls args.update(kwargs) instance = super(Configurable, cls).__new__(impl) # initialize vs __init__ chosen for compatibility with AsyncHTTPClient # singleton magic. If we get rid of that we can switch to __init__ # here too. instance.initialize(**args) return instance @classmethod def configurable_base(cls): """Returns the base class of a configurable hierarchy. This will normally return the class in which it is defined. (which is *not* necessarily the same as the cls classmethod parameter). """ raise NotImplementedError() @classmethod def configurable_default(cls): """Returns the implementation class to be used if none is configured.""" raise NotImplementedError() def initialize(self): """Initialize a `Configurable` subclass instance. Configurable classes should use `initialize` instead of ``__init__``. """ @classmethod def configure(cls, impl, **kwargs): """Sets the class to use when the base class is instantiated. Keyword arguments will be saved and added to the arguments passed to the constructor. 
This can be used to set global defaults for some parameters. """ base = cls.configurable_base() if isinstance(impl, (unicode_type, bytes)): impl = import_object(impl) if impl is not None and not issubclass(impl, cls): raise ValueError("Invalid subclass of %s" % cls) base.__impl_class = impl base.__impl_kwargs = kwargs @classmethod def configured_class(cls): """Returns the currently configured class.""" base = cls.configurable_base() if cls.__impl_class is None: base.__impl_class = cls.configurable_default() return base.__impl_class @classmethod def _save_configuration(cls): base = cls.configurable_base() return (base.__impl_class, base.__impl_kwargs) @classmethod def _restore_configuration(cls, saved): base = cls.configurable_base() base.__impl_class = saved[0] base.__impl_kwargs = saved[1] class ArgReplacer(object): """Replaces one value in an ``args, kwargs`` pair. Inspects the function signature to find an argument by name whether it is passed by position or keyword. For use in decorators and similar wrappers. """ def __init__(self, func, name): self.name = name try: self.arg_pos = inspect.getargspec(func).args.index(self.name) except ValueError: # Not a positional parameter self.arg_pos = None def get_old_value(self, args, kwargs, default=None): """Returns the old value of the named argument without replacing it. Returns ``default`` if the argument is not present. """ if self.arg_pos is not None and len(args) > self.arg_pos: return args[self.arg_pos] else: return kwargs.get(self.name, default) def replace(self, new_value, args, kwargs): """Replace the named argument in ``args, kwargs`` with ``new_value``. Returns ``(old_value, args, kwargs)``. The returned ``args`` and ``kwargs`` objects may not be the same as the input objects, or the input objects may be mutated. If the named argument was not found, ``new_value`` will be added to ``kwargs`` and None will be returned as ``old_value``. """ if self.arg_pos is not None and len(args) > self.arg_pos: # The arg to replace is passed positionally old_value = args[self.arg_pos] args = list(args) # *args is normally a tuple args[self.arg_pos] = new_value else: # The arg to replace is either omitted or passed by keyword. old_value = kwargs.get(self.name) kwargs[self.name] = new_value return old_value, args, kwargs def timedelta_to_seconds(td): """Equivalent to td.total_seconds() (introduced in python 2.7).""" return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / float(10 ** 6) def _websocket_mask_python(mask, data): """Websocket masking function. `mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length. Returns a `bytes` object of the same length as `data` with the mask applied as specified in section 5.3 of RFC 6455. This pure-python implementation may be replaced by an optimized version when available. """ mask = array.array("B", mask) unmasked = array.array("B", data) for i in xrange(len(data)): unmasked[i] = unmasked[i] ^ mask[i % 4] if hasattr(unmasked, 'tobytes'): # tostring was deprecated in py32. It hasn't been removed, # but since we turn on deprecation warnings in our tests # we need to use the right one. return unmasked.tobytes() else: return unmasked.tostring() if (os.environ.get('TORNADO_NO_EXTENSION') or os.environ.get('TORNADO_EXTENSION') == '0'): # These environment variables exist to make it easier to do performance # comparisons; they are not guaranteed to remain supported in the future. 
_websocket_mask = _websocket_mask_python else: try: from tornado.speedups import websocket_mask as _websocket_mask except ImportError: if os.environ.get('TORNADO_EXTENSION') == '1': raise _websocket_mask = _websocket_mask_python def doctests(): import doctest return doctest.DocTestSuite()
gpl-3.0
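The pure-Python masking function in the record above is a repeating 4-byte XOR. A Python 3 sketch of the same RFC 6455 operation (iterating a bytes object yields ints there, so the array module is not needed); note that masking is its own inverse:

def websocket_mask(mask, data):
    # XOR each payload byte with mask[i % 4], per section 5.3 of RFC 6455.
    return bytes(b ^ mask[i % 4] for i, b in enumerate(data))

masked = websocket_mask(b"\x01\x02\x03\x04", b"hello")
assert masked != b"hello"
assert websocket_mask(b"\x01\x02\x03\x04", masked) == b"hello"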
kornicameister/ansible-modules-extras
network/openvswitch_port.py
12
8270
#!/usr/bin/python #coding: utf-8 -*- # pylint: disable=C0111 # (c) 2013, David Stygstra <[email protected]> # # Portions copyright @ 2015 VMware, Inc. # # This file is part of Ansible # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This software is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this software. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- module: openvswitch_port version_added: 1.4 author: "David Stygstra (@stygstra)" short_description: Manage Open vSwitch ports requirements: [ ovs-vsctl ] description: - Manage Open vSwitch ports options: bridge: required: true description: - Name of bridge to manage port: required: true description: - Name of port to manage on the bridge tag: version_added: 2.2 required: false description: - VLAN tag for this port state: required: false default: "present" choices: [ present, absent ] description: - Whether the port should exist timeout: required: false default: 5 description: - How long to wait for ovs-vswitchd to respond external_ids: version_added: 2.0 required: false default: {} description: - Dictionary of external_ids applied to a port. set: version_added: 2.0 required: false default: None description: - Set a single property on a port. ''' EXAMPLES = ''' # Creates port eth2 on bridge br-ex - openvswitch_port: bridge=br-ex port=eth2 state=present # Creates port eth6 and set ofport equal to 6. - openvswitch_port: bridge=bridge-loop port=eth6 state=present set="Interface eth6 ofport_request=6" # Creates port vlan10 with tag 10 on bridge br-ex - openvswitch_port: bridge=br-ex port=vlan10 tag=10 state=present set="Interface vlan10 type=internal" # Assign interface id server1-vifeth6 and mac address 00:00:5E:00:53:23 # to port vifeth6 and setup port to be managed by a controller. - openvswitch_port: bridge=br-int port=vifeth6 state=present args: external_ids: iface-id: "{{inventory_hostname}}-vifeth6" attached-mac: "00:00:5E:00:53:23" vm-id: "{{inventory_hostname}}" iface-status: "active" ''' # pylint: disable=W0703 def truncate_before(value, srch): """ Return content of str before the srch parameters. """ before_index = value.find(srch) if (before_index >= 0): return value[:before_index] else: return value def _set_to_get(set_cmd, module): """ Convert set command to get command and set value. return tuple (get command, set value) """ ## # If set has option: then we want to truncate just before that. set_cmd = truncate_before(set_cmd, " option:") get_cmd = set_cmd.split(" ") (key, value) = get_cmd[-1].split("=") module.log("get commands %s " % key) return (["--", "get"] + get_cmd[:-1] + [key], value) # pylint: disable=R0902 class OVSPort(object): """ Interface to OVS port. 
""" def __init__(self, module): self.module = module self.bridge = module.params['bridge'] self.port = module.params['port'] self.tag = module.params['tag'] self.state = module.params['state'] self.timeout = module.params['timeout'] self.set_opt = module.params.get('set', None) def _vsctl(self, command, check_rc=True): '''Run ovs-vsctl command''' cmd = ['ovs-vsctl', '-t', str(self.timeout)] + command return self.module.run_command(cmd, check_rc=check_rc) def exists(self): '''Check if the port already exists''' (rtc, out, err) = self._vsctl(['list-ports', self.bridge]) if rtc != 0: self.module.fail_json(msg=err) return any(port.rstrip() == self.port for port in out.split('\n')) def set(self, set_opt): """ Set attributes on a port. """ self.module.log("set called %s" % set_opt) if (not set_opt): return False (get_cmd, set_value) = _set_to_get(set_opt, self.module) (rtc, out, err) = self._vsctl(get_cmd, False) if rtc != 0: ## # ovs-vsctl -t 5 -- get Interface port external_ids:key # returns failure if key does not exist. out = None else: out = out.strip("\n") out = out.strip('"') if (out == set_value): return False (rtc, out, err) = self._vsctl(["--", "set"] + set_opt.split(" ")) if rtc != 0: self.module.fail_json(msg=err) return True def add(self): '''Add the port''' cmd = ['add-port', self.bridge, self.port] if self.tag: cmd += ["tag=" + self.tag] if self.set and self.set_opt: cmd += ["--", "set"] cmd += self.set_opt.split(" ") (rtc, _, err) = self._vsctl(cmd) if rtc != 0: self.module.fail_json(msg=err) return True def delete(self): '''Remove the port''' (rtc, _, err) = self._vsctl(['del-port', self.bridge, self.port]) if rtc != 0: self.module.fail_json(msg=err) def check(self): '''Run check mode''' try: if self.state == 'absent' and self.exists(): changed = True elif self.state == 'present' and not self.exists(): changed = True else: changed = False except Exception, earg: self.module.fail_json(msg=str(earg)) self.module.exit_json(changed=changed) def run(self): '''Make the necessary changes''' changed = False try: if self.state == 'absent': if self.exists(): self.delete() changed = True elif self.state == 'present': ## # Add any missing ports. if (not self.exists()): self.add() changed = True ## # If the -- set changed check here and make changes # but this only makes sense when state=present. if (not changed): changed = self.set(self.set_opt) or changed items = self.module.params['external_ids'].items() for (key, value) in items: value = value.replace('"', '') fmt_opt = "Interface %s external_ids:%s=%s" external_id = fmt_opt % (self.port, key, value) changed = self.set(external_id) or changed ## except Exception, earg: self.module.fail_json(msg=str(earg)) self.module.exit_json(changed=changed) # pylint: disable=E0602 def main(): """ Entry point. """ module = AnsibleModule( argument_spec={ 'bridge': {'required': True}, 'port': {'required': True}, 'tag': {'required': False}, 'state': {'default': 'present', 'choices': ['present', 'absent']}, 'timeout': {'default': 5, 'type': 'int'}, 'set': {'required': False, 'default': None}, 'external_ids': {'default': {}, 'required': False, 'type': 'dict'}, }, supports_check_mode=True, ) port = OVSPort(module) if module.check_mode: port.check() else: port.run() # pylint: disable=W0614 # pylint: disable=W0401 # pylint: disable=W0622 # import module snippets from ansible.module_utils.basic import * main()
gpl-3.0
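The idempotence check in the record above hinges on _set_to_get(): before writing, the module converts a "set" clause into the matching ovs-vsctl "get" command and compares values. The same transformation, re-created without the Ansible module object (the module.log call is dropped):

def set_to_get(set_cmd):
    # Drop anything from " option:" onwards, as truncate_before() does.
    idx = set_cmd.find(" option:")
    if idx >= 0:
        set_cmd = set_cmd[:idx]
    parts = set_cmd.split(" ")
    key, value = parts[-1].split("=")
    return ["--", "get"] + parts[:-1] + [key], value

cmd, value = set_to_get("Interface eth6 ofport_request=6")
assert cmd == ["--", "get", "Interface", "eth6", "ofport_request"]
assert value == "6"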
firebitsbr/infernal-twin
build/pillow/build/lib.linux-i686-2.7/PIL/BufrStubImagePlugin.py
77
1504
# # The Python Imaging Library # $Id$ # # BUFR stub adapter # # Copyright (c) 1996-2003 by Fredrik Lundh # # See the README file for information on usage and redistribution. # from PIL import Image, ImageFile _handler = None ## # Install application-specific BUFR image handler. # # @param handler Handler object. def register_handler(handler): global _handler _handler = handler # -------------------------------------------------------------------- # Image adapter def _accept(prefix): return prefix[:4] == b"BUFR" or prefix[:4] == b"ZCZC" class BufrStubImageFile(ImageFile.StubImageFile): format = "BUFR" format_description = "BUFR" def _open(self): offset = self.fp.tell() if not _accept(self.fp.read(8)): raise SyntaxError("Not a BUFR file") self.fp.seek(offset) # make something up self.mode = "F" self.size = 1, 1 loader = self._load() if loader: loader.open(self) def _load(self): return _handler def _save(im, fp, filename): if _handler is None or not hasattr(_handler, "save"): raise IOError("BUFR save handler not installed") _handler.save(im, fp, filename) # -------------------------------------------------------------------- # Registry Image.register_open(BufrStubImageFile.format, BufrStubImageFile, _accept) Image.register_save(BufrStubImageFile.format, _save) Image.register_extension(BufrStubImageFile.format, ".bufr")
gpl-3.0
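Wiring an application handler into the stub plugin above could look like the sketch below; MyBufrHandler and its bodies are hypothetical, and only the open/save interface is taken from the record:

from PIL import Image
import BufrStubImagePlugin  # the module above, assuming it is importable

class MyBufrHandler(object):
    def open(self, im):
        # A real handler would parse the BUFR stream from im.fp here and
        # set im.mode / im.size accordingly.
        pass

    def save(self, im, fp, filename):
        raise IOError("BUFR writing is not implemented in this sketch")

BufrStubImagePlugin.register_handler(MyBufrHandler())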
wanderer2/pymc3
pymc3/tests/test_glm.py
1
3145
import numpy as np from .helpers import SeededTest from pymc3 import glm, Model, Uniform, Normal, find_MAP, Slice, sample # Generate data def generate_data(intercept, slope, size=700): x = np.linspace(-1, 1, size) y = intercept + x * slope return x, y class TestGLM(SeededTest): @classmethod def setUpClass(cls): super(TestGLM, cls).setUpClass() cls.intercept = 1 cls.slope = 3 cls.sd = .05 x_linear, cls.y_linear = generate_data(cls.intercept, cls.slope, size=1000) cls.y_linear += np.random.normal(size=1000, scale=cls.sd) cls.data_linear = dict(x=x_linear, y=cls.y_linear) x_logistic, y_logistic = generate_data(cls.intercept, cls.slope, size=3000) y_logistic = 1 / (1 + np.exp(-y_logistic)) bern_trials = [np.random.binomial(1, i) for i in y_logistic] cls.data_logistic = dict(x=x_logistic, y=bern_trials) def test_linear_component(self): with Model() as model: y_est, _ = glm.linear_component('y ~ x', self.data_linear) sigma = Uniform('sigma', 0, 20) Normal('y_obs', mu=y_est, sd=sigma, observed=self.y_linear) start = find_MAP(vars=[sigma]) step = Slice(model.vars) trace = sample(500, step, start, progressbar=False, random_seed=self.random_seed) self.assertAlmostEqual(np.mean(trace['Intercept']), self.intercept, 1) self.assertAlmostEqual(np.mean(trace['x']), self.slope, 1) self.assertAlmostEqual(np.mean(trace['sigma']), self.sd, 1) def test_glm(self): with Model() as model: glm.glm('y ~ x', self.data_linear) step = Slice(model.vars) trace = sample(500, step, progressbar=False, random_seed=self.random_seed) self.assertAlmostEqual(np.mean(trace['Intercept']), self.intercept, 1) self.assertAlmostEqual(np.mean(trace['x']), self.slope, 1) self.assertAlmostEqual(np.mean(trace['sd']), self.sd, 1) def test_glm_link_func(self): with Model() as model: glm.glm('y ~ x', self.data_logistic, family=glm.families.Binomial(link=glm.families.logit)) step = Slice(model.vars) trace = sample(1000, step, progressbar=False, random_seed=self.random_seed) self.assertAlmostEqual(np.mean(trace['Intercept']), self.intercept, 1) self.assertAlmostEqual(np.mean(trace['x']), self.slope, 1) def test_more_than_one_glm_is_ok(self): with Model(): glm.glm('y ~ x', self.data_logistic, family=glm.families.Binomial(link=glm.families.logit), name='glm1') glm.glm('y ~ x', self.data_logistic, family=glm.families.Binomial(link=glm.families.logit), name='glm2') def test_from_xy(self): with Model(): glm.glm.from_xy( self.data_logistic['x'], self.data_logistic['y'], family=glm.families.Binomial(link=glm.families.logit), name='glm1')
apache-2.0
bugobliterator/ardupilot-chibios
Tools/ardupilotwaf/px_mkfw.py
18
4864
#!/usr/bin/env python ############################################################################ # # Copyright (C) 2012, 2013 PX4 Development Team. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # 3. Neither the name PX4 nor the names of its contributors may be # used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS # OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # ############################################################################ # # PX4 firmware image generator # # The PX4 firmware file is a JSON-encoded Python object, containing # metadata fields and a zlib-compressed base64-encoded firmware image. 
# import sys import argparse import json import base64 import zlib import time import subprocess # # Construct a basic firmware description # def mkdesc(): proto = {} proto['magic'] = "PX4FWv1" proto['board_id'] = 0 proto['board_revision'] = 0 proto['version'] = "" proto['summary'] = "" proto['description'] = "" proto['git_identity'] = "" proto['build_time'] = 0 proto['image'] = bytes() proto['image_size'] = 0 return proto # Parse commandline parser = argparse.ArgumentParser(description="Firmware generator for the PX autopilot system.") parser.add_argument("--prototype", action="store", help="read a prototype description from a file") parser.add_argument("--board_id", action="store", help="set the board ID required") parser.add_argument("--board_revision", action="store", help="set the board revision required") parser.add_argument("--version", action="store", help="set a version string") parser.add_argument("--summary", action="store", help="set a brief description") parser.add_argument("--description", action="store", help="set a longer description") parser.add_argument("--git_identity", action="store", help="the working directory to check for git identity") parser.add_argument("--parameter_xml", action="store", help="the parameters.xml file") parser.add_argument("--airframe_xml", action="store", help="the airframes.xml file") parser.add_argument("--image", action="store", help="the firmware image") args = parser.parse_args() # Fetch the firmware descriptor prototype if specified if args.prototype != None: f = open(args.prototype,"r") desc = json.load(f) f.close() else: desc = mkdesc() desc['build_time'] = int(time.time()) if args.board_id != None: desc['board_id'] = int(args.board_id) if args.board_revision != None: desc['board_revision'] = int(args.board_revision) if args.version != None: desc['version'] = str(args.version) if args.summary != None: desc['summary'] = str(args.summary) if args.description != None: desc['description'] = str(args.description) if args.git_identity != None: cmd = " ".join(["git", "--git-dir", args.git_identity + "/.git", "describe", "--always", "--dirty"]) p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout desc['git_identity'] = str(p.read().strip()) p.close() if args.parameter_xml != None: f = open(args.parameter_xml, "rb") bytes = f.read() desc['parameter_xml_size'] = len(bytes) desc['parameter_xml'] = base64.b64encode(zlib.compress(bytes,9)).decode('utf-8') desc['mav_autopilot'] = 12 # 12 = MAV_AUTOPILOT_PX4 if args.airframe_xml != None: f = open(args.airframe_xml, "rb") bytes = f.read() desc['airframe_xml_size'] = len(bytes) desc['airframe_xml'] = base64.b64encode(zlib.compress(bytes,9)).decode('utf-8') if args.image != None: f = open(args.image, "rb") bytes = f.read() desc['image_size'] = len(bytes) desc['image'] = base64.b64encode(zlib.compress(bytes,9)).decode('utf-8') print(json.dumps(desc, indent=4))
gpl-3.0
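Unpacking goes the other way from the generator above: each payload is base64-encoded zlib-compressed bytes inside a JSON document. A small extraction sketch (the file name is illustrative):

import base64
import json
import zlib

def extract_image(fw_path):
    with open(fw_path, "r") as f:
        desc = json.load(f)
    image = zlib.decompress(base64.b64decode(desc['image']))
    assert len(image) == desc['image_size']
    return image

# extract_image("firmware.px4") would return the raw firmware bytes.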
InakiZabala/odoomrp-wip
purchase_secondary_unit/models/pricelist.py
21
2471
# -*- encoding: utf-8 -*- ############################################################################## # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # ############################################################################## from openerp import models, fields, api from openerp.addons import decimal_precision as dp class ProductPricelistItem(models.Model): _inherit = 'product.pricelist.item' @api.one @api.depends('product_id', 'product_tmpl_id') def _get_uop_id(self): if self.product_id: self.uop_id = self.product_id.uop_id elif self.product_tmpl_id: self.uop_id = self.product_tmpl_id.uop_id else: self.uop_id = False price_surcharge_uop = fields.Float( string='Price Surcharge for UoP', digits=dp.get_precision('Product Price'), help='Specify the fixed amount to add or substract (if negative) to' ' the amount calculated with the discount.') uop_id = fields.Many2one( comodel_name='product.uom', string='Unit of Purchase', compute=_get_uop_id, readonly=True) @api.onchange('price_surcharge') def onchange_price_surcharge(self): if self.product_id: self.price_surcharge_uop = ( self.price_surcharge / self.product_id.uop_coeff) elif self.product_tmpl_id: self.price_surcharge_uop = ( self.price_surcharge / self.product_tmpl_id.uop_coeff) @api.onchange('price_surcharge_uop') def onchange_price_surcharge_uop(self): if self.product_id: self.price_surcharge = ( self.price_surcharge_uop * self.product_id.uop_coeff) elif self.product_tmpl_id: self.price_surcharge = ( self.price_surcharge_uop * self.product_tmpl_id.uop_coeff)
agpl-3.0
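The two onchange handlers in the record above maintain a single invariant, price_surcharge == price_surcharge_uop * uop_coeff. In plain numbers, with uop_coeff as an assumed example value:

uop_coeff = 12.0           # e.g. the product is purchased in boxes of 12 units
price_surcharge_uop = 3.0  # surcharge expressed per box (unit of purchase)

price_surcharge = price_surcharge_uop * uop_coeff          # onchange_price_surcharge_uop
assert price_surcharge / uop_coeff == price_surcharge_uop  # onchange_price_surcharge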
inspyration/odoo
doc/_themes/odoodoc/sphinx_monkeypatch.py
24
3166
# -*- coding: utf-8 -*- import sphinx.roles import sphinx.environment from sphinx.writers.html import HTMLTranslator from docutils.writers.html4css1 import HTMLTranslator as DocutilsTranslator def patch(): # navify toctree (oh god) @monkey(sphinx.environment.BuildEnvironment) def resolve_toctree(old_resolve, self, *args, **kwargs): """ If navbar, bootstrapify TOC to yield a navbar """ navbar = kwargs.pop('navbar', None) toc = old_resolve(self, *args, **kwargs) if toc is None: return None navbarify(toc[0], navbar=navbar) return toc # monkeypatch visit_table to remove border and add .table HTMLTranslator.visit_table = visit_table # disable colspec crap HTMLTranslator.write_colspecs = lambda self: None # copy data- attributes straight from source to dest HTMLTranslator.starttag = starttag_data def navbarify(node, navbar=None): """ :param node: toctree node to navbarify :param navbar: Whether this toctree is a 'main' navbar, a 'side' navbar or not a navbar at all """ if navbar == 'side': for n in node.traverse(): if n.tagname == 'bullet_list': n['classes'].append('nav') elif navbar == 'main': # add classes to just toplevel node['classes'].extend(['nav', 'navbar-nav', 'navbar-right']) for list_item in node.children: # bullet_list # list_item # compact_paragraph # reference # bullet_list # list_item # compact_paragraph # reference # no bullet_list.list_item -> don't dropdownify if not list_item.children[1].children: return list_item['classes'].append('dropdown') # list_item.compact_paragraph.reference link = list_item.children[0].children[0] link['classes'].append('dropdown-toggle') link.attributes['data-toggle'] = 'dropdown' # list_item.bullet_list list_item.children[1]['classes'].append('dropdown-menu') def visit_table(self, node): """ * remove border * add table class """ self._table_row_index = 0 self.context.append(self.compact_p) self.compact_p = True classes = ' '.join({'table', self.settings.table_style}).strip() self.body.append(self.starttag(node, 'table', CLASS=classes)) def starttag_data(self, node, tagname, suffix='\n', empty=False, **attributes): attributes.update( (k, v) for k, v in node.attributes.iteritems() if k.startswith('data-') ) # oh dear return DocutilsTranslator.starttag( self, node, tagname, suffix=suffix, empty=empty, **attributes) class monkey(object): def __init__(self, obj): self.obj = obj def __call__(self, fn): name = fn.__name__ old = getattr(self.obj, name) setattr(self.obj, name, lambda self_, *args, **kwargs: \ fn(old, self_, *args, **kwargs))
agpl-3.0
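The monkey decorator in the record above patches a method in place while handing the old implementation to the wrapper as its first argument. A standalone sketch of the pattern; Greeter is a toy class, and the decorator body is copied from the record:

class monkey(object):
    def __init__(self, obj):
        self.obj = obj

    def __call__(self, fn):
        name = fn.__name__
        old = getattr(self.obj, name)
        setattr(self.obj, name,
                lambda self_, *args, **kwargs: fn(old, self_, *args, **kwargs))

class Greeter(object):
    def greet(self):
        return "hello"

@monkey(Greeter)
def greet(old, self):
    # 'old' is the original Greeter.greet; extend it rather than replace it.
    return old(self) + ", world"

assert Greeter().greet() == "hello, world"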
vprime/puuuu
env/lib/python2.7/site-packages/django/core/management/base.py
104
15912
""" Base classes for writing management commands (named commands which can be executed through ``django-admin.py`` or ``manage.py``). """ from __future__ import unicode_literals import os import sys from optparse import make_option, OptionParser import django from django.core.exceptions import ImproperlyConfigured from django.core.management.color import color_style from django.utils.encoding import force_str from django.utils.six import StringIO class CommandError(Exception): """ Exception class indicating a problem while executing a management command. If this exception is raised during the execution of a management command, it will be caught and turned into a nicely-printed error message to the appropriate output stream (i.e., stderr); as a result, raising this exception (with a sensible description of the error) is the preferred way to indicate that something has gone wrong in the execution of a command. """ pass def handle_default_options(options): """ Include any default options that all commands should accept here so that ManagementUtility can handle them before searching for user commands. """ if options.settings: os.environ['DJANGO_SETTINGS_MODULE'] = options.settings if options.pythonpath: sys.path.insert(0, options.pythonpath) class OutputWrapper(object): """ Wrapper around stdout/stderr """ def __init__(self, out, style_func=None, ending='\n'): self._out = out self.style_func = None if hasattr(out, 'isatty') and out.isatty(): self.style_func = style_func self.ending = ending def __getattr__(self, name): return getattr(self._out, name) def write(self, msg, style_func=None, ending=None): ending = self.ending if ending is None else ending if ending and not msg.endswith(ending): msg += ending style_func = [f for f in (style_func, self.style_func, lambda x:x) if f is not None][0] self._out.write(force_str(style_func(msg))) class BaseCommand(object): """ The base class from which all management commands ultimately derive. Use this class if you want access to all of the mechanisms which parse the command-line arguments and work out what code to call in response; if you don't need to change any of that behavior, consider using one of the subclasses defined in this file. If you are interested in overriding/customizing various aspects of the command-parsing and -execution behavior, the normal flow works as follows: 1. ``django-admin.py`` or ``manage.py`` loads the command class and calls its ``run_from_argv()`` method. 2. The ``run_from_argv()`` method calls ``create_parser()`` to get an ``OptionParser`` for the arguments, parses them, performs any environment changes requested by options like ``pythonpath``, and then calls the ``execute()`` method, passing the parsed arguments. 3. The ``execute()`` method attempts to carry out the command by calling the ``handle()`` method with the parsed arguments; any output produced by ``handle()`` will be printed to standard output and, if the command is intended to produce a block of SQL statements, will be wrapped in ``BEGIN`` and ``COMMIT``. 4. If ``handle()`` or ``execute()`` raised any exception (e.g. ``CommandError``), ``run_from_argv()`` will instead print an error message to ``stderr``. Thus, the ``handle()`` method is typically the starting point for subclasses; many built-in commands and command types either place all of their logic in ``handle()``, or perform some additional parsing work in ``handle()`` and then delegate from it to more specialized methods as needed. 
Several attributes affect behavior at various steps along the way: ``args`` A string listing the arguments accepted by the command, suitable for use in help messages; e.g., a command which takes a list of application names might set this to '<appname appname ...>'. ``can_import_settings`` A boolean indicating whether the command needs to be able to import Django settings; if ``True``, ``execute()`` will verify that this is possible before proceeding. Default value is ``True``. ``help`` A short description of the command, which will be printed in help messages. ``option_list`` This is the list of ``optparse`` options which will be fed into the command's ``OptionParser`` for parsing arguments. ``output_transaction`` A boolean indicating whether the command outputs SQL statements; if ``True``, the output will automatically be wrapped with ``BEGIN;`` and ``COMMIT;``. Default value is ``False``. ``requires_model_validation`` A boolean; if ``True``, validation of installed models will be performed prior to executing the command. Default value is ``True``. To validate an individual application's models rather than all applications' models, call ``self.validate(app)`` from ``handle()``, where ``app`` is the application's Python module. ``leave_locale_alone`` A boolean indicating whether the locale set in settings should be preserved during the execution of the command instead of being forcibly set to 'en-us'. Default value is ``False``. Make sure you know what you are doing if you decide to change the value of this option in your custom command if it creates database content that is locale-sensitive and such content shouldn't contain any translations (like it happens e.g. with django.contrim.auth permissions) as making the locale differ from the de facto default 'en-us' might cause unintended effects. This option can't be False when the can_import_settings option is set to False too because attempting to set the locale needs access to settings. This condition will generate a CommandError. """ # Metadata about this command. option_list = ( make_option('-v', '--verbosity', action='store', dest='verbosity', default='1', type='choice', choices=['0', '1', '2', '3'], help='Verbosity level; 0=minimal output, 1=normal output, 2=verbose output, 3=very verbose output'), make_option('--settings', help='The Python path to a settings module, e.g. "myproject.settings.main". If this isn\'t provided, the DJANGO_SETTINGS_MODULE environment variable will be used.'), make_option('--pythonpath', help='A directory to add to the Python path, e.g. "/home/djangoprojects/myproject".'), make_option('--traceback', action='store_true', help='Raise on exception'), ) help = '' args = '' # Configuration shortcuts that alter various logic. can_import_settings = True requires_model_validation = True output_transaction = False # Whether to wrap the output in a "BEGIN; COMMIT;" leave_locale_alone = False def __init__(self): self.style = color_style() def get_version(self): """ Return the Django version, which should be correct for all built-in Django commands. User-supplied commands should override this method. """ return django.get_version() def usage(self, subcommand): """ Return a brief description of how to use this command, by default from the attribute ``self.help``. 
""" usage = '%%prog %s [options] %s' % (subcommand, self.args) if self.help: return '%s\n\n%s' % (usage, self.help) else: return usage def create_parser(self, prog_name, subcommand): """ Create and return the ``OptionParser`` which will be used to parse the arguments to this command. """ return OptionParser(prog=prog_name, usage=self.usage(subcommand), version=self.get_version(), option_list=self.option_list) def print_help(self, prog_name, subcommand): """ Print the help message for this command, derived from ``self.usage()``. """ parser = self.create_parser(prog_name, subcommand) parser.print_help() def run_from_argv(self, argv): """ Set up any environment changes requested (e.g., Python path and Django settings), then run this command. If the command raises a ``CommandError``, intercept it and print it sensibly to stderr. If the ``--traceback`` option is present or the raised ``Exception`` is not ``CommandError``, raise it. """ parser = self.create_parser(argv[0], argv[1]) options, args = parser.parse_args(argv[2:]) handle_default_options(options) try: self.execute(*args, **options.__dict__) except Exception as e: if options.traceback or not isinstance(e, CommandError): raise # self.stderr is not guaranteed to be set here stderr = getattr(self, 'stderr', OutputWrapper(sys.stderr, self.style.ERROR)) stderr.write('%s: %s' % (e.__class__.__name__, e)) sys.exit(1) def execute(self, *args, **options): """ Try to execute this command, performing model validation if needed (as controlled by the attribute ``self.requires_model_validation``, except if force-skipped). """ self.stdout = OutputWrapper(options.get('stdout', sys.stdout)) self.stderr = OutputWrapper(options.get('stderr', sys.stderr), self.style.ERROR) if self.can_import_settings: from django.conf import settings saved_locale = None if not self.leave_locale_alone: # Only mess with locales if we can assume we have a working # settings file, because django.utils.translation requires settings # (The final saying about whether the i18n machinery is active will be # found in the value of the USE_I18N setting) if not self.can_import_settings: raise CommandError("Incompatible values of 'leave_locale_alone' " "(%s) and 'can_import_settings' (%s) command " "options." % (self.leave_locale_alone, self.can_import_settings)) # Switch to US English, because django-admin.py creates database # content like permissions, and those shouldn't contain any # translations. from django.utils import translation saved_locale = translation.get_language() translation.activate('en-us') try: if self.requires_model_validation and not options.get('skip_validation'): self.validate() output = self.handle(*args, **options) if output: if self.output_transaction: # This needs to be imported here, because it relies on # settings. from django.db import connections, DEFAULT_DB_ALIAS connection = connections[options.get('database', DEFAULT_DB_ALIAS)] if connection.ops.start_transaction_sql(): self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql())) self.stdout.write(output) if self.output_transaction: self.stdout.write('\n' + self.style.SQL_KEYWORD("COMMIT;")) finally: if saved_locale is not None: translation.activate(saved_locale) def validate(self, app=None, display_num_errors=False): """ Validates the given app, raising CommandError for any errors. If app is None, then this will validate all installed apps. 
""" from django.core.management.validation import get_validation_errors s = StringIO() num_errors = get_validation_errors(s, app) if num_errors: s.seek(0) error_text = s.read() raise CommandError("One or more models did not validate:\n%s" % error_text) if display_num_errors: self.stdout.write("%s error%s found" % (num_errors, '' if num_errors == 1 else 's')) def handle(self, *args, **options): """ The actual logic of the command. Subclasses must implement this method. """ raise NotImplementedError() class AppCommand(BaseCommand): """ A management command which takes one or more installed application names as arguments, and does something with each of them. Rather than implementing ``handle()``, subclasses must implement ``handle_app()``, which will be called once for each application. """ args = '<appname appname ...>' def handle(self, *app_labels, **options): from django.db import models if not app_labels: raise CommandError('Enter at least one appname.') try: app_list = [models.get_app(app_label) for app_label in app_labels] except (ImproperlyConfigured, ImportError) as e: raise CommandError("%s. Are you sure your INSTALLED_APPS setting is correct?" % e) output = [] for app in app_list: app_output = self.handle_app(app, **options) if app_output: output.append(app_output) return '\n'.join(output) def handle_app(self, app, **options): """ Perform the command's actions for ``app``, which will be the Python module corresponding to an application name given on the command line. """ raise NotImplementedError() class LabelCommand(BaseCommand): """ A management command which takes one or more arbitrary arguments (labels) on the command line, and does something with each of them. Rather than implementing ``handle()``, subclasses must implement ``handle_label()``, which will be called once for each label. If the arguments should be names of installed applications, use ``AppCommand`` instead. """ args = '<label label ...>' label = 'label' def handle(self, *labels, **options): if not labels: raise CommandError('Enter at least one %s.' % self.label) output = [] for label in labels: label_output = self.handle_label(label, **options) if label_output: output.append(label_output) return '\n'.join(output) def handle_label(self, label, **options): """ Perform the command's actions for ``label``, which will be the string as given on the command line. """ raise NotImplementedError() class NoArgsCommand(BaseCommand): """ A command which takes no arguments on the command line. Rather than implementing ``handle()``, subclasses must implement ``handle_noargs()``; ``handle()`` itself is overridden to ensure no arguments are passed to the command. Attempting to pass arguments will raise ``CommandError``. """ args = '' def handle(self, *args, **options): if args: raise CommandError("Command doesn't accept any arguments") return self.handle_noargs(**options) def handle_noargs(self, **options): """ Perform this command's actions. """ raise NotImplementedError()
mit
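A minimal usage sketch for the base classes above, assuming a Django 1.6-era project; the command name, module path, and greeting text are invented for illustration and are not part of the file.

# Hypothetical example (not part of the original file): a command built on
# LabelCommand, placed at <app>/management/commands/greet.py and invoked as
# "./manage.py greet alice bob".
from django.core.management.base import CommandError, LabelCommand


class Command(LabelCommand):
    help = 'Print a greeting for each name given on the command line.'
    args = '<name name ...>'
    label = 'name'

    def handle_label(self, label, **options):
        if not label.strip():
            raise CommandError('Names may not be blank.')
        # Strings returned here are collected by LabelCommand.handle()
        # and written to stdout by BaseCommand.execute().
        return 'Hello, %s!' % label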
mcgachey/edx-platform
common/djangoapps/track/migrations/0001_initial.py
189
2527
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Adding model 'TrackingLog' db.create_table('track_trackinglog', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('dtcreated', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)), ('username', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)), ('ip', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)), ('event_source', self.gf('django.db.models.fields.CharField')(max_length=32)), ('event_type', self.gf('django.db.models.fields.CharField')(max_length=32, blank=True)), ('event', self.gf('django.db.models.fields.TextField')(blank=True)), ('agent', self.gf('django.db.models.fields.CharField')(max_length=256, blank=True)), ('page', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)), ('time', self.gf('django.db.models.fields.DateTimeField')()), )) db.send_create_signal('track', ['TrackingLog']) def backwards(self, orm): # Deleting model 'TrackingLog' db.delete_table('track_trackinglog') models = { 'track.trackinglog': { 'Meta': {'object_name': 'TrackingLog'}, 'agent': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}), 'dtcreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'event': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'event_source': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'event_type': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'ip': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}), 'page': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}), 'time': ('django.db.models.fields.DateTimeField', [], {}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}) } } complete_apps = ['track']
agpl-3.0
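To show how the South pattern above extends, here is a hypothetical follow-up migration sketch against the same table; the 'host' column and this migration are invented for illustration and do not exist in edx-platform.

# -*- coding: utf-8 -*-
# Hypothetical sketch only: incremental South migrations reuse the same
# forwards()/backwards() pair, using db.add_column/db.delete_column.
from south.db import db
from south.v2 import SchemaMigration


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding the (invented) field 'TrackingLog.host'
        db.add_column('track_trackinglog', 'host',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=64, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting the (invented) field 'TrackingLog.host'
        db.delete_column('track_trackinglog', 'host')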
datapythonista/pandas
pandas/tests/util/test_validate_args_and_kwargs.py
8
2391
import pytest from pandas.util._validators import validate_args_and_kwargs _fname = "func" def test_invalid_total_length_max_length_one(): compat_args = ("foo",) kwargs = {"foo": "FOO"} args = ("FoO", "BaZ") min_fname_arg_count = 0 max_length = len(compat_args) + min_fname_arg_count actual_length = len(kwargs) + len(args) + min_fname_arg_count msg = ( fr"{_fname}\(\) takes at most {max_length} " fr"argument \({actual_length} given\)" ) with pytest.raises(TypeError, match=msg): validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args) def test_invalid_total_length_max_length_multiple(): compat_args = ("foo", "bar", "baz") kwargs = {"foo": "FOO", "bar": "BAR"} args = ("FoO", "BaZ") min_fname_arg_count = 2 max_length = len(compat_args) + min_fname_arg_count actual_length = len(kwargs) + len(args) + min_fname_arg_count msg = ( fr"{_fname}\(\) takes at most {max_length} " fr"arguments \({actual_length} given\)" ) with pytest.raises(TypeError, match=msg): validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args) @pytest.mark.parametrize("args,kwargs", [((), {"foo": -5, "bar": 2}), ((-5, 2), {})]) def test_missing_args_or_kwargs(args, kwargs): bad_arg = "bar" min_fname_arg_count = 2 compat_args = {"foo": -5, bad_arg: 1} msg = ( fr"the '{bad_arg}' parameter is not supported " fr"in the pandas implementation of {_fname}\(\)" ) with pytest.raises(ValueError, match=msg): validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args) def test_duplicate_argument(): min_fname_arg_count = 2 compat_args = {"foo": None, "bar": None, "baz": None} kwargs = {"foo": None, "bar": None} args = (None,) # duplicate value for "foo" msg = fr"{_fname}\(\) got multiple values for keyword argument 'foo'" with pytest.raises(TypeError, match=msg): validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args) def test_validation(): # No exceptions should be raised. compat_args = {"foo": 1, "bar": None, "baz": -2} kwargs = {"baz": -2} args = (1, None) min_fname_arg_count = 2 validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)
bsd-3-clause
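To make the call pattern exercised by the tests above concrete, a hypothetical caller sketch follows; my_sum and its signature are invented, and only the validate_args_and_kwargs call mirrors the tests (fname, args, kwargs, the count of named arguments the function takes itself, then the compat mapping).

# Hypothetical usage sketch: a numpy-compat wrapper in the style pandas
# validates. "axis" is accepted for compatibility but must keep its
# default value; anything else raises TypeError or ValueError.
from pandas.util._validators import validate_args_and_kwargs


def my_sum(values, *args, **kwargs):
    # 1 is the count of named arguments my_sum itself takes (values).
    validate_args_and_kwargs("my_sum", args, kwargs, 1, {"axis": None})
    return sum(values)


my_sum([1, 2, 3])             # OK
my_sum([1, 2, 3], axis=None)  # OK: matches the compat default
my_sum([1, 2, 3], axis=0)     # raises: non-default value for "axis"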
nanobox-io/git
contrib/hooks/multimail/git_multimail.py
186
110172
#! /usr/bin/env python2 # Copyright (c) 2015 Matthieu Moy and others # Copyright (c) 2012-2014 Michael Haggerty and others # Derived from contrib/hooks/post-receive-email, which is # Copyright (c) 2007 Andy Parkins # and also includes contributions by other authors. # # This file is part of git-multimail. # # git-multimail is free software: you can redistribute it and/or # modify it under the terms of the GNU General Public License version # 2 as published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see # <http://www.gnu.org/licenses/>. """Generate notification emails for pushes to a git repository. This hook sends emails describing changes introduced by pushes to a git repository. For each reference that was changed, it emits one ReferenceChange email summarizing how the reference was changed, followed by one Revision email for each new commit that was introduced by the reference change. Each commit is announced in exactly one Revision email. If the same commit is merged into another branch in the same or a later push, then the ReferenceChange email will list the commit's SHA1 and its one-line summary, but no new Revision email will be generated. This script is designed to be used as a "post-receive" hook in a git repository (see githooks(5)). It can also be used as an "update" script, but this usage is not completely reliable and is deprecated. To help with debugging, this script accepts a --stdout option, which causes the emails to be written to standard output rather than sent using sendmail. See the accompanying README file for the complete documentation. """ import sys import os import re import bisect import socket import subprocess import shlex import optparse import smtplib import time try: from email.utils import make_msgid from email.utils import getaddresses from email.utils import formataddr from email.utils import formatdate from email.header import Header except ImportError: # Prior to Python 2.5, the email module used different names: from email.Utils import make_msgid from email.Utils import getaddresses from email.Utils import formataddr from email.Utils import formatdate from email.Header import Header DEBUG = False ZEROS = '0' * 40 LOGBEGIN = '- Log -----------------------------------------------------------------\n' LOGEND = '-----------------------------------------------------------------------\n' ADDR_HEADERS = set(['from', 'to', 'cc', 'bcc', 'reply-to', 'sender']) # It is assumed in many places that the encoding is uniformly UTF-8, # so changing these constants is unsupported. But define them here # anyway, to make it easier to find (at least most of) the places # where the encoding is important. 
(ENCODING, CHARSET) = ('UTF-8', 'utf-8') REF_CREATED_SUBJECT_TEMPLATE = ( '%(emailprefix)s%(refname_type)s %(short_refname)s created' ' (now %(newrev_short)s)' ) REF_UPDATED_SUBJECT_TEMPLATE = ( '%(emailprefix)s%(refname_type)s %(short_refname)s updated' ' (%(oldrev_short)s -> %(newrev_short)s)' ) REF_DELETED_SUBJECT_TEMPLATE = ( '%(emailprefix)s%(refname_type)s %(short_refname)s deleted' ' (was %(oldrev_short)s)' ) COMBINED_REFCHANGE_REVISION_SUBJECT_TEMPLATE = ( '%(emailprefix)s%(refname_type)s %(short_refname)s updated: %(oneline)s' ) REFCHANGE_HEADER_TEMPLATE = """\ Date: %(send_date)s To: %(recipients)s Subject: %(subject)s MIME-Version: 1.0 Content-Type: text/plain; charset=%(charset)s Content-Transfer-Encoding: 8bit Message-ID: %(msgid)s From: %(fromaddr)s Reply-To: %(reply_to)s X-Git-Host: %(fqdn)s X-Git-Repo: %(repo_shortname)s X-Git-Refname: %(refname)s X-Git-Reftype: %(refname_type)s X-Git-Oldrev: %(oldrev)s X-Git-Newrev: %(newrev)s Auto-Submitted: auto-generated """ REFCHANGE_INTRO_TEMPLATE = """\ This is an automated email from the git hooks/post-receive script. %(pusher)s pushed a change to %(refname_type)s %(short_refname)s in repository %(repo_shortname)s. """ FOOTER_TEMPLATE = """\ -- \n\ To stop receiving notification emails like this one, please contact %(administrator)s. """ REWIND_ONLY_TEMPLATE = """\ This update removed existing revisions from the reference, leaving the reference pointing at a previous point in the repository history. * -- * -- N %(refname)s (%(newrev_short)s) \\ O -- O -- O (%(oldrev_short)s) Any revisions marked "omits" are not gone; other references still refer to them. Any revisions marked "discards" are gone forever. """ NON_FF_TEMPLATE = """\ This update added new revisions after undoing existing revisions. That is to say, some revisions that were in the old version of the %(refname_type)s are not in the new version. This situation occurs when a user --force pushes a change and generates a repository containing something like this: * -- * -- B -- O -- O -- O (%(oldrev_short)s) \\ N -- N -- N %(refname)s (%(newrev_short)s) You should already have received notification emails for all of the O revisions, and so the following emails describe only the N revisions from the common base, B. Any revisions marked "omits" are not gone; other references still refer to them. Any revisions marked "discards" are gone forever. """ NO_NEW_REVISIONS_TEMPLATE = """\ No new revisions were added by this update. """ DISCARDED_REVISIONS_TEMPLATE = """\ This change permanently discards the following revisions: """ NO_DISCARDED_REVISIONS_TEMPLATE = """\ The revisions that were on this %(refname_type)s are still contained in other references; therefore, this change does not discard any commits from the repository. """ NEW_REVISIONS_TEMPLATE = """\ The %(tot)s revisions listed above as "new" are entirely new to this repository and will be described in separate emails. The revisions listed as "adds" were already present in the repository and have only been added to this reference. """ TAG_CREATED_TEMPLATE = """\ at %(newrev_short)-9s (%(newrev_type)s) """ TAG_UPDATED_TEMPLATE = """\ *** WARNING: tag %(short_refname)s was modified! *** from %(oldrev_short)-9s (%(oldrev_type)s) to %(newrev_short)-9s (%(newrev_type)s) """ TAG_DELETED_TEMPLATE = """\ *** WARNING: tag %(short_refname)s was deleted! *** """ # The template used in summary tables. It looks best if this uses the # same alignment as TAG_CREATED_TEMPLATE and TAG_UPDATED_TEMPLATE. 
BRIEF_SUMMARY_TEMPLATE = """\ %(action)10s %(rev_short)-9s %(text)s """ NON_COMMIT_UPDATE_TEMPLATE = """\ This is an unusual reference change because the reference did not refer to a commit either before or after the change. We do not know how to provide full information about this reference change. """ REVISION_HEADER_TEMPLATE = """\ Date: %(send_date)s To: %(recipients)s Cc: %(cc_recipients)s Subject: %(emailprefix)s%(num)02d/%(tot)02d: %(oneline)s MIME-Version: 1.0 Content-Type: text/plain; charset=%(charset)s Content-Transfer-Encoding: 8bit From: %(fromaddr)s Reply-To: %(reply_to)s In-Reply-To: %(reply_to_msgid)s References: %(reply_to_msgid)s X-Git-Host: %(fqdn)s X-Git-Repo: %(repo_shortname)s X-Git-Refname: %(refname)s X-Git-Reftype: %(refname_type)s X-Git-Rev: %(rev)s Auto-Submitted: auto-generated """ REVISION_INTRO_TEMPLATE = """\ This is an automated email from the git hooks/post-receive script. %(pusher)s pushed a commit to %(refname_type)s %(short_refname)s in repository %(repo_shortname)s. """ REVISION_FOOTER_TEMPLATE = FOOTER_TEMPLATE # Combined, meaning refchange+revision email (for single-commit additions) COMBINED_HEADER_TEMPLATE = """\ Date: %(send_date)s To: %(recipients)s Subject: %(subject)s MIME-Version: 1.0 Content-Type: text/plain; charset=%(charset)s Content-Transfer-Encoding: 8bit Message-ID: %(msgid)s From: %(fromaddr)s Reply-To: %(reply_to)s X-Git-Host: %(fqdn)s X-Git-Repo: %(repo_shortname)s X-Git-Refname: %(refname)s X-Git-Reftype: %(refname_type)s X-Git-Oldrev: %(oldrev)s X-Git-Newrev: %(newrev)s X-Git-Rev: %(rev)s Auto-Submitted: auto-generated """ COMBINED_INTRO_TEMPLATE = """\ This is an automated email from the git hooks/post-receive script. %(pusher)s pushed a commit to %(refname_type)s %(short_refname)s in repository %(repo_shortname)s. """ COMBINED_FOOTER_TEMPLATE = FOOTER_TEMPLATE class CommandError(Exception): def __init__(self, cmd, retcode): self.cmd = cmd self.retcode = retcode Exception.__init__( self, 'Command "%s" failed with retcode %s' % (' '.join(cmd), retcode,) ) class ConfigurationException(Exception): pass # The "git" program (this could be changed to include a full path): GIT_EXECUTABLE = 'git' # How "git" should be invoked (including global arguments), as a list # of words. This variable is usually initialized automatically by # read_git_output() via choose_git_command(), but if a value is set # here then it will be used unconditionally. GIT_CMD = None def choose_git_command(): """Decide how to invoke git, and record the choice in GIT_CMD.""" global GIT_CMD if GIT_CMD is None: try: # Check to see whether the "-c" option is accepted (it was # only added in Git 1.7.2). We don't actually use the # output of "git --version", though if we needed more # specific version information this would be the place to # do it. 
cmd = [GIT_EXECUTABLE, '-c', 'foo.bar=baz', '--version'] read_output(cmd) GIT_CMD = [GIT_EXECUTABLE, '-c', 'i18n.logoutputencoding=%s' % (ENCODING,)] except CommandError: GIT_CMD = [GIT_EXECUTABLE] def read_git_output(args, input=None, keepends=False, **kw): """Read the output of a Git command.""" if GIT_CMD is None: choose_git_command() return read_output(GIT_CMD + args, input=input, keepends=keepends, **kw) def read_output(cmd, input=None, keepends=False, **kw): if input: stdin = subprocess.PIPE else: stdin = None p = subprocess.Popen( cmd, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kw ) (out, err) = p.communicate(input) retcode = p.wait() if retcode: raise CommandError(cmd, retcode) if not keepends: out = out.rstrip('\n\r') return out def read_git_lines(args, keepends=False, **kw): """Return the lines output by Git command. Return as single lines, with newlines stripped off.""" return read_git_output(args, keepends=True, **kw).splitlines(keepends) def git_rev_list_ish(cmd, spec, args=None, **kw): """Common functionality for invoking a 'git rev-list'-like command. Parameters: * cmd is the Git command to run, e.g., 'rev-list' or 'log'. * spec is a list of revision arguments to pass to the named command. If None, this function returns an empty list. * args is a list of extra arguments passed to the named command. * All other keyword arguments (if any) are passed to the underlying read_git_lines() function. Return the output of the Git command in the form of a list, one entry per output line. """ if spec is None: return [] if args is None: args = [] args = [cmd, '--stdin'] + args spec_stdin = ''.join(s + '\n' for s in spec) return read_git_lines(args, input=spec_stdin, **kw) def git_rev_list(spec, **kw): """Run 'git rev-list' with the given list of revision arguments. See git_rev_list_ish() for parameter and return value documentation. """ return git_rev_list_ish('rev-list', spec, **kw) def git_log(spec, **kw): """Run 'git log' with the given list of revision arguments. See git_rev_list_ish() for parameter and return value documentation. """ return git_rev_list_ish('log', spec, **kw) def header_encode(text, header_name=None): """Encode and line-wrap the value of an email header field.""" try: if isinstance(text, str): text = text.decode(ENCODING, 'replace') return Header(text, header_name=header_name).encode() except UnicodeEncodeError: return Header(text, header_name=header_name, charset=CHARSET, errors='replace').encode() def addr_header_encode(text, header_name=None): """Encode and line-wrap the value of an email header field containing email addresses.""" return Header( ', '.join( formataddr((header_encode(name), emailaddr)) for name, emailaddr in getaddresses([text]) ), header_name=header_name ).encode() class Config(object): def __init__(self, section, git_config=None): """Represent a section of the git configuration. 
If git_config is specified, it is passed to "git config" in the GIT_CONFIG environment variable, meaning that "git config" will read the specified path rather than the Git default config paths.""" self.section = section if git_config: self.env = os.environ.copy() self.env['GIT_CONFIG'] = git_config else: self.env = None @staticmethod def _split(s): """Split NUL-terminated values.""" words = s.split('\0') assert words[-1] == '' return words[:-1] def get(self, name, default=None): try: values = self._split(read_git_output( ['config', '--get', '--null', '%s.%s' % (self.section, name)], env=self.env, keepends=True, )) assert len(values) == 1 return values[0] except CommandError: return default def get_bool(self, name, default=None): try: value = read_git_output( ['config', '--get', '--bool', '%s.%s' % (self.section, name)], env=self.env, ) except CommandError: return default return value == 'true' def get_all(self, name, default=None): """Read a (possibly multivalued) setting from the configuration. Return the result as a list of values, or default if the name is unset.""" try: return self._split(read_git_output( ['config', '--get-all', '--null', '%s.%s' % (self.section, name)], env=self.env, keepends=True, )) except CommandError, e: if e.retcode == 1: # "the section or key is invalid"; i.e., there is no # value for the specified key. return default else: raise def get_recipients(self, name, default=None): """Read a recipients list from the configuration. Return the result as a comma-separated list of email addresses, or default if the option is unset. If the setting has multiple values, concatenate them with comma separators.""" lines = self.get_all(name, default=None) if lines is None: return default return ', '.join(line.strip() for line in lines) def set(self, name, value): read_git_output( ['config', '%s.%s' % (self.section, name), value], env=self.env, ) def add(self, name, value): read_git_output( ['config', '--add', '%s.%s' % (self.section, name), value], env=self.env, ) def __contains__(self, name): return self.get_all(name, default=None) is not None # We don't use this method anymore internally, but keep it here in # case somebody is calling it from their own code: def has_key(self, name): return name in self def unset_all(self, name): try: read_git_output( ['config', '--unset-all', '%s.%s' % (self.section, name)], env=self.env, ) except CommandError, e: if e.retcode == 5: # The name doesn't exist, which is what we wanted anyway... pass else: raise def set_recipients(self, name, value): self.unset_all(name) for pair in getaddresses([value]): self.add(name, formataddr(pair)) def generate_summaries(*log_args): """Generate a brief summary for each revision requested. log_args are strings that will be passed directly to "git log" as revision selectors. Iterate over (sha1_short, subject) for each commit specified by log_args (subject is the first line of the commit message as a string without EOLs).""" cmd = [ 'log', '--abbrev', '--format=%h %s', ] + list(log_args) + ['--'] for line in read_git_lines(cmd): yield tuple(line.split(' ', 1)) def limit_lines(lines, max_lines): for (index, line) in enumerate(lines): if index < max_lines: yield line if index >= max_lines: yield '... %d lines suppressed ...\n' % (index + 1 - max_lines,) def limit_linelength(lines, max_linelength): for line in lines: # Don't forget that lines always include a trailing newline. 
        if len(line) > max_linelength + 1:
            line = line[:max_linelength - 7] + ' [...]\n'
        yield line


class CommitSet(object):
    """A (constant) set of object names.

    The set should be initialized with full SHA1 object names.  The
    __contains__() method returns True iff its argument is an
    abbreviation of any of the names in the set."""

    def __init__(self, names):
        self._names = sorted(names)

    def __len__(self):
        return len(self._names)

    def __contains__(self, sha1_abbrev):
        """Return True iff this set contains sha1_abbrev (which might be abbreviated)."""

        i = bisect.bisect_left(self._names, sha1_abbrev)
        return i < len(self) and self._names[i].startswith(sha1_abbrev)


class GitObject(object):
    def __init__(self, sha1, type=None):
        if sha1 == ZEROS:
            self.sha1 = self.type = self.commit_sha1 = None
        else:
            self.sha1 = sha1
            self.type = type or read_git_output(['cat-file', '-t', self.sha1])

            if self.type == 'commit':
                self.commit_sha1 = self.sha1
            elif self.type == 'tag':
                try:
                    self.commit_sha1 = read_git_output(
                        ['rev-parse', '--verify', '%s^0' % (self.sha1,)]
                        )
                except CommandError:
                    # Cannot deref tag to determine commit_sha1
                    self.commit_sha1 = None
            else:
                self.commit_sha1 = None

        self.short = read_git_output(['rev-parse', '--short', sha1])

    def get_summary(self):
        """Return (sha1_short, subject) for this commit."""

        if not self.sha1:
            raise ValueError('Empty commit has no summary')

        return iter(generate_summaries('--no-walk', self.sha1)).next()

    def __eq__(self, other):
        return isinstance(other, GitObject) and self.sha1 == other.sha1

    def __hash__(self):
        return hash(self.sha1)

    def __nonzero__(self):
        return bool(self.sha1)

    def __str__(self):
        return self.sha1 or ZEROS


class Change(object):
    """A Change that has been made to the Git repository.

    Abstract class from which both Revisions and ReferenceChanges are
    derived.  A Change knows how to generate a notification email
    describing itself."""

    def __init__(self, environment):
        self.environment = environment
        self._values = None

    def _compute_values(self):
        """Return a dictionary {keyword: expansion} for this Change.

        Derived classes overload this method to add more entries to
        the return value.  This method is used internally by
        get_values().  The return value should always be a new
        dictionary."""

        return self.environment.get_values()

    def get_values(self, **extra_values):
        """Return a dictionary {keyword: expansion} for this Change.

        Return a dictionary mapping keywords to the values that they
        should be expanded to for this Change (used when interpolating
        template strings).  If any keyword arguments are supplied, add
        those to the return value as well.  The return value is always
        a new dictionary."""

        if self._values is None:
            self._values = self._compute_values()

        values = self._values.copy()
        if extra_values:
            values.update(extra_values)
        return values

    def expand(self, template, **extra_values):
        """Expand template.

        Expand the template (which should be a string) using string
        interpolation of the values for this Change.  If any keyword
        arguments are provided, also include those in the keywords
        available for interpolation."""

        return template % self.get_values(**extra_values)

    def expand_lines(self, template, **extra_values):
        """Break template into lines and expand each line."""

        values = self.get_values(**extra_values)
        for line in template.splitlines(True):
            yield line % values

    def expand_header_lines(self, template, **extra_values):
        """Break template into lines and expand each line as an RFC 2822 header.

        Encode values and split up lines that are too long.
Silently skip lines that contain references to unknown variables.""" values = self.get_values(**extra_values) for line in template.splitlines(): (name, value) = line.split(':', 1) try: value = value % values except KeyError, e: if DEBUG: self.environment.log_warning( 'Warning: unknown variable %r in the following line; line skipped:\n' ' %s\n' % (e.args[0], line,) ) else: if name.lower() in ADDR_HEADERS: value = addr_header_encode(value, name) else: value = header_encode(value, name) for splitline in ('%s: %s\n' % (name, value)).splitlines(True): yield splitline def generate_email_header(self): """Generate the RFC 2822 email headers for this Change, a line at a time. The output should not include the trailing blank line.""" raise NotImplementedError() def generate_email_intro(self): """Generate the email intro for this Change, a line at a time. The output will be used as the standard boilerplate at the top of the email body.""" raise NotImplementedError() def generate_email_body(self): """Generate the main part of the email body, a line at a time. The text in the body might be truncated after a specified number of lines (see multimailhook.emailmaxlines).""" raise NotImplementedError() def generate_email_footer(self): """Generate the footer of the email, a line at a time. The footer is always included, irrespective of multimailhook.emailmaxlines.""" raise NotImplementedError() def generate_email(self, push, body_filter=None, extra_header_values={}): """Generate an email describing this change. Iterate over the lines (including the header lines) of an email describing this change. If body_filter is not None, then use it to filter the lines that are intended for the email body. The extra_header_values field is received as a dict and not as **kwargs, to allow passing other keyword arguments in the future (e.g. 
        passing extra values to generate_email_intro())."""

        for line in self.generate_email_header(**extra_header_values):
            yield line
        yield '\n'
        for line in self.generate_email_intro():
            yield line
        body = self.generate_email_body(push)
        if body_filter is not None:
            body = body_filter(body)
        for line in body:
            yield line
        for line in self.generate_email_footer():
            yield line


class Revision(Change):
    """A Change consisting of a single git commit."""

    CC_RE = re.compile(r'^\s*C[Cc]:\s*(?P<to>[^#]+@[^\s#]*)\s*(#.*)?$')

    def __init__(self, reference_change, rev, num, tot):
        Change.__init__(self, reference_change.environment)
        self.reference_change = reference_change
        self.rev = rev
        self.change_type = self.reference_change.change_type
        self.refname = self.reference_change.refname
        self.num = num
        self.tot = tot
        self.author = read_git_output(['log', '--no-walk', '--format=%aN <%aE>', self.rev.sha1])
        self.recipients = self.environment.get_revision_recipients(self)

        self.cc_recipients = ''
        if self.environment.get_scancommitforcc():
            self.cc_recipients = ', '.join(to.strip() for to in self._cc_recipients())
            if self.cc_recipients:
                self.environment.log_msg(
                    'Add %s to CC for %s\n' % (self.cc_recipients, self.rev.sha1))

    def _cc_recipients(self):
        cc_recipients = []
        message = read_git_output(['log', '--no-walk', '--format=%b', self.rev.sha1])
        lines = message.strip().split('\n')
        for line in lines:
            m = re.match(self.CC_RE, line)
            if m:
                cc_recipients.append(m.group('to'))

        return cc_recipients

    def _compute_values(self):
        values = Change._compute_values(self)

        oneline = read_git_output(
            ['log', '--format=%s', '--no-walk', self.rev.sha1]
            )

        values['rev'] = self.rev.sha1
        values['rev_short'] = self.rev.short
        values['change_type'] = self.change_type
        values['refname'] = self.refname
        values['short_refname'] = self.reference_change.short_refname
        values['refname_type'] = self.reference_change.refname_type
        values['reply_to_msgid'] = self.reference_change.msgid
        values['num'] = self.num
        values['tot'] = self.tot
        values['recipients'] = self.recipients
        if self.cc_recipients:
            values['cc_recipients'] = self.cc_recipients
        values['oneline'] = oneline
        values['author'] = self.author

        reply_to = self.environment.get_reply_to_commit(self)
        if reply_to:
            values['reply_to'] = reply_to

        return values

    def generate_email_header(self, **extra_values):
        for line in self.expand_header_lines(
                REVISION_HEADER_TEMPLATE, **extra_values
                ):
            yield line

    def generate_email_intro(self):
        for line in self.expand_lines(REVISION_INTRO_TEMPLATE):
            yield line

    def generate_email_body(self, push):
        """Show this revision."""

        return read_git_lines(
            ['log'] + self.environment.commitlogopts + ['-1', self.rev.sha1],
            keepends=True,
            )

    def generate_email_footer(self):
        return self.expand_lines(REVISION_FOOTER_TEMPLATE)


class ReferenceChange(Change):
    """A Change to a Git reference.

    An abstract class representing a create, update, or delete of a
    Git reference.  Derived classes handle specific types of reference
    (e.g., tags vs. branches).  These classes generate the main
    reference change email summarizing the reference change and
    whether it caused any commits to be added or removed.

    ReferenceChange objects are usually created using the static
    create() method, which has the logic to decide which derived class
    to instantiate."""

    REF_RE = re.compile(r'^refs\/(?P<area>[^\/]+)\/(?P<shortname>.*)$')

    @staticmethod
    def create(environment, oldrev, newrev, refname):
        """Return a ReferenceChange object representing the change.

        Return an object that represents the type of change that is
        being made.
oldrev and newrev should be SHA1s or ZEROS.""" old = GitObject(oldrev) new = GitObject(newrev) rev = new or old # The revision type tells us what type the commit is, combined with # the location of the ref we can decide between # - working branch # - tracking branch # - unannotated tag # - annotated tag m = ReferenceChange.REF_RE.match(refname) if m: area = m.group('area') short_refname = m.group('shortname') else: area = '' short_refname = refname if rev.type == 'tag': # Annotated tag: klass = AnnotatedTagChange elif rev.type == 'commit': if area == 'tags': # Non-annotated tag: klass = NonAnnotatedTagChange elif area == 'heads': # Branch: klass = BranchChange elif area == 'remotes': # Tracking branch: environment.log_warning( '*** Push-update of tracking branch %r\n' '*** - incomplete email generated.\n' % (refname,) ) klass = OtherReferenceChange else: # Some other reference namespace: environment.log_warning( '*** Push-update of strange reference %r\n' '*** - incomplete email generated.\n' % (refname,) ) klass = OtherReferenceChange else: # Anything else (is there anything else?) environment.log_warning( '*** Unknown type of update to %r (%s)\n' '*** - incomplete email generated.\n' % (refname, rev.type,) ) klass = OtherReferenceChange return klass( environment, refname=refname, short_refname=short_refname, old=old, new=new, rev=rev, ) def __init__(self, environment, refname, short_refname, old, new, rev): Change.__init__(self, environment) self.change_type = { (False, True): 'create', (True, True): 'update', (True, False): 'delete', }[bool(old), bool(new)] self.refname = refname self.short_refname = short_refname self.old = old self.new = new self.rev = rev self.msgid = make_msgid() self.diffopts = environment.diffopts self.graphopts = environment.graphopts self.logopts = environment.logopts self.commitlogopts = environment.commitlogopts self.showgraph = environment.refchange_showgraph self.showlog = environment.refchange_showlog self.header_template = REFCHANGE_HEADER_TEMPLATE self.intro_template = REFCHANGE_INTRO_TEMPLATE self.footer_template = FOOTER_TEMPLATE def _compute_values(self): values = Change._compute_values(self) values['change_type'] = self.change_type values['refname_type'] = self.refname_type values['refname'] = self.refname values['short_refname'] = self.short_refname values['msgid'] = self.msgid values['recipients'] = self.recipients values['oldrev'] = str(self.old) values['oldrev_short'] = self.old.short values['newrev'] = str(self.new) values['newrev_short'] = self.new.short if self.old: values['oldrev_type'] = self.old.type if self.new: values['newrev_type'] = self.new.type reply_to = self.environment.get_reply_to_refchange(self) if reply_to: values['reply_to'] = reply_to return values def send_single_combined_email(self, known_added_sha1s): """Determine if a combined refchange/revision email should be sent If there is only a single new (non-merge) commit added by a change, it is useful to combine the ReferenceChange and Revision emails into one. In such a case, return the single revision; otherwise, return None. This method is overridden in BranchChange.""" return None def generate_combined_email(self, push, revision, body_filter=None, extra_header_values={}): """Generate an email describing this change AND specified revision. Iterate over the lines (including the header lines) of an email describing this change. If body_filter is not None, then use it to filter the lines that are intended for the email body. 
        The extra_header_values field is received as a dict and not
        as **kwargs, to allow passing other keyword arguments in the
        future (e.g. passing extra values to generate_email_intro()).

        This method is overridden in BranchChange."""

        raise NotImplementedError

    def get_subject(self):
        template = {
            'create': REF_CREATED_SUBJECT_TEMPLATE,
            'update': REF_UPDATED_SUBJECT_TEMPLATE,
            'delete': REF_DELETED_SUBJECT_TEMPLATE,
            }[self.change_type]
        return self.expand(template)

    def generate_email_header(self, **extra_values):
        if 'subject' not in extra_values:
            extra_values['subject'] = self.get_subject()

        for line in self.expand_header_lines(
                self.header_template, **extra_values
                ):
            yield line

    def generate_email_intro(self):
        for line in self.expand_lines(self.intro_template):
            yield line

    def generate_email_body(self, push):
        """Call the appropriate body-generation routine.

        Call one of generate_create_summary() /
        generate_update_summary() / generate_delete_summary()."""

        change_summary = {
            'create': self.generate_create_summary,
            'delete': self.generate_delete_summary,
            'update': self.generate_update_summary,
            }[self.change_type](push)
        for line in change_summary:
            yield line

        for line in self.generate_revision_change_summary(push):
            yield line

    def generate_email_footer(self):
        return self.expand_lines(self.footer_template)

    def generate_revision_change_graph(self, push):
        if self.showgraph:
            args = ['--graph'] + self.graphopts
            for newold in ('new', 'old'):
                has_newold = False
                spec = push.get_commits_spec(newold, self)
                for line in git_log(spec, args=args, keepends=True):
                    if not has_newold:
                        has_newold = True
                        yield '\n'
                        yield 'Graph of %s commits:\n\n' % (
                            {'new': 'new', 'old': 'discarded'}[newold],)
                    yield ' ' + line
                if has_newold:
                    yield '\n'

    def generate_revision_change_log(self, new_commits_list):
        if self.showlog:
            yield '\n'
            yield 'Detailed log of new commits:\n\n'
            for line in read_git_lines(
                    ['log', '--no-walk'] + self.logopts + new_commits_list + ['--'],
                    keepends=True,
                    ):
                yield line

    def generate_new_revision_summary(self, tot, new_commits_list, push):
        for line in self.expand_lines(NEW_REVISIONS_TEMPLATE, tot=tot):
            yield line
        for line in self.generate_revision_change_graph(push):
            yield line
        for line in self.generate_revision_change_log(new_commits_list):
            yield line

    def generate_revision_change_summary(self, push):
        """Generate a summary of the revisions added/removed by this change."""

        if self.new.commit_sha1 and not self.old.commit_sha1:
            # A new reference was created.  List the new revisions
            # brought by the new reference (i.e., those revisions that
            # were not in the repository before this reference
            # change).
            sha1s = list(push.get_new_commits(self))
            sha1s.reverse()
            tot = len(sha1s)
            new_revisions = [
                Revision(self, GitObject(sha1), num=i + 1, tot=tot)
                for (i, sha1) in enumerate(sha1s)
                ]

            if new_revisions:
                yield self.expand('This %(refname_type)s includes the following new commits:\n')
                yield '\n'
                for r in new_revisions:
                    (sha1, subject) = r.rev.get_summary()
                    yield r.expand(
                        BRIEF_SUMMARY_TEMPLATE, action='new', text=subject,
                        )
                yield '\n'
                for line in self.generate_new_revision_summary(
                        tot, [r.rev.sha1 for r in new_revisions], push):
                    yield line
            else:
                for line in self.expand_lines(NO_NEW_REVISIONS_TEMPLATE):
                    yield line

        elif self.new.commit_sha1 and self.old.commit_sha1:
            # A reference was changed to point at a different commit.
            # List the revisions that were removed and/or added *from
            # that reference* by this reference change, along with a
            # diff between the trees for its old and new values.
# List of the revisions that were added to the branch by # this update. Note this list can include revisions that # have already had notification emails; we want such # revisions in the summary even though we will not send # new notification emails for them. adds = list(generate_summaries( '--topo-order', '--reverse', '%s..%s' % (self.old.commit_sha1, self.new.commit_sha1,) )) # List of the revisions that were removed from the branch # by this update. This will be empty except for # non-fast-forward updates. discards = list(generate_summaries( '%s..%s' % (self.new.commit_sha1, self.old.commit_sha1,) )) if adds: new_commits_list = push.get_new_commits(self) else: new_commits_list = [] new_commits = CommitSet(new_commits_list) if discards: discarded_commits = CommitSet(push.get_discarded_commits(self)) else: discarded_commits = CommitSet([]) if discards and adds: for (sha1, subject) in discards: if sha1 in discarded_commits: action = 'discards' else: action = 'omits' yield self.expand( BRIEF_SUMMARY_TEMPLATE, action=action, rev_short=sha1, text=subject, ) for (sha1, subject) in adds: if sha1 in new_commits: action = 'new' else: action = 'adds' yield self.expand( BRIEF_SUMMARY_TEMPLATE, action=action, rev_short=sha1, text=subject, ) yield '\n' for line in self.expand_lines(NON_FF_TEMPLATE): yield line elif discards: for (sha1, subject) in discards: if sha1 in discarded_commits: action = 'discards' else: action = 'omits' yield self.expand( BRIEF_SUMMARY_TEMPLATE, action=action, rev_short=sha1, text=subject, ) yield '\n' for line in self.expand_lines(REWIND_ONLY_TEMPLATE): yield line elif adds: (sha1, subject) = self.old.get_summary() yield self.expand( BRIEF_SUMMARY_TEMPLATE, action='from', rev_short=sha1, text=subject, ) for (sha1, subject) in adds: if sha1 in new_commits: action = 'new' else: action = 'adds' yield self.expand( BRIEF_SUMMARY_TEMPLATE, action=action, rev_short=sha1, text=subject, ) yield '\n' if new_commits: for line in self.generate_new_revision_summary( len(new_commits), new_commits_list, push): yield line else: for line in self.expand_lines(NO_NEW_REVISIONS_TEMPLATE): yield line for line in self.generate_revision_change_graph(push): yield line # The diffstat is shown from the old revision to the new # revision. This is to show the truth of what happened in # this change. There's no point showing the stat from the # base to the new revision because the base is effectively a # random revision at this point - the user will be interested # in what this revision changed - including the undoing of # previous revisions in the case of non-fast-forward updates. yield '\n' yield 'Summary of changes:\n' for line in read_git_lines( ['diff-tree'] + self.diffopts + ['%s..%s' % (self.old.commit_sha1, self.new.commit_sha1,)], keepends=True, ): yield line elif self.old.commit_sha1 and not self.new.commit_sha1: # A reference was deleted. List the revisions that were # removed from the repository by this reference change. 
sha1s = list(push.get_discarded_commits(self)) tot = len(sha1s) discarded_revisions = [ Revision(self, GitObject(sha1), num=i + 1, tot=tot) for (i, sha1) in enumerate(sha1s) ] if discarded_revisions: for line in self.expand_lines(DISCARDED_REVISIONS_TEMPLATE): yield line yield '\n' for r in discarded_revisions: (sha1, subject) = r.rev.get_summary() yield r.expand( BRIEF_SUMMARY_TEMPLATE, action='discards', text=subject, ) for line in self.generate_revision_change_graph(push): yield line else: for line in self.expand_lines(NO_DISCARDED_REVISIONS_TEMPLATE): yield line elif not self.old.commit_sha1 and not self.new.commit_sha1: for line in self.expand_lines(NON_COMMIT_UPDATE_TEMPLATE): yield line def generate_create_summary(self, push): """Called for the creation of a reference.""" # This is a new reference and so oldrev is not valid (sha1, subject) = self.new.get_summary() yield self.expand( BRIEF_SUMMARY_TEMPLATE, action='at', rev_short=sha1, text=subject, ) yield '\n' def generate_update_summary(self, push): """Called for the change of a pre-existing branch.""" return iter([]) def generate_delete_summary(self, push): """Called for the deletion of any type of reference.""" (sha1, subject) = self.old.get_summary() yield self.expand( BRIEF_SUMMARY_TEMPLATE, action='was', rev_short=sha1, text=subject, ) yield '\n' class BranchChange(ReferenceChange): refname_type = 'branch' def __init__(self, environment, refname, short_refname, old, new, rev): ReferenceChange.__init__( self, environment, refname=refname, short_refname=short_refname, old=old, new=new, rev=rev, ) self.recipients = environment.get_refchange_recipients(self) self._single_revision = None def send_single_combined_email(self, known_added_sha1s): if not self.environment.combine_when_single_commit: return None # In the sadly-all-too-frequent usecase of people pushing only # one of their commits at a time to a repository, users feel # the reference change summary emails are noise rather than # important signal. This is because, in this particular # usecase, there is a reference change summary email for each # new commit, and all these summaries do is point out that # there is one new commit (which can readily be inferred by # the existence of the individual revision email that is also # sent). In such cases, our users prefer there to be a combined # reference change summary/new revision email. # # So, if the change is an update and it doesn't discard any # commits, and it adds exactly one non-merge commit (gerrit # forces a workflow where every commit is individually merged # and the git-multimail hook fired off for just this one # change), then we send a combined refchange/revision email. try: # If this change is a reference update that doesn't discard # any commits... 
            if self.change_type != 'update':
                return None
            if read_git_lines(
                    ['merge-base', self.old.sha1, self.new.sha1]
                    ) != [self.old.sha1]:
                return None

            # Check if this update introduced exactly one non-merge
            # commit:

            def split_line(line):
                """Split line into (sha1, [parent,...])."""

                words = line.split()
                return (words[0], words[1:])

            # Get the new commits introduced by the push as a list of
            # (sha1, [parent,...])
            new_commits = [
                split_line(line)
                for line in read_git_lines(
                    [
                        'log', '-3', '--format=%H %P',
                        '%s..%s' % (self.old.sha1, self.new.sha1),
                        ]
                    )
                ]

            if not new_commits:
                return None

            # If the newest commit is a merge, save it for a later check
            # but otherwise ignore it
            merge = None
            tot = len(new_commits)
            if len(new_commits[0][1]) > 1:
                merge = new_commits[0][0]
                del new_commits[0]

            # Our primary check: we can't combine if more than one commit
            # is introduced.  We also currently only combine if the new
            # commit is a non-merge commit, though it may make sense to
            # combine if it is a merge as well.
            if not (
                    len(new_commits) == 1 and
                    len(new_commits[0][1]) == 1 and
                    new_commits[0][0] in known_added_sha1s
                    ):
                return None

            # We do not want to combine revision and refchange emails if
            # those go to separate locations.
            rev = Revision(self, GitObject(new_commits[0][0]), 1, tot)
            if rev.recipients != self.recipients:
                return None

            # We ignored the newest commit if it was just a merge of the one
            # commit being introduced.  But we don't want to ignore that
            # merge commit if it involved conflict resolutions.  Check that.
            if merge and merge != read_git_output(['diff-tree', '--cc', merge]):
                return None

            # We can combine the refchange and one new revision emails
            # into one.  Return the Revision that a combined email should
            # be sent about.
            return rev
        except CommandError:
            # Cannot determine number of commits in old..new or new..old;
            # don't combine reference/revision emails:
            return None

    def generate_combined_email(self, push, revision, body_filter=None, extra_header_values={}):
        values = revision.get_values()
        if extra_header_values:
            values.update(extra_header_values)
        if 'subject' not in extra_header_values:
            values['subject'] = self.expand(COMBINED_REFCHANGE_REVISION_SUBJECT_TEMPLATE, **values)

        self._single_revision = revision
        self.header_template = COMBINED_HEADER_TEMPLATE
        self.intro_template = COMBINED_INTRO_TEMPLATE
        self.footer_template = COMBINED_FOOTER_TEMPLATE
        for line in self.generate_email(push, body_filter, values):
            yield line

    def generate_email_body(self, push):
        '''Call the appropriate body generation routine.

        If this is a combined refchange/revision email, the special logic
        for handling this combined email comes from this function.  For
        other cases, we just use the normal handling.'''

        # If self._single_revision isn't set, don't override
        if not self._single_revision:
            for line in super(BranchChange, self).generate_email_body(push):
                yield line
            return

        # This is a combined refchange/revision email; we first provide
        # some info from the refchange portion, and then call the revision
        # generate_email_body function to handle the revision portion.
adds = list(generate_summaries( '--topo-order', '--reverse', '%s..%s' % (self.old.commit_sha1, self.new.commit_sha1,) )) yield self.expand("The following commit(s) were added to %(refname)s by this push:\n") for (sha1, subject) in adds: yield self.expand( BRIEF_SUMMARY_TEMPLATE, action='new', rev_short=sha1, text=subject, ) yield self._single_revision.rev.short + " is described below\n" yield '\n' for line in self._single_revision.generate_email_body(push): yield line class AnnotatedTagChange(ReferenceChange): refname_type = 'annotated tag' def __init__(self, environment, refname, short_refname, old, new, rev): ReferenceChange.__init__( self, environment, refname=refname, short_refname=short_refname, old=old, new=new, rev=rev, ) self.recipients = environment.get_announce_recipients(self) self.show_shortlog = environment.announce_show_shortlog ANNOTATED_TAG_FORMAT = ( '%(*objectname)\n' '%(*objecttype)\n' '%(taggername)\n' '%(taggerdate)' ) def describe_tag(self, push): """Describe the new value of an annotated tag.""" # Use git for-each-ref to pull out the individual fields from # the tag [tagobject, tagtype, tagger, tagged] = read_git_lines( ['for-each-ref', '--format=%s' % (self.ANNOTATED_TAG_FORMAT,), self.refname], ) yield self.expand( BRIEF_SUMMARY_TEMPLATE, action='tagging', rev_short=tagobject, text='(%s)' % (tagtype,), ) if tagtype == 'commit': # If the tagged object is a commit, then we assume this is a # release, and so we calculate which tag this tag is # replacing try: prevtag = read_git_output(['describe', '--abbrev=0', '%s^' % (self.new,)]) except CommandError: prevtag = None if prevtag: yield ' replaces %s\n' % (prevtag,) else: prevtag = None yield ' length %s bytes\n' % (read_git_output(['cat-file', '-s', tagobject]),) yield ' tagged by %s\n' % (tagger,) yield ' on %s\n' % (tagged,) yield '\n' # Show the content of the tag message; this might contain a # change log or release notes so is worth displaying. yield LOGBEGIN contents = list(read_git_lines(['cat-file', 'tag', self.new.sha1], keepends=True)) contents = contents[contents.index('\n') + 1:] if contents and contents[-1][-1:] != '\n': contents.append('\n') for line in contents: yield line if self.show_shortlog and tagtype == 'commit': # Only commit tags make sense to have rev-list operations # performed on them yield '\n' if prevtag: # Show changes since the previous release revlist = read_git_output( ['rev-list', '--pretty=short', '%s..%s' % (prevtag, self.new,)], keepends=True, ) else: # No previous tag, show all the changes since time # began revlist = read_git_output( ['rev-list', '--pretty=short', '%s' % (self.new,)], keepends=True, ) for line in read_git_lines(['shortlog'], input=revlist, keepends=True): yield line yield LOGEND yield '\n' def generate_create_summary(self, push): """Called for the creation of an annotated tag.""" for line in self.expand_lines(TAG_CREATED_TEMPLATE): yield line for line in self.describe_tag(push): yield line def generate_update_summary(self, push): """Called for the update of an annotated tag. 
        This is probably a rare event and may not even be allowed."""

        for line in self.expand_lines(TAG_UPDATED_TEMPLATE):
            yield line
        for line in self.describe_tag(push):
            yield line

    def generate_delete_summary(self, push):
        """Called for the deletion of an annotated tag."""

        for line in self.expand_lines(TAG_DELETED_TEMPLATE):
            yield line

        yield self.expand(' tag was %(oldrev_short)s\n')
        yield '\n'


class NonAnnotatedTagChange(ReferenceChange):
    refname_type = 'tag'

    def __init__(self, environment, refname, short_refname, old, new, rev):
        ReferenceChange.__init__(
            self, environment,
            refname=refname, short_refname=short_refname,
            old=old, new=new, rev=rev,
            )
        self.recipients = environment.get_refchange_recipients(self)

    def generate_create_summary(self, push):
        """Called for the creation of a non-annotated tag."""

        for line in self.expand_lines(TAG_CREATED_TEMPLATE):
            yield line

    def generate_update_summary(self, push):
        """Called when a non-annotated reference is updated."""

        for line in self.expand_lines(TAG_UPDATED_TEMPLATE):
            yield line

    def generate_delete_summary(self, push):
        """Called when a non-annotated reference is deleted."""

        for line in self.expand_lines(TAG_DELETED_TEMPLATE):
            yield line

        for line in ReferenceChange.generate_delete_summary(self, push):
            yield line


class OtherReferenceChange(ReferenceChange):
    refname_type = 'reference'

    def __init__(self, environment, refname, short_refname, old, new, rev):
        # We use the full refname as short_refname, because otherwise
        # the full name of the reference would not be obvious from the
        # text of the email.
        ReferenceChange.__init__(
            self, environment,
            refname=refname, short_refname=refname,
            old=old, new=new, rev=rev,
            )
        self.recipients = environment.get_refchange_recipients(self)


class Mailer(object):
    """An object that can send emails."""

    def send(self, lines, to_addrs):
        """Send an email consisting of lines.

        lines must be an iterable over the lines constituting the
        header and body of the email.  to_addrs is a list of recipient
        addresses (can be needed even if lines already contains a
        "To:" field).  It can be either a string (comma-separated list
        of email addresses) or a Python list of individual email
        addresses.
        """

        raise NotImplementedError()


class SendMailer(Mailer):
    """Send emails using 'sendmail -oi -t'."""

    SENDMAIL_CANDIDATES = [
        '/usr/sbin/sendmail',
        '/usr/lib/sendmail',
        ]

    @staticmethod
    def find_sendmail():
        for path in SendMailer.SENDMAIL_CANDIDATES:
            if os.access(path, os.X_OK):
                return path
        else:
            raise ConfigurationException(
                'No sendmail executable found. '
                'Try setting multimailhook.sendmailCommand.'
                )

    def __init__(self, command=None, envelopesender=None):
        """Construct a SendMailer instance.

        command should be the command and arguments used to invoke
        sendmail, as a list of strings.
If an envelopesender is provided, it will also be passed to the command, via '-f envelopesender'.""" if command: self.command = command[:] else: self.command = [self.find_sendmail(), '-oi', '-t'] if envelopesender: self.command.extend(['-f', envelopesender]) def send(self, lines, to_addrs): try: p = subprocess.Popen(self.command, stdin=subprocess.PIPE) except OSError, e: sys.stderr.write( '*** Cannot execute command: %s\n' % ' '.join(self.command) + '*** %s\n' % str(e) + '*** Try setting multimailhook.mailer to "smtp"\n' '*** to send emails without using the sendmail command.\n' ) sys.exit(1) try: p.stdin.writelines(lines) except Exception, e: sys.stderr.write( '*** Error while generating commit email\n' '*** - mail sending aborted.\n' ) try: # subprocess.terminate() is not available in Python 2.4 p.terminate() except AttributeError: pass raise e else: p.stdin.close() retcode = p.wait() if retcode: raise CommandError(self.command, retcode) class SMTPMailer(Mailer): """Send emails using Python's smtplib.""" def __init__(self, envelopesender, smtpserver, smtpservertimeout=10.0, smtpserverdebuglevel=0, smtpencryption='none', smtpuser='', smtppass='', ): if not envelopesender: sys.stderr.write( 'fatal: git_multimail: cannot use SMTPMailer without a sender address.\n' 'please set either multimailhook.envelopeSender or user.email\n' ) sys.exit(1) if smtpencryption == 'ssl' and not (smtpuser and smtppass): raise ConfigurationException( 'Cannot use SMTPMailer with security option ssl ' 'without options username and password.' ) self.envelopesender = envelopesender self.smtpserver = smtpserver self.smtpservertimeout = smtpservertimeout self.smtpserverdebuglevel = smtpserverdebuglevel self.security = smtpencryption self.username = smtpuser self.password = smtppass try: def call(klass, server, timeout): try: return klass(server, timeout=timeout) except TypeError: # Old Python versions do not have timeout= argument. return klass(server) if self.security == 'none': self.smtp = call(smtplib.SMTP, self.smtpserver, timeout=self.smtpservertimeout) elif self.security == 'ssl': self.smtp = call(smtplib.SMTP_SSL, self.smtpserver, timeout=self.smtpservertimeout) elif self.security == 'tls': if ':' not in self.smtpserver: self.smtpserver += ':587' # default port for TLS self.smtp = call(smtplib.SMTP, self.smtpserver, timeout=self.smtpservertimeout) self.smtp.ehlo() self.smtp.starttls() self.smtp.ehlo() else: sys.stdout.write('*** Error: Control reached an invalid option. ***') sys.exit(1) if self.smtpserverdebuglevel > 0: sys.stdout.write( "*** Setting debug on for SMTP server connection (%s) ***\n" % self.smtpserverdebuglevel) self.smtp.set_debuglevel(self.smtpserverdebuglevel) except Exception, e: sys.stderr.write( '*** Error establishing SMTP connection to %s ***\n' % self.smtpserver) sys.stderr.write('*** %s\n' % str(e)) sys.exit(1) def __del__(self): if hasattr(self, 'smtp'): self.smtp.quit() def send(self, lines, to_addrs): try: if self.username or self.password: sys.stderr.write("*** Authenticating as %s ***\n" % self.username) self.smtp.login(self.username, self.password) msg = ''.join(lines) # turn comma-separated list into Python list if needed. 
if isinstance(to_addrs, basestring): to_addrs = [email for (name, email) in getaddresses([to_addrs])] self.smtp.sendmail(self.envelopesender, to_addrs, msg) except Exception, e: sys.stderr.write('*** Error sending email ***\n') sys.stderr.write('*** %s\n' % str(e)) self.smtp.quit() sys.exit(1) class OutputMailer(Mailer): """Write emails to an output stream, bracketed by lines of '=' characters. This is intended for debugging purposes.""" SEPARATOR = '=' * 75 + '\n' def __init__(self, f): self.f = f def send(self, lines, to_addrs): self.f.write(self.SEPARATOR) self.f.writelines(lines) self.f.write(self.SEPARATOR) def get_git_dir(): """Determine GIT_DIR. Determine GIT_DIR either from the GIT_DIR environment variable or from the working directory, using Git's usual rules.""" try: return read_git_output(['rev-parse', '--git-dir']) except CommandError: sys.stderr.write('fatal: git_multimail: not in a git directory\n') sys.exit(1) class Environment(object): """Describes the environment in which the push is occurring. An Environment object encapsulates information about the local environment. For example, it knows how to determine: * the name of the repository to which the push occurred * what user did the push * what users want to be informed about various types of changes. An Environment object is expected to have the following methods: get_repo_shortname() Return a short name for the repository, for display purposes. get_repo_path() Return the absolute path to the Git repository. get_emailprefix() Return a string that will be prefixed to every email's subject. get_pusher() Return the username of the person who pushed the changes. This value is used in the email body to indicate who pushed the change. get_pusher_email() (may return None) Return the email address of the person who pushed the changes. The value should be a single RFC 2822 email address as a string; e.g., "Joe User <[email protected]>" if available, otherwise "[email protected]". If set, the value is used as the Reply-To address for refchange emails. If it is impossible to determine the pusher's email, this attribute should be set to None (in which case no Reply-To header will be output). get_sender() Return the address to be used as the 'From' email address in the email envelope. get_fromaddr() Return the 'From' email address used in the email 'From:' headers. (May be a full RFC 2822 email address like 'Joe User <[email protected]>'.) get_administrator() Return the name and/or email of the repository administrator. This value is used in the footer as the person to whom requests to be removed from the notification list should be sent. Ideally, it should include a valid email address. get_reply_to_refchange() get_reply_to_commit() Return the address to use in the email "Reply-To" header, as a string. These can be an RFC 2822 email address, or None to omit the "Reply-To" header. get_reply_to_refchange() is used for refchange emails; get_reply_to_commit() is used for individual commit emails. They should also define the following attributes: announce_show_shortlog (bool) True iff announce emails should include a shortlog. refchange_showgraph (bool) True iff refchanges emails should include a detailed graph. refchange_showlog (bool) True iff refchanges emails should include a detailed log. diffopts (list of strings) The options that should be passed to 'git diff' for the summary email. The value should be a list of strings representing words to be passed to the command. 
graphopts (list of strings) Analogous to diffopts, but contains options passed to 'git log --graph' when generating the detailed graph for a set of commits (see refchange_showgraph) logopts (list of strings) Analogous to diffopts, but contains options passed to 'git log' when generating the detailed log for a set of commits (see refchange_showlog) commitlogopts (list of strings) The options that should be passed to 'git log' for each commit mail. The value should be a list of strings representing words to be passed to the command. quiet (bool) On success do not write to stderr stdout (bool) Write email to stdout rather than emailing. Useful for debugging combine_when_single_commit (bool) True if a combined email should be produced when a single new commit is pushed to a branch, False otherwise. """ REPO_NAME_RE = re.compile(r'^(?P<name>.+?)(?:\.git)$') def __init__(self, osenv=None): self.osenv = osenv or os.environ self.announce_show_shortlog = False self.maxcommitemails = 500 self.diffopts = ['--stat', '--summary', '--find-copies-harder'] self.graphopts = ['--oneline', '--decorate'] self.logopts = [] self.refchange_showgraph = False self.refchange_showlog = False self.commitlogopts = ['-C', '--stat', '-p', '--cc'] self.quiet = False self.stdout = False self.combine_when_single_commit = True self.COMPUTED_KEYS = [ 'administrator', 'charset', 'emailprefix', 'fromaddr', 'pusher', 'pusher_email', 'repo_path', 'repo_shortname', 'sender', ] self._values = None def get_repo_shortname(self): """Use the last part of the repo path, with ".git" stripped off if present.""" basename = os.path.basename(os.path.abspath(self.get_repo_path())) m = self.REPO_NAME_RE.match(basename) if m: return m.group('name') else: return basename def get_pusher(self): raise NotImplementedError() def get_pusher_email(self): return None def get_fromaddr(self): config = Config('user') fromname = config.get('name', default='') fromemail = config.get('email', default='') if fromemail: return formataddr([fromname, fromemail]) return self.get_sender() def get_administrator(self): return 'the administrator of this repository' def get_emailprefix(self): return '' def get_repo_path(self): if read_git_output(['rev-parse', '--is-bare-repository']) == 'true': path = get_git_dir() else: path = read_git_output(['rev-parse', '--show-toplevel']) return os.path.abspath(path) def get_charset(self): return CHARSET def get_values(self): """Return a dictionary {keyword: expansion} for this Environment. This method is called by Change._compute_values(). The keys in the returned dictionary are available to be used in any of the templates. The dictionary is created by calling self.get_NAME() for each of the attributes named in COMPUTED_KEYS and recording those that do not return None. The return value is always a new dictionary.""" if self._values is None: values = {} for key in self.COMPUTED_KEYS: value = getattr(self, 'get_%s' % (key,))() if value is not None: values[key] = value self._values = values return self._values.copy() def get_refchange_recipients(self, refchange): """Return the recipients for notifications about refchange. Return the list of email addresses to which notifications about the specified ReferenceChange should be sent.""" raise NotImplementedError() def get_announce_recipients(self, annotated_tag_change): """Return the recipients for notifications about annotated_tag_change. 
Return the list of email addresses to which notifications about the specified AnnotatedTagChange should be sent.""" raise NotImplementedError() def get_reply_to_refchange(self, refchange): return self.get_pusher_email() def get_revision_recipients(self, revision): """Return the recipients for messages about revision. Return the list of email addresses to which notifications about the specified Revision should be sent. This method could be overridden, for example, to take into account the contents of the revision when deciding whom to notify about it. For example, there could be a scheme for users to express interest in particular files or subdirectories, and only receive notification emails for revisions that affecting those files.""" raise NotImplementedError() def get_reply_to_commit(self, revision): return revision.author def filter_body(self, lines): """Filter the lines intended for an email body. lines is an iterable over the lines that would go into the email body. Filter it (e.g., limit the number of lines, the line length, character set, etc.), returning another iterable. See FilterLinesEnvironmentMixin and MaxlinesEnvironmentMixin for classes implementing this functionality.""" return lines def log_msg(self, msg): """Write the string msg on a log file or on stderr. Sends the text to stderr by default, override to change the behavior.""" sys.stderr.write(msg) def log_warning(self, msg): """Write the string msg on a log file or on stderr. Sends the text to stderr by default, override to change the behavior.""" sys.stderr.write(msg) def log_error(self, msg): """Write the string msg on a log file or on stderr. Sends the text to stderr by default, override to change the behavior.""" sys.stderr.write(msg) class ConfigEnvironmentMixin(Environment): """A mixin that sets self.config to its constructor's config argument. This class's constructor consumes the "config" argument. Mixins that need to inspect the config should inherit from this class (1) to make sure that "config" is still in the constructor arguments with its own constructor runs and/or (2) to be sure that self.config is set after construction.""" def __init__(self, config, **kw): super(ConfigEnvironmentMixin, self).__init__(**kw) self.config = config class ConfigOptionsEnvironmentMixin(ConfigEnvironmentMixin): """An Environment that reads most of its information from "git config".""" def __init__(self, config, **kw): super(ConfigOptionsEnvironmentMixin, self).__init__( config=config, **kw ) for var, cfg in ( ('announce_show_shortlog', 'announceshortlog'), ('refchange_showgraph', 'refchangeShowGraph'), ('refchange_showlog', 'refchangeshowlog'), ('quiet', 'quiet'), ('stdout', 'stdout'), ): val = config.get_bool(cfg) if val is not None: setattr(self, var, val) maxcommitemails = config.get('maxcommitemails') if maxcommitemails is not None: try: self.maxcommitemails = int(maxcommitemails) except ValueError: self.log_warning( '*** Malformed value for multimailhook.maxCommitEmails: %s\n' % maxcommitemails + '*** Expected a number. 
Ignoring.\n' ) diffopts = config.get('diffopts') if diffopts is not None: self.diffopts = shlex.split(diffopts) graphopts = config.get('graphOpts') if graphopts is not None: self.graphopts = shlex.split(graphopts) logopts = config.get('logopts') if logopts is not None: self.logopts = shlex.split(logopts) commitlogopts = config.get('commitlogopts') if commitlogopts is not None: self.commitlogopts = shlex.split(commitlogopts) reply_to = config.get('replyTo') self.__reply_to_refchange = config.get('replyToRefchange', default=reply_to) if ( self.__reply_to_refchange is not None and self.__reply_to_refchange.lower() == 'author' ): raise ConfigurationException( '"author" is not an allowed setting for replyToRefchange' ) self.__reply_to_commit = config.get('replyToCommit', default=reply_to) combine = config.get_bool('combineWhenSingleCommit') if combine is not None: self.combine_when_single_commit = combine def get_administrator(self): return ( self.config.get('administrator') or self.get_sender() or super(ConfigOptionsEnvironmentMixin, self).get_administrator() ) def get_repo_shortname(self): return ( self.config.get('reponame') or super(ConfigOptionsEnvironmentMixin, self).get_repo_shortname() ) def get_emailprefix(self): emailprefix = self.config.get('emailprefix') if emailprefix is not None: emailprefix = emailprefix.strip() if emailprefix: return emailprefix + ' ' else: return '' else: return '[%s] ' % (self.get_repo_shortname(),) def get_sender(self): return self.config.get('envelopesender') def get_fromaddr(self): fromaddr = self.config.get('from') if fromaddr: return fromaddr return super(ConfigOptionsEnvironmentMixin, self).get_fromaddr() def get_reply_to_refchange(self, refchange): if self.__reply_to_refchange is None: return super(ConfigOptionsEnvironmentMixin, self).get_reply_to_refchange(refchange) elif self.__reply_to_refchange.lower() == 'pusher': return self.get_pusher_email() elif self.__reply_to_refchange.lower() == 'none': return None else: return self.__reply_to_refchange def get_reply_to_commit(self, revision): if self.__reply_to_commit is None: return super(ConfigOptionsEnvironmentMixin, self).get_reply_to_commit(revision) elif self.__reply_to_commit.lower() == 'author': return revision.author elif self.__reply_to_commit.lower() == 'pusher': return self.get_pusher_email() elif self.__reply_to_commit.lower() == 'none': return None else: return self.__reply_to_commit def get_scancommitforcc(self): return self.config.get('scancommitforcc') class FilterLinesEnvironmentMixin(Environment): """Handle encoding and maximum line length of body lines. emailmaxlinelength (int or None) The maximum length of any single line in the email body. Longer lines are truncated at that length with ' [...]' appended. strict_utf8 (bool) If this field is set to True, then the email body text is expected to be UTF-8. Any invalid characters are converted to U+FFFD, the Unicode replacement character (encoded as UTF-8, of course). 
""" def __init__(self, strict_utf8=True, emailmaxlinelength=500, **kw): super(FilterLinesEnvironmentMixin, self).__init__(**kw) self.__strict_utf8 = strict_utf8 self.__emailmaxlinelength = emailmaxlinelength def filter_body(self, lines): lines = super(FilterLinesEnvironmentMixin, self).filter_body(lines) if self.__strict_utf8: lines = (line.decode(ENCODING, 'replace') for line in lines) # Limit the line length in Unicode-space to avoid # splitting characters: if self.__emailmaxlinelength: lines = limit_linelength(lines, self.__emailmaxlinelength) lines = (line.encode(ENCODING, 'replace') for line in lines) elif self.__emailmaxlinelength: lines = limit_linelength(lines, self.__emailmaxlinelength) return lines class ConfigFilterLinesEnvironmentMixin( ConfigEnvironmentMixin, FilterLinesEnvironmentMixin, ): """Handle encoding and maximum line length based on config.""" def __init__(self, config, **kw): strict_utf8 = config.get_bool('emailstrictutf8', default=None) if strict_utf8 is not None: kw['strict_utf8'] = strict_utf8 emailmaxlinelength = config.get('emailmaxlinelength') if emailmaxlinelength is not None: kw['emailmaxlinelength'] = int(emailmaxlinelength) super(ConfigFilterLinesEnvironmentMixin, self).__init__( config=config, **kw ) class MaxlinesEnvironmentMixin(Environment): """Limit the email body to a specified number of lines.""" def __init__(self, emailmaxlines, **kw): super(MaxlinesEnvironmentMixin, self).__init__(**kw) self.__emailmaxlines = emailmaxlines def filter_body(self, lines): lines = super(MaxlinesEnvironmentMixin, self).filter_body(lines) if self.__emailmaxlines: lines = limit_lines(lines, self.__emailmaxlines) return lines class ConfigMaxlinesEnvironmentMixin( ConfigEnvironmentMixin, MaxlinesEnvironmentMixin, ): """Limit the email body to the number of lines specified in config.""" def __init__(self, config, **kw): emailmaxlines = int(config.get('emailmaxlines', default='0')) super(ConfigMaxlinesEnvironmentMixin, self).__init__( config=config, emailmaxlines=emailmaxlines, **kw ) class FQDNEnvironmentMixin(Environment): """A mixin that sets the host's FQDN to its constructor argument.""" def __init__(self, fqdn, **kw): super(FQDNEnvironmentMixin, self).__init__(**kw) self.COMPUTED_KEYS += ['fqdn'] self.__fqdn = fqdn def get_fqdn(self): """Return the fully-qualified domain name for this host. 
Return None if it is unavailable or unwanted.""" return self.__fqdn class ConfigFQDNEnvironmentMixin( ConfigEnvironmentMixin, FQDNEnvironmentMixin, ): """Read the FQDN from the config.""" def __init__(self, config, **kw): fqdn = config.get('fqdn') super(ConfigFQDNEnvironmentMixin, self).__init__( config=config, fqdn=fqdn, **kw ) class ComputeFQDNEnvironmentMixin(FQDNEnvironmentMixin): """Get the FQDN by calling socket.getfqdn().""" def __init__(self, **kw): super(ComputeFQDNEnvironmentMixin, self).__init__( fqdn=socket.getfqdn(), **kw ) class PusherDomainEnvironmentMixin(ConfigEnvironmentMixin): """Deduce pusher_email from pusher by appending an emaildomain.""" def __init__(self, **kw): super(PusherDomainEnvironmentMixin, self).__init__(**kw) self.__emaildomain = self.config.get('emaildomain') def get_pusher_email(self): if self.__emaildomain: # Derive the pusher's full email address in the default way: return '%s@%s' % (self.get_pusher(), self.__emaildomain) else: return super(PusherDomainEnvironmentMixin, self).get_pusher_email() class StaticRecipientsEnvironmentMixin(Environment): """Set recipients statically based on constructor parameters.""" def __init__( self, refchange_recipients, announce_recipients, revision_recipients, scancommitforcc, **kw ): super(StaticRecipientsEnvironmentMixin, self).__init__(**kw) # The recipients for various types of notification emails, as # RFC 2822 email addresses separated by commas (or the empty # string if no recipients are configured). Although there is # a mechanism to choose the recipient lists based on on the # actual *contents* of the change being reported, we only # choose based on the *type* of the change. Therefore we can # compute them once and for all: if not (refchange_recipients or announce_recipients or revision_recipients or scancommitforcc): raise ConfigurationException('No email recipients configured!') self.__refchange_recipients = refchange_recipients self.__announce_recipients = announce_recipients self.__revision_recipients = revision_recipients def get_refchange_recipients(self, refchange): return self.__refchange_recipients def get_announce_recipients(self, annotated_tag_change): return self.__announce_recipients def get_revision_recipients(self, revision): return self.__revision_recipients class ConfigRecipientsEnvironmentMixin( ConfigEnvironmentMixin, StaticRecipientsEnvironmentMixin ): """Determine recipients statically based on config.""" def __init__(self, config, **kw): super(ConfigRecipientsEnvironmentMixin, self).__init__( config=config, refchange_recipients=self._get_recipients( config, 'refchangelist', 'mailinglist', ), announce_recipients=self._get_recipients( config, 'announcelist', 'refchangelist', 'mailinglist', ), revision_recipients=self._get_recipients( config, 'commitlist', 'mailinglist', ), scancommitforcc=config.get('scancommitforcc'), **kw ) def _get_recipients(self, config, *names): """Return the recipients for a particular type of message. Return the list of email addresses to which a particular type of notification email should be sent, by looking at the config value for "multimailhook.$name" for each of names. Use the value from the first name that is configured. The return value is a (possibly empty) string containing RFC 2822 email addresses separated by commas. 
If no configuration could be found, raise a ConfigurationException.""" for name in names: retval = config.get_recipients(name) if retval is not None: return retval else: return '' class ProjectdescEnvironmentMixin(Environment): """Make a "projectdesc" value available for templates. By default, it is set to the first line of $GIT_DIR/description (if that file is present and appears to be set meaningfully).""" def __init__(self, **kw): super(ProjectdescEnvironmentMixin, self).__init__(**kw) self.COMPUTED_KEYS += ['projectdesc'] def get_projectdesc(self): """Return a one-line descripition of the project.""" git_dir = get_git_dir() try: projectdesc = open(os.path.join(git_dir, 'description')).readline().strip() if projectdesc and not projectdesc.startswith('Unnamed repository'): return projectdesc except IOError: pass return 'UNNAMED PROJECT' class GenericEnvironmentMixin(Environment): def get_pusher(self): return self.osenv.get('USER', self.osenv.get('USERNAME', 'unknown user')) class GenericEnvironment( ProjectdescEnvironmentMixin, ConfigMaxlinesEnvironmentMixin, ComputeFQDNEnvironmentMixin, ConfigFilterLinesEnvironmentMixin, ConfigRecipientsEnvironmentMixin, PusherDomainEnvironmentMixin, ConfigOptionsEnvironmentMixin, GenericEnvironmentMixin, Environment, ): pass class GitoliteEnvironmentMixin(Environment): def get_repo_shortname(self): # The gitolite environment variable $GL_REPO is a pretty good # repo_shortname (though it's probably not as good as a value # the user might have explicitly put in his config). return ( self.osenv.get('GL_REPO', None) or super(GitoliteEnvironmentMixin, self).get_repo_shortname() ) def get_pusher(self): return self.osenv.get('GL_USER', 'unknown user') def get_fromaddr(self): GL_USER = self.osenv.get('GL_USER') if GL_USER is not None: # Find the path to gitolite.conf. Note that gitolite v3 # did away with the GL_ADMINDIR and GL_CONF environment # variables (they are now hard-coded). GL_ADMINDIR = self.osenv.get( 'GL_ADMINDIR', os.path.expanduser(os.path.join('~', '.gitolite'))) GL_CONF = self.osenv.get( 'GL_CONF', os.path.join(GL_ADMINDIR, 'conf', 'gitolite.conf')) if os.path.isfile(GL_CONF): f = open(GL_CONF, 'rU') try: in_user_emails_section = False re_template = r'^\s*#\s*{}\s*$' re_begin, re_user, re_end = ( re.compile(re_template.format(x)) for x in ( r'BEGIN\s+USER\s+EMAILS', re.escape(GL_USER) + r'\s+(.*)', r'END\s+USER\s+EMAILS', )) for l in f: l = l.rstrip('\n') if not in_user_emails_section: if re_begin.match(l): in_user_emails_section = True continue if re_end.match(l): break m = re_user.match(l) if m: return m.group(1) finally: f.close() return super(GitoliteEnvironmentMixin, self).get_fromaddr() class IncrementalDateTime(object): """Simple wrapper to give incremental date/times. Each call will result in a date/time a second later than the previous call. This can be used to falsify email headers, to increase the likelihood that email clients sort the emails correctly.""" def __init__(self): self.time = time.time() def next(self): formatted = formatdate(self.time, True) self.time += 1 return formatted class GitoliteEnvironment( ProjectdescEnvironmentMixin, ConfigMaxlinesEnvironmentMixin, ComputeFQDNEnvironmentMixin, ConfigFilterLinesEnvironmentMixin, ConfigRecipientsEnvironmentMixin, PusherDomainEnvironmentMixin, ConfigOptionsEnvironmentMixin, GitoliteEnvironmentMixin, Environment, ): pass class Push(object): """Represent an entire push (i.e., a group of ReferenceChanges). 
It is easy to figure out what commits were added to a *branch* by a Reference change: git rev-list change.old..change.new or removed from a *branch*: git rev-list change.new..change.old But it is not quite so trivial to determine which entirely new commits were added to the *repository* by a push and which old commits were discarded by a push. A big part of the job of this class is to figure out these things, and to make sure that new commits are only detailed once even if they were added to multiple references. The first step is to determine the "other" references--those unaffected by the current push. They are computed by listing all references then removing any affected by this push. The results are stored in Push._other_ref_sha1s. The commits contained in the repository before this push were git rev-list other1 other2 other3 ... change1.old change2.old ... Where "changeN.old" is the old value of one of the references affected by this push. The commits contained in the repository after this push are git rev-list other1 other2 other3 ... change1.new change2.new ... The commits added by this push are the difference between these two sets, which can be written git rev-list \ ^other1 ^other2 ... \ ^change1.old ^change2.old ... \ change1.new change2.new ... The commits removed by this push can be computed by git rev-list \ ^other1 ^other2 ... \ ^change1.new ^change2.new ... \ change1.old change2.old ... The last point is that it is possible that other pushes are occurring simultaneously to this one, so reference values can change at any time. It is impossible to eliminate all race conditions, but we reduce the window of time during which problems can occur by translating reference names to SHA1s as soon as possible and working with SHA1s thereafter (because SHA1s are immutable).""" # A map {(changeclass, changetype): integer} specifying the order # that reference changes will be processed if multiple reference # changes are included in a single push. The order is significant # mostly because new commit notifications are threaded together # with the first reference change that includes the commit. The # following order thus causes commits to be grouped with branch # changes (as opposed to tag changes) if possible. SORT_ORDER = dict( (value, i) for (i, value) in enumerate([ (BranchChange, 'update'), (BranchChange, 'create'), (AnnotatedTagChange, 'update'), (AnnotatedTagChange, 'create'), (NonAnnotatedTagChange, 'update'), (NonAnnotatedTagChange, 'create'), (BranchChange, 'delete'), (AnnotatedTagChange, 'delete'), (NonAnnotatedTagChange, 'delete'), (OtherReferenceChange, 'update'), (OtherReferenceChange, 'create'), (OtherReferenceChange, 'delete'), ]) ) def __init__(self, changes, ignore_other_refs=False): self.changes = sorted(changes, key=self._sort_key) self.__other_ref_sha1s = None self.__cached_commits_spec = {} if ignore_other_refs: self.__other_ref_sha1s = set() @classmethod def _sort_key(klass, change): return (klass.SORT_ORDER[change.__class__, change.change_type], change.refname,) @property def _other_ref_sha1s(self): """The GitObjects referred to by references unaffected by this push. 
""" if self.__other_ref_sha1s is None: # The refnames being changed by this push: updated_refs = set( change.refname for change in self.changes ) # The SHA-1s of commits referred to by all references in this # repository *except* updated_refs: sha1s = set() fmt = ( '%(objectname) %(objecttype) %(refname)\n' '%(*objectname) %(*objecttype) %(refname)' ) for line in read_git_lines( ['for-each-ref', '--format=%s' % (fmt,)]): (sha1, type, name) = line.split(' ', 2) if sha1 and type == 'commit' and name not in updated_refs: sha1s.add(sha1) self.__other_ref_sha1s = sha1s return self.__other_ref_sha1s def _get_commits_spec_incl(self, new_or_old, reference_change=None): """Get new or old SHA-1 from one or each of the changed refs. Return a list of SHA-1 commit identifier strings suitable as arguments to 'git rev-list' (or 'git log' or ...). The returned identifiers are either the old or new values from one or all of the changed references, depending on the values of new_or_old and reference_change. new_or_old is either the string 'new' or the string 'old'. If 'new', the returned SHA-1 identifiers are the new values from each changed reference. If 'old', the SHA-1 identifiers are the old values from each changed reference. If reference_change is specified and not None, only the new or old reference from the specified reference is included in the return value. This function returns None if there are no matching revisions (e.g., because a branch was deleted and new_or_old is 'new'). """ if not reference_change: incl_spec = sorted( getattr(change, new_or_old).sha1 for change in self.changes if getattr(change, new_or_old) ) if not incl_spec: incl_spec = None elif not getattr(reference_change, new_or_old).commit_sha1: incl_spec = None else: incl_spec = [getattr(reference_change, new_or_old).commit_sha1] return incl_spec def _get_commits_spec_excl(self, new_or_old): """Get exclusion revisions for determining new or discarded commits. Return a list of strings suitable as arguments to 'git rev-list' (or 'git log' or ...) that will exclude all commits that, depending on the value of new_or_old, were either previously in the repository (useful for determining which commits are new to the repository) or currently in the repository (useful for determining which commits were discarded from the repository). new_or_old is either the string 'new' or the string 'old'. If 'new', the commits to be excluded are those that were in the repository before the push. If 'old', the commits to be excluded are those that are currently in the repository. """ old_or_new = {'old': 'new', 'new': 'old'}[new_or_old] excl_revs = self._other_ref_sha1s.union( getattr(change, old_or_new).sha1 for change in self.changes if getattr(change, old_or_new).type in ['commit', 'tag'] ) return ['^' + sha1 for sha1 in sorted(excl_revs)] def get_commits_spec(self, new_or_old, reference_change=None): """Get rev-list arguments for added or discarded commits. Return a list of strings suitable as arguments to 'git rev-list' (or 'git log' or ...) that select those commits that, depending on the value of new_or_old, are either new to the repository or were discarded from the repository. new_or_old is either the string 'new' or the string 'old'. If 'new', the returned list is used to select commits that are new to the repository. If 'old', the returned value is used to select the commits that have been discarded from the repository. 
If reference_change is specified and not None, the new or discarded commits are limited to those that are reachable from the new or old value of the specified reference. This function returns None if there are no added (or discarded) revisions. """ key = (new_or_old, reference_change) if key not in self.__cached_commits_spec: ret = self._get_commits_spec_incl(new_or_old, reference_change) if ret is not None: ret.extend(self._get_commits_spec_excl(new_or_old)) self.__cached_commits_spec[key] = ret return self.__cached_commits_spec[key] def get_new_commits(self, reference_change=None): """Return a list of commits added by this push. Return a list of the object names of commits that were added by the part of this push represented by reference_change. If reference_change is None, then return a list of *all* commits added by this push.""" spec = self.get_commits_spec('new', reference_change) return git_rev_list(spec) def get_discarded_commits(self, reference_change): """Return a list of commits discarded by this push. Return a list of the object names of commits that were entirely discarded from the repository by the part of this push represented by reference_change.""" spec = self.get_commits_spec('old', reference_change) return git_rev_list(spec) def send_emails(self, mailer, body_filter=None): """Send all of the notification emails needed for this push. Send all of the notification emails (including reference change emails and commit emails) needed for this push. Send the emails using mailer. If body_filter is not None, then use it to filter the lines that are intended for the email body.""" # The sha1s of commits that were introduced by this push. # They will be removed from this set as they are processed, to # guarantee that one (and only one) email is generated for # each new commit. unhandled_sha1s = set(self.get_new_commits()) send_date = IncrementalDateTime() for change in self.changes: sha1s = [] for sha1 in reversed(list(self.get_new_commits(change))): if sha1 in unhandled_sha1s: sha1s.append(sha1) unhandled_sha1s.remove(sha1) # Check if we've got anyone to send to if not change.recipients: change.environment.log_warning( '*** no recipients configured so no email will be sent\n' '*** for %r update %s->%s\n' % (change.refname, change.old.sha1, change.new.sha1,) ) else: if not change.environment.quiet: change.environment.log_msg( 'Sending notification emails to: %s\n' % (change.recipients,)) extra_values = {'send_date': send_date.next()} rev = change.send_single_combined_email(sha1s) if rev: mailer.send( change.generate_combined_email(self, rev, body_filter, extra_values), rev.recipients, ) # This change is now fully handled; no need to handle # individual revisions any further.
continue else: mailer.send( change.generate_email(self, body_filter, extra_values), change.recipients, ) max_emails = change.environment.maxcommitemails if max_emails and len(sha1s) > max_emails: change.environment.log_warning( '*** Too many new commits (%d), not sending commit emails.\n' % len(sha1s) + '*** Try setting multimailhook.maxCommitEmails to a greater value\n' + '*** Currently, multimailhook.maxCommitEmails=%d\n' % max_emails ) return for (num, sha1) in enumerate(sha1s): rev = Revision(change, GitObject(sha1), num=num + 1, tot=len(sha1s)) if not rev.recipients and rev.cc_recipients: change.environment.log_msg('*** Replacing Cc: with To:\n') rev.recipients = rev.cc_recipients rev.cc_recipients = None if rev.recipients: extra_values = {'send_date': send_date.next()} mailer.send( rev.generate_email(self, body_filter, extra_values), rev.recipients, ) # Consistency check: if unhandled_sha1s: change.environment.log_error( 'ERROR: No emails were sent for the following new commits:\n' ' %s\n' % ('\n '.join(sorted(unhandled_sha1s)),) ) def run_as_post_receive_hook(environment, mailer): changes = [] for line in sys.stdin: (oldrev, newrev, refname) = line.strip().split(' ', 2) changes.append( ReferenceChange.create(environment, oldrev, newrev, refname) ) push = Push(changes) push.send_emails(mailer, body_filter=environment.filter_body) def run_as_update_hook(environment, mailer, refname, oldrev, newrev, force_send=False): changes = [ ReferenceChange.create( environment, read_git_output(['rev-parse', '--verify', oldrev]), read_git_output(['rev-parse', '--verify', newrev]), refname, ), ] push = Push(changes, force_send) push.send_emails(mailer, body_filter=environment.filter_body) def choose_mailer(config, environment): mailer = config.get('mailer', default='sendmail') if mailer == 'smtp': smtpserver = config.get('smtpserver', default='localhost') smtpservertimeout = float(config.get('smtpservertimeout', default=10.0)) smtpserverdebuglevel = int(config.get('smtpserverdebuglevel', default=0)) smtpencryption = config.get('smtpencryption', default='none') smtpuser = config.get('smtpuser', default='') smtppass = config.get('smtppass', default='') mailer = SMTPMailer( envelopesender=(environment.get_sender() or environment.get_fromaddr()), smtpserver=smtpserver, smtpservertimeout=smtpservertimeout, smtpserverdebuglevel=smtpserverdebuglevel, smtpencryption=smtpencryption, smtpuser=smtpuser, smtppass=smtppass, ) elif mailer == 'sendmail': command = config.get('sendmailcommand') if command: command = shlex.split(command) mailer = SendMailer(command=command, envelopesender=environment.get_sender()) else: environment.log_error( 'fatal: multimailhook.mailer is set to an incorrect value: "%s"\n' % mailer + 'please use one of "smtp" or "sendmail".\n' ) sys.exit(1) return mailer KNOWN_ENVIRONMENTS = { 'generic': GenericEnvironmentMixin, 'gitolite': GitoliteEnvironmentMixin, } def choose_environment(config, osenv=None, env=None, recipients=None): if not osenv: osenv = os.environ environment_mixins = [ ProjectdescEnvironmentMixin, ConfigMaxlinesEnvironmentMixin, ComputeFQDNEnvironmentMixin, ConfigFilterLinesEnvironmentMixin, PusherDomainEnvironmentMixin, ConfigOptionsEnvironmentMixin, ] environment_kw = { 'osenv': osenv, 'config': config, } if not env: env = config.get('environment') if not env: if 'GL_USER' in osenv and 'GL_REPO' in osenv: env = 'gitolite' else: env = 'generic' environment_mixins.append(KNOWN_ENVIRONMENTS[env]) if recipients: environment_mixins.insert(0, StaticRecipientsEnvironmentMixin) 
environment_kw['refchange_recipients'] = recipients environment_kw['announce_recipients'] = recipients environment_kw['revision_recipients'] = recipients environment_kw['scancommitforcc'] = config.get('scancommitforcc') else: environment_mixins.insert(0, ConfigRecipientsEnvironmentMixin) environment_klass = type( 'EffectiveEnvironment', tuple(environment_mixins) + (Environment,), {}, ) return environment_klass(**environment_kw) def main(args): parser = optparse.OptionParser( description=__doc__, usage='%prog [OPTIONS]\n or: %prog [OPTIONS] REFNAME OLDREV NEWREV', ) parser.add_option( '--environment', '--env', action='store', type='choice', choices=['generic', 'gitolite'], default=None, help=( 'Choose type of environment is in use. Default is taken from ' 'multimailhook.environment if set; otherwise "generic".' ), ) parser.add_option( '--stdout', action='store_true', default=False, help='Output emails to stdout rather than sending them.', ) parser.add_option( '--recipients', action='store', default=None, help='Set list of email recipients for all types of emails.', ) parser.add_option( '--show-env', action='store_true', default=False, help=( 'Write to stderr the values determined for the environment ' '(intended for debugging purposes).' ), ) parser.add_option( '--force-send', action='store_true', default=False, help=( 'Force sending refchange email when using as an update hook. ' 'This is useful to work around the unreliable new commits ' 'detection in this mode.' ), ) (options, args) = parser.parse_args(args) config = Config('multimailhook') try: environment = choose_environment( config, osenv=os.environ, env=options.environment, recipients=options.recipients, ) if options.show_env: sys.stderr.write('Environment values:\n') for (k, v) in sorted(environment.get_values().items()): sys.stderr.write(' %s : %r\n' % (k, v)) sys.stderr.write('\n') if options.stdout or environment.stdout: mailer = OutputMailer(sys.stdout) else: mailer = choose_mailer(config, environment) # Dual mode: if arguments were specified on the command line, run # like an update hook; otherwise, run as a post-receive hook. if args: if len(args) != 3: parser.error('Need zero or three non-option arguments') (refname, oldrev, newrev) = args run_as_update_hook(environment, mailer, refname, oldrev, newrev, options.force_send) else: run_as_post_receive_hook(environment, mailer) except ConfigurationException, e: sys.exit(str(e)) if __name__ == '__main__': main(sys.argv[1:])
gpl-2.0
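The rev-list set arithmetic described in Push's docstring above can be illustrated standalone. A minimal sketch, assuming a git checkout on PATH; the function name and arguments are illustrative, not part of git_multimail:

import subprocess

def new_commits(other_sha1s, old_sha1s, new_sha1s):
    # Commits reachable from the new ref values but not from any
    # unaffected ref and not from any old value, i.e. commits that
    # are genuinely new to the repository.
    args = ['git', 'rev-list']
    args += ['^' + sha1 for sha1 in sorted(set(other_sha1s) | set(old_sha1s))]
    args += sorted(new_sha1s)
    return subprocess.check_output(args).split()

Swapping the roles of the old and new values yields the discarded commits, exactly as get_commits_spec does above.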
mancoast/CPythonPyc_test
fail/340_test_fcntl.py
96
5220
"""Test program for the fcntl C module. """ import platform import os import struct import sys import unittest from test.support import (verbose, TESTFN, unlink, run_unittest, import_module, cpython_only) # Skip test if no fcntl module. fcntl = import_module('fcntl') # TODO - Write tests for flock() and lockf(). def get_lockdata(): try: os.O_LARGEFILE except AttributeError: start_len = "ll" else: start_len = "qq" if (sys.platform.startswith(('netbsd', 'freebsd', 'openbsd', 'bsdos')) or sys.platform == 'darwin'): if struct.calcsize('l') == 8: off_t = 'l' pid_t = 'i' else: off_t = 'lxxxx' pid_t = 'l' lockdata = struct.pack(off_t + off_t + pid_t + 'hh', 0, 0, 0, fcntl.F_WRLCK, 0) elif sys.platform.startswith('gnukfreebsd'): lockdata = struct.pack('qqihhi', 0, 0, 0, fcntl.F_WRLCK, 0, 0) elif sys.platform in ['aix3', 'aix4', 'hp-uxB', 'unixware7']: lockdata = struct.pack('hhlllii', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0) else: lockdata = struct.pack('hh'+start_len+'hh', fcntl.F_WRLCK, 0, 0, 0, 0, 0) if lockdata: if verbose: print('struct.pack: ', repr(lockdata)) return lockdata lockdata = get_lockdata() class BadFile: def __init__(self, fn): self.fn = fn def fileno(self): return self.fn class TestFcntl(unittest.TestCase): def setUp(self): self.f = None def tearDown(self): if self.f and not self.f.closed: self.f.close() unlink(TESTFN) def test_fcntl_fileno(self): # the example from the library docs self.f = open(TESTFN, 'wb') rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETFL, os.O_NONBLOCK) if verbose: print('Status from fcntl with O_NONBLOCK: ', rv) rv = fcntl.fcntl(self.f.fileno(), fcntl.F_SETLKW, lockdata) if verbose: print('String from fcntl with F_SETLKW: ', repr(rv)) self.f.close() def test_fcntl_file_descriptor(self): # again, but pass the file rather than numeric descriptor self.f = open(TESTFN, 'wb') rv = fcntl.fcntl(self.f, fcntl.F_SETFL, os.O_NONBLOCK) if verbose: print('Status from fcntl with O_NONBLOCK: ', rv) rv = fcntl.fcntl(self.f, fcntl.F_SETLKW, lockdata) if verbose: print('String from fcntl with F_SETLKW: ', repr(rv)) self.f.close() def test_fcntl_bad_file(self): with self.assertRaises(ValueError): fcntl.fcntl(-1, fcntl.F_SETFL, os.O_NONBLOCK) with self.assertRaises(ValueError): fcntl.fcntl(BadFile(-1), fcntl.F_SETFL, os.O_NONBLOCK) with self.assertRaises(TypeError): fcntl.fcntl('spam', fcntl.F_SETFL, os.O_NONBLOCK) with self.assertRaises(TypeError): fcntl.fcntl(BadFile('spam'), fcntl.F_SETFL, os.O_NONBLOCK) @cpython_only def test_fcntl_bad_file_overflow(self): from _testcapi import INT_MAX, INT_MIN # Issue 15989 with self.assertRaises(OverflowError): fcntl.fcntl(INT_MAX + 1, fcntl.F_SETFL, os.O_NONBLOCK) with self.assertRaises(OverflowError): fcntl.fcntl(BadFile(INT_MAX + 1), fcntl.F_SETFL, os.O_NONBLOCK) with self.assertRaises(OverflowError): fcntl.fcntl(INT_MIN - 1, fcntl.F_SETFL, os.O_NONBLOCK) with self.assertRaises(OverflowError): fcntl.fcntl(BadFile(INT_MIN - 1), fcntl.F_SETFL, os.O_NONBLOCK) @unittest.skipIf( platform.machine().startswith('arm') and platform.system() == 'Linux', "ARM Linux returns EINVAL for F_NOTIFY DN_MULTISHOT") def test_fcntl_64_bit(self): # Issue #1309352: fcntl shouldn't fail when the third arg fits in a # C 'long' but not in a C 'int'. 
try: cmd = fcntl.F_NOTIFY # This flag is larger than 2**31 in 64-bit builds flags = fcntl.DN_MULTISHOT except AttributeError: self.skipTest("F_NOTIFY or DN_MULTISHOT unavailable") fd = os.open(os.path.dirname(os.path.abspath(TESTFN)), os.O_RDONLY) try: fcntl.fcntl(fd, cmd, flags) finally: os.close(fd) def test_flock(self): # Solaris needs readable file for shared lock self.f = open(TESTFN, 'wb+') fileno = self.f.fileno() fcntl.flock(fileno, fcntl.LOCK_SH) fcntl.flock(fileno, fcntl.LOCK_UN) fcntl.flock(self.f, fcntl.LOCK_SH | fcntl.LOCK_NB) fcntl.flock(self.f, fcntl.LOCK_UN) fcntl.flock(fileno, fcntl.LOCK_EX) fcntl.flock(fileno, fcntl.LOCK_UN) self.assertRaises(ValueError, fcntl.flock, -1, fcntl.LOCK_SH) self.assertRaises(TypeError, fcntl.flock, 'spam', fcntl.LOCK_SH) @cpython_only def test_flock_overflow(self): import _testcapi self.assertRaises(OverflowError, fcntl.flock, _testcapi.INT_MAX+1, fcntl.LOCK_SH) def test_main(): run_unittest(TestFcntl) if __name__ == '__main__': test_main()
gpl-3.0
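As a quick companion to the lock tests above, a minimal flock() round trip; the lock-file path is illustrative:

import fcntl

with open('/tmp/example.lock', 'wb') as f:
    # Exclusive advisory lock, non-blocking: raises OSError if already held.
    fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
    f.write(b'locked\n')
    # Release explicitly; closing the file descriptor would also drop it.
    fcntl.flock(f, fcntl.LOCK_UN)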
rwaldron/mirovideoconverter3
mvc/widgets/osx/control.py
2
17828
# Miro - an RSS based video player application # Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 # Participatory Culture Foundation # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # # In addition, as a special exception, the copyright holders give # permission to link the code of portions of this program with the OpenSSL # library. # # You must obey the GNU General Public License in all respects for all of # the code used other than OpenSSL. If you modify file(s) with this # exception, you may extend this exception to your version of the file(s), # but you are not obligated to do so. If you do not wish to do so, delete # this exception statement from your version. If you delete this exception # statement from all source files in the program, then also delete it here. """.control - Controls.""" from AppKit import * from Foundation import * from objc import YES, NO, nil from mvc.widgets import widgetconst import wrappermap from .base import Widget from .helpers import NotificationForwarder class SizedControl(Widget): def set_size(self, size): if size == widgetconst.SIZE_NORMAL: self.view.cell().setControlSize_(NSRegularControlSize) font = NSFont.systemFontOfSize_(NSFont.systemFontSize()) self.font_size = NSFont.systemFontSize() elif size == widgetconst.SIZE_SMALL: font = NSFont.systemFontOfSize_(NSFont.smallSystemFontSize()) self.view.cell().setControlSize_(NSSmallControlSize) self.font_size = NSFont.smallSystemFontSize() else: self.view.cell().setControlSize_(NSRegularControlSize) font = NSFont.systemFontOfSize_(NSFont.systemFontSize() * size) self.font_size = NSFont.systemFontSize() * size self.view.setFont_(font) class BaseTextEntry(SizedControl): """See https://develop.participatoryculture.org/index.php/WidgetAPI for a description of the API for this class.""" def __init__(self, initial_text=None): SizedControl.__init__(self) self.view = self.make_view() self.font = NSFont.systemFontOfSize_(NSFont.systemFontSize()) self.view.setFont_(self.font) self.view.setEditable_(YES) self.view.cell().setScrollable_(YES) self.view.cell().setLineBreakMode_(NSLineBreakByClipping) self.sizer_cell = self.view.cell().copy() if initial_text: self.view.setStringValue_(initial_text) self.set_width(len(initial_text)) else: self.set_width(10) self.notifications = NotificationForwarder.create(self.view) self.create_signal('activate') self.create_signal('changed') self.create_signal('validate') def focus(self): if self.view.window() is not None: self.view.window().makeFirstResponder_(self.view) def start_editing(self, initial_text): self.set_text(initial_text) self.focus() # unselect the text and locate the cursor at the end of the entry text_field = self.view.window().fieldEditor_forObject_(YES, self.view) text_field.setSelectedRange_(NSMakeRange(len(self.get_text()), 0)) def viewport_created(self): SizedControl.viewport_created(self) 
self.notifications.connect(self.on_changed, 'NSControlTextDidChangeNotification') self.notifications.connect(self.on_end_editing, 'NSControlTextDidEndEditingNotification') def remove_viewport(self): SizedControl.remove_viewport(self) self.notifications.disconnect() def baseline(self): return -self.view.font().descender() + 2 def on_changed(self, notification): self.emit('changed') def on_end_editing(self, notification): self.emit('focus-out') def calc_size_request(self): size = self.sizer_cell.cellSize() return size.width, size.height def set_text(self, text): self.view.setStringValue_(text) self.emit('changed') def get_text(self): return self.view.stringValue() def set_width(self, chars): self.sizer_cell.setStringValue_('X' * chars) self.invalidate_size_request() def set_activates_default(self, setting): pass def enable(self): SizedControl.enable(self) self.view.setEnabled_(True) def disable(self): SizedControl.disable(self) self.view.setEnabled_(False) class MiroTextField(NSTextField): def textDidEndEditing_(self, notification): wrappermap.wrapper(self).emit('activate') return NSTextField.textDidEndEditing_(self, notification) class TextEntry(BaseTextEntry): def make_view(self): return MiroTextField.alloc().init() class NumberEntry(BaseTextEntry): def make_view(self): return MiroTextField.alloc().init() def set_max_length(self, length): # TODO pass def _filter_value(self): """Discard any non-numeric characters""" digits = ''.join(x for x in self.view.stringValue() if x.isdigit()) self.view.setStringValue_(digits) def on_changed(self, notification): # overriding on_changed rather than connecting to it ensures that we # filter the value before anything else connected to the signal sees it self._filter_value() BaseTextEntry.on_changed(self, notification) def get_text(self): # handles get_text between when text is entered and when on_changed # filters it, in case that's possible self._filter_value() return BaseTextEntry.get_text(self) class MiroSecureTextField(NSSecureTextField): def textDidEndEditing_(self, notification): wrappermap.wrapper(self).emit('activate') return NSSecureTextField.textDidEndEditing_(self, notification) class SecureTextEntry(BaseTextEntry): def make_view(self): return MiroSecureTextField.alloc().init() class MultilineTextEntry(Widget): def __init__(self, initial_text=None): Widget.__init__(self) if initial_text is None: initial_text = "" self.view = NSTextView.alloc().initWithFrame_(NSRect((0,0),(50,50))) self.view.setMaxSize_((1.0e7, 1.0e7)) self.view.setHorizontallyResizable_(NO) self.view.setVerticallyResizable_(YES) self.notifications = NotificationForwarder.create(self.view) self.create_signal('changed') self.create_signal('focus-out') if initial_text is not None: self.set_text(initial_text) self.set_size(widgetconst.SIZE_NORMAL) def set_size(self, size): if size == widgetconst.SIZE_NORMAL: font = NSFont.systemFontOfSize_(NSFont.systemFontSize()) elif size == widgetconst.SIZE_SMALL: font = NSFont.systemFontOfSize_(NSFont.smallSystemFontSize()) else: raise ValueError("Unknown size: %s" % size) self.view.setFont_(font) def viewport_created(self): Widget.viewport_created(self) self.notifications.connect(self.on_changed, 'NSTextDidChangeNotification') self.notifications.connect(self.on_end_editing, 'NSControlTextDidEndEditingNotification') self.invalidate_size_request() def remove_viewport(self): Widget.remove_viewport(self) self.notifications.disconnect() def focus(self): if self.view.window() is not None: self.view.window().makeFirstResponder_(self.view) def set_text(self,
text): self.view.setString_(text) self.invalidate_size_request() def get_text(self): return self.view.string() def on_changed(self, notification): self.invalidate_size_request() self.emit("changed") def on_end_editing(self, notification): self.emit("focus-out") def calc_size_request(self): layout_manager = self.view.layoutManager() text_container = self.view.textContainer() # The next line is there just to force cocoa to layout the text layout_manager.glyphRangeForTextContainer_(text_container) rect = layout_manager.usedRectForTextContainer_(text_container) return rect.size.width, rect.size.height def set_editable(self, editable): if editable: self.view.setEditable_(YES) else: self.view.setEditable_(NO) class MiroButton(NSButton): def initWithSignal_(self, signal): self = super(MiroButton, self).init() self.signal = signal return self def sendAction_to_(self, action, to): # We override the Cocoa machinery here and just send it to our wrapper # widget. wrappermap.wrapper(self).emit(self.signal) return YES class Checkbox(SizedControl): """See https://develop.participatoryculture.org/index.php/WidgetAPI for a description of the API for this class.""" def __init__(self, text="", bold=False, color=None): SizedControl.__init__(self) self.create_signal('toggled') self.view = MiroButton.alloc().initWithSignal_('toggled') self.view.setButtonType_(NSSwitchButton) self.bold = bold self.title = text self.font_size = NSFont.systemFontSize() self.color = self.make_color(color) self._set_title() def set_size(self, size): SizedControl.set_size(self, size) self._set_title() def _set_title(self): if self.color is None: self.view.setTitle_(self.title) else: attributes = { NSForegroundColorAttributeName: self.color, NSFontAttributeName: NSFont.systemFontOfSize_(self.font_size) } string = NSAttributedString.alloc().initWithString_attributes_( self.title, attributes) self.view.setAttributedTitle_(string) def calc_size_request(self): if self.manual_size_request: width, height = self.manual_size_request if width == -1: width = 10000 if height == -1: height = 10000 size = self.view.cell().cellSizeForBounds_( NSRect((0, 0), (width, height))) else: size = self.view.cell().cellSize() return (size.width, size.height) def baseline(self): return -self.view.font().descender() + 1 def get_checked(self): return self.view.state() == NSOnState def set_checked(self, value): if value: self.view.setState_(NSOnState) else: self.view.setState_(NSOffState) def enable(self): SizedControl.enable(self) self.view.setEnabled_(True) def disable(self): SizedControl.disable(self) self.view.setEnabled_(False) def get_text_padding(self): """ Returns the amount of space the checkbox takes up before the label. 
""" # XXX FIXME return 18 class Button(SizedControl): """See https://develop.participatoryculture.org/index.php/WidgetAPI for a description of the API for this class.""" def __init__(self, label, style='normal', width=0): SizedControl.__init__(self) self.color = None self.title = label self.create_signal('clicked') self.view = MiroButton.alloc().initWithSignal_('clicked') self.view.setButtonType_(NSMomentaryPushInButton) self._set_title() self.setup_style(style) self.min_width = width def set_text(self, label): self.title = label self._set_title() def set_color(self, color): self.color = self.make_color(color) self._set_title() def _set_title(self): if self.color is None: self.view.setTitle_(self.title) else: attributes = { NSForegroundColorAttributeName: self.color, NSFontAttributeName: self.view.font() } string = NSAttributedString.alloc().initWithString_attributes_( self.title, attributes) self.view.setAttributedTitle_(string) def setup_style(self, style): if style == 'normal': self.view.setBezelStyle_(NSRoundedBezelStyle) self.pad_height = 0 self.pad_width = 10 self.min_width = 112 elif style == 'smooth': self.view.setBezelStyle_(NSRoundRectBezelStyle) self.pad_width = 0 self.pad_height = 4 self.paragraph_style = NSMutableParagraphStyle.alloc().init() self.paragraph_style.setAlignment_(NSCenterTextAlignment) def make_default(self): self.view.setKeyEquivalent_("\r") def calc_size_request(self): size = self.view.cell().cellSize() width = max(self.min_width, size.width + self.pad_width) height = size.height + self.pad_height return width, height def baseline(self): return -self.view.font().descender() + 10 + self.pad_height def enable(self): SizedControl.enable(self) self.view.setEnabled_(True) def disable(self): SizedControl.disable(self) self.view.setEnabled_(False) class MiroPopupButton(NSPopUpButton): def init(self): self = super(MiroPopupButton, self).init() self.setTarget_(self) self.setAction_('handleChange:') return self def handleChange_(self, sender): wrappermap.wrapper(self).emit('changed', self.indexOfSelectedItem()) class OptionMenu(SizedControl): def __init__(self, options): SizedControl.__init__(self) self.create_signal('changed') self.view = MiroPopupButton.alloc().init() self.options = options for option, value in options: self.view.addItemWithTitle_(option) def baseline(self): if self.view.cell().controlSize() == NSRegularControlSize: return -self.view.font().descender() + 6 else: return -self.view.font().descender() + 5 def calc_size_request(self): return self.view.cell().cellSize() def set_selected(self, index): self.view.selectItemAtIndex_(index) def get_selected(self): return self.view.indexOfSelectedItem() def enable(self): SizedControl.enable(self) self.view.setEnabled_(True) def disable(self): SizedControl.disable(self) self.view.setEnabled_(False) def set_width(self, width): # TODO pass class RadioButtonGroup: def __init__(self): self._buttons = [] def handle_click(self, widget): self.set_selected(widget) def add_button(self, button): self._buttons.append(button) button.connect('clicked', self.handle_click) if len(self._buttons) == 1: button.view.setState_(NSOnState) else: button.view.setState_(NSOffState) def get_buttons(self): return self._buttons def get_selected(self): for mem in self._buttons: if mem.get_selected(): return mem def set_selected(self, button): for mem in self._buttons: if button is mem: mem.view.setState_(NSOnState) else: mem.view.setState_(NSOffState) class RadioButton(SizedControl): def __init__(self, label, group=None, bold=False, 
color=None): SizedControl.__init__(self) self.create_signal('clicked') self.view = MiroButton.alloc().initWithSignal_('clicked') self.view.setButtonType_(NSRadioButton) self.color = self.make_color(color) self.title = label self.bold = bold self.font_size = NSFont.systemFontSize() self._set_title() if group is not None: self.group = group else: self.group = RadioButtonGroup() self.group.add_button(self) def set_size(self, size): SizedControl.set_size(self, size) self._set_title() def _set_title(self): if self.color is None: self.view.setTitle_(self.title) else: attributes = { NSForegroundColorAttributeName: self.color, NSFontAttributeName: NSFont.systemFontOfSize_(self.font_size) } string = NSAttributedString.alloc().initWithString_attributes_( self.title, attributes) self.view.setAttributedTitle_(string) def calc_size_request(self): size = self.view.cell().cellSize() return (size.width, size.height) def baseline(self): return -self.view.font().descender() + 2 def get_group(self): return self.group def get_selected(self): return self.view.state() == NSOnState def set_selected(self): self.group.set_selected(self) def enable(self): SizedControl.enable(self) self.view.setEnabled_(True) def disable(self): SizedControl.disable(self) self.view.setEnabled_(False)
gpl-3.0
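A hedged usage sketch for the entry widgets above; it assumes the surrounding mvc widget toolkit (window, layout, event loop) is already running, which this fragment does not show, and that connect() is the signal-subscription method implied by create_signal():

def report_change(widget):
    # NumberEntry strips non-digit characters before 'changed' is emitted.
    print(widget.get_text())

entry = NumberEntry('42')
entry.connect('changed', report_change)
entry.set_width(6)  # request room for six 'X' characters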
coldmind/django
django/core/mail/backends/filebased.py
558
2771
"""Email backend that writes messages to a file.""" import datetime import os from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.mail.backends.console import \ EmailBackend as ConsoleEmailBackend from django.utils import six class EmailBackend(ConsoleEmailBackend): def __init__(self, *args, **kwargs): self._fname = None if 'file_path' in kwargs: self.file_path = kwargs.pop('file_path') else: self.file_path = getattr(settings, 'EMAIL_FILE_PATH', None) # Make sure self.file_path is a string. if not isinstance(self.file_path, six.string_types): raise ImproperlyConfigured('Path for saving emails is invalid: %r' % self.file_path) self.file_path = os.path.abspath(self.file_path) # Make sure that self.file_path is an directory if it exists. if os.path.exists(self.file_path) and not os.path.isdir(self.file_path): raise ImproperlyConfigured( 'Path for saving email messages exists, but is not a directory: %s' % self.file_path ) # Try to create it, if it not exists. elif not os.path.exists(self.file_path): try: os.makedirs(self.file_path) except OSError as err: raise ImproperlyConfigured( 'Could not create directory for saving email messages: %s (%s)' % (self.file_path, err) ) # Make sure that self.file_path is writable. if not os.access(self.file_path, os.W_OK): raise ImproperlyConfigured('Could not write to directory: %s' % self.file_path) # Finally, call super(). # Since we're using the console-based backend as a base, # force the stream to be None, so we don't default to stdout kwargs['stream'] = None super(EmailBackend, self).__init__(*args, **kwargs) def write_message(self, message): self.stream.write(message.message().as_bytes() + b'\n') self.stream.write(b'-' * 79) self.stream.write(b'\n') def _get_filename(self): """Return a unique file name.""" if self._fname is None: timestamp = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") fname = "%s-%s.log" % (timestamp, abs(id(self))) self._fname = os.path.join(self.file_path, fname) return self._fname def open(self): if self.stream is None: self.stream = open(self._get_filename(), 'ab') return True return False def close(self): try: if self.stream is not None: self.stream.close() finally: self.stream = None
bsd-3-clause
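A hedged configuration sketch for the file-based backend above; the directory is illustrative and must satisfy the writability checks in __init__:

# settings.py
EMAIL_BACKEND = 'django.core.mail.backends.filebased.EmailBackend'
EMAIL_FILE_PATH = '/tmp/app-messages'

# Application code: each opened connection writes one timestamped .log
# file under EMAIL_FILE_PATH instead of sending anything.
from django.core.mail import send_mail
send_mail('Subject', 'Body', 'from@example.com', ['to@example.com'])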
embrah/Sash-specialist
node_modules/node-gyp/gyp/buildbot/buildbot_run.py
1467
4228
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Argument-less script to select what to run on the buildbots.""" import os import shutil import subprocess import sys BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__)) TRUNK_DIR = os.path.dirname(BUILDBOT_DIR) ROOT_DIR = os.path.dirname(TRUNK_DIR) CMAKE_DIR = os.path.join(ROOT_DIR, 'cmake') CMAKE_BIN_DIR = os.path.join(CMAKE_DIR, 'bin') OUT_DIR = os.path.join(TRUNK_DIR, 'out') def CallSubProcess(*args, **kwargs): """Wrapper around subprocess.call which treats errors as build exceptions.""" with open(os.devnull) as devnull_fd: retcode = subprocess.call(stdin=devnull_fd, *args, **kwargs) if retcode != 0: print '@@@STEP_EXCEPTION@@@' sys.exit(1) def PrepareCmake(): """Build CMake 2.8.8 since the version in Precise is 2.8.7.""" if os.environ['BUILDBOT_CLOBBER'] == '1': print '@@@BUILD_STEP Clobber CMake checkout@@@' shutil.rmtree(CMAKE_DIR) # We always build CMake 2.8.8, so no need to do anything # if the directory already exists. if os.path.isdir(CMAKE_DIR): return print '@@@BUILD_STEP Initialize CMake checkout@@@' os.mkdir(CMAKE_DIR) print '@@@BUILD_STEP Sync CMake@@@' CallSubProcess( ['git', 'clone', '--depth', '1', '--single-branch', '--branch', 'v2.8.8', '--', 'git://cmake.org/cmake.git', CMAKE_DIR], cwd=CMAKE_DIR) print '@@@BUILD_STEP Build CMake@@@' CallSubProcess( ['/bin/bash', 'bootstrap', '--prefix=%s' % CMAKE_DIR], cwd=CMAKE_DIR) CallSubProcess( ['make', 'cmake'], cwd=CMAKE_DIR) def GypTestFormat(title, format=None, msvs_version=None, tests=[]): """Run the gyp tests for a given format, emitting annotator tags. See annotator docs at: https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations Args: format: gyp format to test. Returns: 0 for success, 1 for failure. """ if not format: format = title print '@@@BUILD_STEP ' + title + '@@@' sys.stdout.flush() env = os.environ.copy() if msvs_version: env['GYP_MSVS_VERSION'] = msvs_version command = ' '.join( [sys.executable, 'gyp/gyptest.py', '--all', '--passed', '--format', format, '--path', CMAKE_BIN_DIR, '--chdir', 'gyp'] + tests) retcode = subprocess.call(command, cwd=ROOT_DIR, env=env, shell=True) if retcode: # Emit failure tag, and keep going. print '@@@STEP_FAILURE@@@' return 1 return 0 def GypBuild(): # Dump out/ directory. print '@@@BUILD_STEP cleanup@@@' print 'Removing %s...' % OUT_DIR shutil.rmtree(OUT_DIR, ignore_errors=True) print 'Done.' retcode = 0 if sys.platform.startswith('linux'): retcode += GypTestFormat('ninja') retcode += GypTestFormat('make') PrepareCmake() retcode += GypTestFormat('cmake') elif sys.platform == 'darwin': retcode += GypTestFormat('ninja') retcode += GypTestFormat('xcode') retcode += GypTestFormat('make') elif sys.platform == 'win32': retcode += GypTestFormat('ninja') if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64': retcode += GypTestFormat('msvs-ninja-2013', format='msvs-ninja', msvs_version='2013', tests=[ r'test\generator-output\gyptest-actions.py', r'test\generator-output\gyptest-relocate.py', r'test\generator-output\gyptest-rules.py']) retcode += GypTestFormat('msvs-2013', format='msvs', msvs_version='2013') else: raise Exception('Unknown platform') if retcode: # TODO(bradnelson): once the annotator supports a postscript (section for # after the build proper that could be used for cumulative failures), # use that instead of this.
This isolates the final return value so # that it isn't misattributed to the last stage. print '@@@BUILD_STEP failures@@@' sys.exit(retcode) if __name__ == '__main__': GypBuild()
mit
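For readers unfamiliar with the '@@@...@@@' markers in the file above: they are buildbot annotator tags, plain magic strings printed to stdout. A minimal sketch of the convention (the helper name `run_annotated_step` is illustrative, not part of gyp):

import subprocess
import sys

def run_annotated_step(title, cmd):
    # A build step is opened by a BUILD_STEP tag; a nonzero exit status
    # is reported with STEP_FAILURE but does not abort the whole run.
    print('@@@BUILD_STEP %s@@@' % title)
    sys.stdout.flush()
    if subprocess.call(cmd):
        print('@@@STEP_FAILURE@@@')
        return 1
    return 0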
GladeRom/android_external_chromium_org
tools/json_schema_compiler/features_h_generator.py
94
2686
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os.path

from code import Code
import cpp_util


class HGenerator(object):
  def Generate(self, features, source_file, namespace):
    return _Generator(features, source_file, namespace).Generate()


class _Generator(object):
  """A .h generator for features.
  """
  def __init__(self, features, source_file, namespace):
    self._feature_defs = features
    self._source_file = source_file
    self._source_file_filename, _ = os.path.splitext(source_file)
    self._class_name = cpp_util.ClassName(self._source_file_filename)
    self._namespace = namespace

  def Generate(self):
    """Generates a Code object for features.
    """
    c = Code()
    (c.Append(cpp_util.CHROMIUM_LICENSE)
      .Append()
      .Append(cpp_util.GENERATED_FEATURE_MESSAGE % self._source_file)
      .Append()
    )

    # Hack: for the purpose of gyp the header file will always be the source
    # file with its file extension replaced by '.h'. Assume so.
    output_file = os.path.splitext(self._namespace.source_file)[0] + '.h'
    ifndef_name = cpp_util.GenerateIfndefName(output_file)

    (c.Append('#ifndef %s' % ifndef_name)
      .Append('#define %s' % ifndef_name)
      .Append()
    )

    (c.Append('#include <map>')
      .Append('#include <string>')
      .Append()
      .Concat(cpp_util.OpenNamespace(self._namespace))
      .Append()
    )

    (c.Append('class %s {' % self._class_name)
      .Append(' public:')
      .Sblock()
      .Concat(self._GeneratePublicBody())
      .Eblock()
      .Append(' private:')
      .Sblock()
      .Concat(self._GeneratePrivateBody())
      .Eblock('};')
      .Append()
      .Cblock(cpp_util.CloseNamespace(self._namespace))
    )
    (c.Append('#endif // %s' % ifndef_name)
      .Append()
    )
    return c

  def _GeneratePublicBody(self):
    c = Code()
    (c.Append('%s();' % self._class_name)
      .Append()
      .Append('enum ID {')
      .Concat(self._GenerateEnumConstants())
      .Eblock('};')
      .Append()
      .Append('const char* ToString(ID id) const;')
      .Append('ID FromString(const std::string& id) const;')
      .Append()
    )
    return c

  def _GeneratePrivateBody(self):
    return Code().Append('std::map<std::string, '
                         '%s::ID> features_;' % self._class_name)

  def _GenerateEnumConstants(self):
    c = Code()
    (c.Sblock()
      .Append('kUnknown,')
    )
    for feature in self._feature_defs:
      c.Append('%s,' % cpp_util.ConstantName(feature.name))
    c.Append('kEnumBoundary')
    return c
bsd-3-clause
pettarin/aeneas
aeneas/audiofilemfcc.py
5
23930
#!/usr/bin/env python
# coding=utf-8

# aeneas is a Python/C library and a set of tools
# to automagically synchronize audio and text (aka forced alignment)
#
# Copyright (C) 2012-2013, Alberto Pettarin (www.albertopettarin.it)
# Copyright (C) 2013-2015, ReadBeyond Srl (www.readbeyond.it)
# Copyright (C) 2015-2017, Alberto Pettarin (www.albertopettarin.it)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""
This module contains the following classes:

* :class:`~aeneas.audiofilemfcc.AudioFileMFCC`,
  representing a mono WAVE audio file as a matrix of
  Mel-frequency cepstral coefficients (MFCC).
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy

from aeneas.audiofile import AudioFile
from aeneas.exacttiming import TimeInterval
from aeneas.exacttiming import TimeValue
from aeneas.logger import Loggable
from aeneas.mfcc import MFCC
from aeneas.runtimeconfiguration import RuntimeConfiguration
from aeneas.vad import VAD
import aeneas.globalfunctions as gf


class AudioFileMFCC(Loggable):
    """
    A monoaural (single channel) WAVE audio file,
    represented as a NumPy 2D matrix of
    Mel-frequency cepstral coefficients (MFCC).

    The matrix is "fat", that is, its number of rows is equal to
    the number of MFCC coefficients and its number of columns is
    equal to the number of window shifts in the audio file.
    The number of MFCC coefficients and the MFCC window shift
    can be modified via the
    :data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.MFCC_SIZE`
    and
    :data:`~aeneas.runtimeconfiguration.RuntimeConfiguration.MFCC_WINDOW_SHIFT`
    keys in the ``rconf`` object.

    If ``mfcc_matrix`` is not ``None``,
    it will be used as the MFCC matrix.

    If ``file_path`` or ``audio_file`` is not ``None``,
    the MFCCs will be computed upon creation of the object,
    possibly converting to PCM16 Mono WAVE and/or
    loading audio data in memory.

    The MFCCs for the entire wave are divided into three
    contiguous intervals (possibly, zero-length)::

        HEAD   = [:middle_begin[
        MIDDLE = [middle_begin:middle_end[
        TAIL   = [middle_end:[

    The usual NumPy convention of including the left/start index
    and excluding the right/end index is adopted.

    For alignment purposes, only the ``MIDDLE`` portion of the wave
    is taken into account; the ``HEAD`` and ``TAIL`` intervals are ignored.

    This class heavily uses NumPy views and in-place operations
    to avoid creating temporary data or copying data around.

    :param string file_path: the path of the PCM16 mono WAVE file, or ``None``
    :param tuple file_format: the format of the audio file, if known in advance:
                              ``(codec, channels, rate)`` or ``None``
    :param mfcc_matrix: the MFCC matrix to be set, or ``None``
    :type  mfcc_matrix: :class:`numpy.ndarray`
    :param audio_file: an audio file, or ``None``
    :type  audio_file: :class:`~aeneas.audiofile.AudioFile`
    :param rconf: a runtime configuration
    :type  rconf: :class:`~aeneas.runtimeconfiguration.RuntimeConfiguration`
    :param logger: the logger object
    :type  logger: :class:`~aeneas.logger.Logger`
    :raises: ValueError: if ``file_path``, ``audio_file``, and ``mfcc_matrix`` are all ``None``

    .. versionadded:: 1.5.0
    """

    TAG = u"AudioFileMFCC"

    def __init__(
            self,
            file_path=None,
            file_format=None,
            mfcc_matrix=None,
            audio_file=None,
            rconf=None,
            logger=None
    ):
        if (file_path is None) and (audio_file is None) and (mfcc_matrix is None):
            raise ValueError(u"You must initialize with at least one of: file_path, audio_file, or mfcc_matrix")
        super(AudioFileMFCC, self).__init__(rconf=rconf, logger=logger)
        self.file_path = file_path
        self.audio_file = audio_file
        self.is_reversed = False
        self.__mfcc = None
        self.__mfcc_mask = None
        self.__mfcc_mask_map = None
        self.__speech_intervals = None
        self.__nonspeech_intervals = None
        self.log(u"Initializing MFCCs...")
        if mfcc_matrix is not None:
            self.__mfcc = mfcc_matrix
            self.audio_length = self.all_length * self.rconf.mws
        elif (self.file_path is not None) or (self.audio_file is not None):
            audio_file_was_none = False
            if self.audio_file is None:
                audio_file_was_none = True
                self.audio_file = AudioFile(
                    file_path=self.file_path,
                    file_format=file_format,
                    rconf=self.rconf,
                    logger=self.logger
                )
                # NOTE load audio samples into memory, if not present already
                self.audio_file.audio_samples
            gf.run_c_extension_with_fallback(
                self.log,
                "cmfcc",
                self._compute_mfcc_c_extension,
                self._compute_mfcc_pure_python,
                (),
                rconf=self.rconf
            )
            self.audio_length = self.audio_file.audio_length
            if audio_file_was_none:
                self.log(u"Clearing the audio data...")
                self.audio_file.clear_data()
                self.audio_file = None
                self.log(u"Clearing the audio data... done")
        self.__middle_begin = 0
        self.__middle_end = self.__mfcc.shape[1]
        self.log(u"Initializing MFCCs... done")

    def __unicode__(self):
        msg = [
            u"File path: %s" % self.file_path,
            u"Audio length (s): %s" % gf.safe_float(self.audio_length),
        ]
        return u"\n".join(msg)

    def __str__(self):
        return gf.safe_str(self.__unicode__())

    @property
    def all_mfcc(self):
        """
        The MFCCs of the entire audio file,
        that is, HEAD + MIDDLE + TAIL.

        :rtype: :class:`numpy.ndarray` (2D)
        """
        return self.__mfcc

    @property
    def all_length(self):
        """
        The length, in MFCC coefficients,
        of the entire audio file,
        that is, HEAD + MIDDLE + TAIL.

        :rtype: int
        """
        return self.__mfcc.shape[1]

    @property
    def middle_mfcc(self):
        """
        The MFCCs of the middle part of the audio file,
        that is, without HEAD and TAIL.

        :rtype: :class:`numpy.ndarray` (2D)
        """
        return self.__mfcc[:, self.__middle_begin:self.__middle_end]

    @property
    def middle_length(self):
        """
        The length, in MFCC coefficients,
        of the middle part of the audio file,
        that is, without HEAD and TAIL.

        :rtype: int
        """
        return self.__middle_end - self.__middle_begin

    @property
    def middle_map(self):
        """
        Return the map
        from the MFCC frame indices
        in the MIDDLE portion of the wave
        to the MFCC FULL frame indices,
        that is, an ``numpy.arange(self.middle_begin, self.middle_end)``.

        NOTE: to translate indices of MIDDLE,
        instead of using fancy indexing with the result of this function,
        you might want to simply add ``self.head_length``.
        This function is provided mostly for consistency
        with the MASKED case.

        :rtype: :class:`numpy.ndarray` (1D)
        """
        return numpy.arange(self.__middle_begin, self.__middle_end)

    @property
    def head_length(self):
        """
        The length, in MFCC coefficients,
        of the HEAD of the audio file.

        :rtype: int
        """
        return self.__middle_begin

    @property
    def tail_length(self):
        """
        The length, in MFCC coefficients,
        of the TAIL of the audio file.

        :rtype: int
        """
        return self.all_length - self.__middle_end

    @property
    def tail_begin(self):
        """
        The index, in MFCC coefficients,
        where the TAIL of the audio file starts.

        :rtype: int
        """
        return self.__middle_end

    @property
    def audio_length(self):
        """
        The length, in seconds, of the audio file.

        This value is the actual length of the audio file,
        computed as ``number of samples / sample_rate``,
        hence it might differ from ``len(self.__mfcc) * mfcc_window_shift``.

        :rtype: :class:`~aeneas.exacttiming.TimeValue`
        """
        return self.__audio_length

    @audio_length.setter
    def audio_length(self, audio_length):
        self.__audio_length = audio_length

    @property
    def is_reversed(self):
        """
        Return ``True`` if currently reversed.

        :rtype: bool
        """
        return self.__is_reversed

    @is_reversed.setter
    def is_reversed(self, is_reversed):
        self.__is_reversed = is_reversed

    @property
    def masked_mfcc(self):
        """
        Return the MFCC speech frames
        in the FULL wave.

        :rtype: :class:`numpy.ndarray` (2D)
        """
        self._ensure_mfcc_mask()
        return self.__mfcc[:, self.__mfcc_mask]

    @property
    def masked_length(self):
        """
        Return the number of MFCC speech frames
        in the FULL wave.

        :rtype: int
        """
        self._ensure_mfcc_mask()
        return len(self.__mfcc_mask_map)

    @property
    def masked_map(self):
        """
        Return the map
        from the MFCC speech frame indices
        to the MFCC FULL frame indices.

        :rtype: :class:`numpy.ndarray` (1D)
        """
        self._ensure_mfcc_mask()
        return self.__mfcc_mask_map

    @property
    def masked_middle_mfcc(self):
        """
        Return the MFCC speech frames
        in the MIDDLE portion of the wave.

        :rtype: :class:`numpy.ndarray` (2D)
        """
        begin, end = self._masked_middle_begin_end()
        return (self.masked_mfcc)[:, begin:end]

    @property
    def masked_middle_length(self):
        """
        Return the number of MFCC speech frames
        in the MIDDLE portion of the wave.

        :rtype: int
        """
        begin, end = self._masked_middle_begin_end()
        return end - begin

    @property
    def masked_middle_map(self):
        """
        Return the map
        from the MFCC speech frame indices
        in the MIDDLE portion of the wave
        to the MFCC FULL frame indices.

        :rtype: :class:`numpy.ndarray` (1D)
        """
        begin, end = self._masked_middle_begin_end()
        return self.__mfcc_mask_map[begin:end]

    def _masked_middle_begin_end(self):
        """
        Return the begin and end indices w.r.t. ``self.__mfcc_mask_map``,
        corresponding to indices in the MIDDLE portion of the wave,
        that is, which fall between ``self.__middle_begin`` and
        ``self.__middle_end`` in ``self.__mfcc``.

        :rtype: (int, int)
        """
        self._ensure_mfcc_mask()
        begin = numpy.searchsorted(self.__mfcc_mask_map, self.__middle_begin, side="left")
        end = numpy.searchsorted(self.__mfcc_mask_map, self.__middle_end, side="right")
        return (begin, end)

    def intervals(self, speech=True, time=True):
        """
        Return a list of intervals::

            [(b_1, e_1), (b_2, e_2), ..., (b_k, e_k)]

        where ``b_i`` is the time when the ``i``-th interval begins,
        and ``e_i`` is the time when it ends.

        :param bool speech: if ``True``, return speech intervals,
                            otherwise return nonspeech intervals
        :param bool time: if ``True``, return
                          :class:`~aeneas.exacttiming.TimeInterval` objects,
                          otherwise return indices (int)
        :rtype: list of pairs (see above)
        """
        self._ensure_mfcc_mask()
        if speech:
            self.log(u"Converting speech runs to intervals...")
            intervals = self.__speech_intervals
        else:
            self.log(u"Converting nonspeech runs to intervals...")
            intervals = self.__nonspeech_intervals
        if time:
            mws = self.rconf.mws
            intervals = [TimeInterval(
                begin=(b * mws),
                end=((e + 1) * mws)
            ) for b, e in intervals]
        self.log(u"Converting... done")
        return intervals

    def inside_nonspeech(self, index):
        """
        If ``index`` is contained in a nonspeech interval,
        return a pair ``(interval_begin, interval_end)``
        such that ``interval_begin <= index < interval_end``,
        i.e., ``interval_end`` is assumed not to be included.

        Otherwise, return ``None``.

        :rtype: ``None`` or tuple
        """
        self._ensure_mfcc_mask()
        if (index < 0) or (index >= self.all_length) or (self.__mfcc_mask[index]):
            return None
        return self._binary_search_intervals(self.__nonspeech_intervals, index)

    @classmethod
    def _binary_search_intervals(cls, intervals, index):
        """
        Binary search for the interval containing index,
        assuming there is such an interval.
        This function should never return ``None``.
        """
        start = 0
        end = len(intervals) - 1
        while start <= end:
            middle_index = start + ((end - start) // 2)
            middle = intervals[middle_index]
            if (middle[0] <= index) and (index < middle[1]):
                return middle
            elif middle[0] > index:
                end = middle_index - 1
            else:
                start = middle_index + 1
        return None

    @property
    def middle_begin(self):
        """
        Return the index where MIDDLE starts.

        :rtype: int
        """
        return self.__middle_begin

    @middle_begin.setter
    def middle_begin(self, index):
        """
        Set the index where MIDDLE starts.

        :param int index: the new index for MIDDLE begin
        """
        if (index < 0) or (index > self.all_length):
            raise ValueError(u"The given index is not valid")
        self.__middle_begin = index

    @property
    def middle_begin_seconds(self):
        """
        Return the time instant, in seconds, where MIDDLE starts.

        :rtype: :class:`~aeneas.exacttiming.TimeValue`
        """
        return TimeValue(self.__middle_begin) * self.rconf.mws

    @property
    def middle_end(self):
        """
        Return the index (+1) where MIDDLE ends.

        :rtype: int
        """
        return self.__middle_end

    @middle_end.setter
    def middle_end(self, index):
        """
        Set the index (+1) where MIDDLE ends.

        :param int index: the new index for MIDDLE end
        """
        if (index < 0) or (index > self.all_length):
            raise ValueError(u"The given index is not valid")
        self.__middle_end = index

    @property
    def middle_end_seconds(self):
        """
        Return the time instant, in seconds, where MIDDLE ends.

        :rtype: :class:`~aeneas.exacttiming.TimeValue`
        """
        return TimeValue(self.__middle_end) * self.rconf.mws

    def _ensure_mfcc_mask(self):
        """
        Ensure that ``run_vad()`` has already been called,
        and hence ``self.__mfcc_mask`` has a meaningful value.
        """
        if self.__mfcc_mask is None:
            self.log(u"VAD was not run: running it now")
            self.run_vad()

    def _compute_mfcc_c_extension(self):
        """
        Compute MFCCs using the Python C extension cmfcc.
        """
        self.log(u"Computing MFCCs using C extension...")
        try:
            self.log(u"Importing cmfcc...")
            import aeneas.cmfcc.cmfcc
            self.log(u"Importing cmfcc... done")
            self.__mfcc = (aeneas.cmfcc.cmfcc.compute_from_data(
                self.audio_file.audio_samples,
                self.audio_file.audio_sample_rate,
                self.rconf[RuntimeConfiguration.MFCC_FILTERS],
                self.rconf[RuntimeConfiguration.MFCC_SIZE],
                self.rconf[RuntimeConfiguration.MFCC_FFT_ORDER],
                self.rconf[RuntimeConfiguration.MFCC_LOWER_FREQUENCY],
                self.rconf[RuntimeConfiguration.MFCC_UPPER_FREQUENCY],
                self.rconf[RuntimeConfiguration.MFCC_EMPHASIS_FACTOR],
                self.rconf[RuntimeConfiguration.MFCC_WINDOW_LENGTH],
                self.rconf[RuntimeConfiguration.MFCC_WINDOW_SHIFT]
            )[0]).transpose()
            self.log(u"Computing MFCCs using C extension... done")
            return (True, None)
        except Exception as exc:
            self.log_exc(u"An unexpected error occurred while running cmfcc", exc, False, None)
        return (False, None)

    def _compute_mfcc_pure_python(self):
        """
        Compute MFCCs using the pure Python code.
        """
        self.log(u"Computing MFCCs using pure Python code...")
        try:
            self.__mfcc = MFCC(
                rconf=self.rconf,
                logger=self.logger
            ).compute_from_data(
                self.audio_file.audio_samples,
                self.audio_file.audio_sample_rate
            ).transpose()
            self.log(u"Computing MFCCs using pure Python code... done")
            return (True, None)
        except Exception as exc:
            self.log_exc(u"An unexpected error occurred while running pure Python code", exc, False, None)
        return (False, None)

    def reverse(self):
        """
        Reverse the audio file.

        The reversing is done efficiently using NumPy views inplace
        instead of swapping values.

        Only speech and nonspeech intervals are actually recomputed
        as Python lists.
        """
        self.log(u"Reversing...")
        all_length = self.all_length
        self.__mfcc = self.__mfcc[:, ::-1]
        tmp = self.__middle_end
        self.__middle_end = all_length - self.__middle_begin
        self.__middle_begin = all_length - tmp
        if self.__mfcc_mask is not None:
            self.__mfcc_mask = self.__mfcc_mask[::-1]
            # equivalent to
            # self.__mfcc_mask_map = ((all_length - 1) - self.__mfcc_mask_map)[::-1]
            # but done in place using NumPy view
            self.__mfcc_mask_map *= -1
            self.__mfcc_mask_map += all_length - 1
            self.__mfcc_mask_map = self.__mfcc_mask_map[::-1]
            self.__speech_intervals = [(all_length - i[1], all_length - i[0]) for i in self.__speech_intervals[::-1]]
            self.__nonspeech_intervals = [(all_length - i[1], all_length - i[0]) for i in self.__nonspeech_intervals[::-1]]
        self.is_reversed = not self.is_reversed
        self.log(u"Reversing...done")

    def run_vad(
            self,
            log_energy_threshold=None,
            min_nonspeech_length=None,
            extend_before=None,
            extend_after=None
    ):
        """
        Determine which frames contain speech and nonspeech,
        and store the resulting boolean mask internally.

        The four parameters might be ``None``:
        in this case, the corresponding RuntimeConfiguration values
        are applied.

        :param float log_energy_threshold: the minimum log energy threshold
                                           to consider a frame as speech
        :param int min_nonspeech_length: the minimum length, in frames,
                                         of a nonspeech interval
        :param int extend_before: extend each speech interval by this number
                                  of frames to the left (before)
        :param int extend_after: extend each speech interval by this number
                                 of frames to the right (after)
        """
        def _compute_runs(array):
            """
            Compute runs as a list of arrays,
            each containing the indices of a contiguous run.

            :param array: the data array
            :type  array: :class:`numpy.ndarray` (1D)
            :rtype: list of :class:`numpy.ndarray` (1D)
            """
            if len(array) < 1:
                return []
            return numpy.split(array, numpy.where(numpy.diff(array) != 1)[0] + 1)
        self.log(u"Creating VAD object")
        vad = VAD(rconf=self.rconf, logger=self.logger)
        self.log(u"Running VAD...")
        self.__mfcc_mask = vad.run_vad(
            wave_energy=self.__mfcc[0],
            log_energy_threshold=log_energy_threshold,
            min_nonspeech_length=min_nonspeech_length,
            extend_before=extend_before,
            extend_after=extend_after
        )
        self.__mfcc_mask_map = (numpy.where(self.__mfcc_mask))[0]
        self.log(u"Running VAD... done")
        self.log(u"Storing speech and nonspeech intervals...")
        # where( == True) already computed, reusing
        # COMMENTED runs = _compute_runs((numpy.where(self.__mfcc_mask))[0])
        runs = _compute_runs(self.__mfcc_mask_map)
        self.__speech_intervals = [(r[0], r[-1]) for r in runs]
        # where( == False) not already computed, computing now
        runs = _compute_runs((numpy.where(~self.__mfcc_mask))[0])
        self.__nonspeech_intervals = [(r[0], r[-1]) for r in runs]
        self.log(u"Storing speech and nonspeech intervals... done")

    def set_head_middle_tail(self, head_length=None, middle_length=None, tail_length=None):
        """
        Set the HEAD, MIDDLE, TAIL explicitly.

        If a parameter is ``None``, it will be ignored.
        If both ``middle_length`` and ``tail_length`` are specified,
        only ``middle_length`` will be applied.

        :param head_length: the length of HEAD, in seconds
        :type  head_length: :class:`~aeneas.exacttiming.TimeValue`
        :param middle_length: the length of MIDDLE, in seconds
        :type  middle_length: :class:`~aeneas.exacttiming.TimeValue`
        :param tail_length: the length of TAIL, in seconds
        :type  tail_length: :class:`~aeneas.exacttiming.TimeValue`
        :raises: TypeError: if one of the arguments is not ``None``
                            or :class:`~aeneas.exacttiming.TimeValue`
        :raises: ValueError: if one of the arguments is greater
                             than the length of the audio file
        """
        for variable, name in [
                (head_length, "head_length"),
                (middle_length, "middle_length"),
                (tail_length, "tail_length")
        ]:
            if (variable is not None) and (not isinstance(variable, TimeValue)):
                raise TypeError(u"%s is not None or TimeValue" % name)
            if (variable is not None) and (variable > self.audio_length):
                raise ValueError(u"%s is greater than the length of the audio file" % name)
        self.log(u"Setting head middle tail...")
        mws = self.rconf.mws
        self.log([u"Before: 0 %d %d %d", self.middle_begin, self.middle_end, self.all_length])
        if head_length is not None:
            self.middle_begin = int(head_length / mws)
        if middle_length is not None:
            self.middle_end = self.middle_begin + int(middle_length / mws)
        elif tail_length is not None:
            self.middle_end = self.all_length - int(tail_length / mws)
        self.log([u"After: 0 %d %d %d", self.middle_begin, self.middle_end, self.all_length])
        self.log(u"Setting head middle tail... done")
agpl-3.0
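The HEAD/MIDDLE/TAIL bookkeeping documented in the class above reduces to plain index arithmetic on the frame axis of the MFCC matrix; a minimal standalone sketch with illustrative values (NumPy only, not the aeneas API):

import numpy

mws = 0.040                                   # MFCC window shift in seconds (illustrative)
mfcc = numpy.zeros((13, 1000))                # "fat" matrix: coefficients x frames
middle_begin = int(1.2 / mws)                 # a 1.2 s HEAD
middle_end = mfcc.shape[1] - int(0.8 / mws)   # a 0.8 s TAIL
middle = mfcc[:, middle_begin:middle_end]     # a NumPy view, no copy
assert middle.shape[1] == middle_end - middle_begin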
michael-ball/sublime-text
sublime-text-3/Packages/Python PEP8 Autoformat/libs/py33/lib2to3/fixes/fix_print.py
164
2854
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Fixer for print.

Change:
    'print' into 'print()'
    'print ...' into 'print(...)'
    'print ... ,' into 'print(..., end=" ")'
    'print >>x, ...' into 'print(..., file=x)'

No changes are applied if print_function is imported from __future__

"""

# Local imports
from .. import patcomp
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, Comma, String, is_tuple


parend_expr = patcomp.compile_pattern(
    """atom< '(' [atom|STRING|NAME] ')' >"""
    )


class FixPrint(fixer_base.BaseFix):

    BM_compatible = True

    PATTERN = """
              simple_stmt< any* bare='print' any* > | print_stmt
              """

    def transform(self, node, results):
        assert results

        bare_print = results.get("bare")

        if bare_print:
            # Special-case print all by itself
            bare_print.replace(Call(Name("print"), [],
                                    prefix=bare_print.prefix))
            return
        assert node.children[0] == Name("print")
        args = node.children[1:]
        if len(args) == 1 and parend_expr.match(args[0]):
            # We don't want to keep sticking parens around an
            # already-parenthesised expression.
            return

        sep = end = file = None
        if args and args[-1] == Comma():
            args = args[:-1]
            end = " "
        if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, ">>"):
            assert len(args) >= 2
            file = args[1].clone()
            args = args[3:]  # Strip a possible comma after the file expression
        # Now synthesize a print(args, sep=..., end=..., file=...) node.
        l_args = [arg.clone() for arg in args]
        if l_args:
            l_args[0].prefix = ""
        if sep is not None or end is not None or file is not None:
            if sep is not None:
                self.add_kwarg(l_args, "sep", String(repr(sep)))
            if end is not None:
                self.add_kwarg(l_args, "end", String(repr(end)))
            if file is not None:
                self.add_kwarg(l_args, "file", file)
        n_stmt = Call(Name("print"), l_args)
        n_stmt.prefix = node.prefix
        return n_stmt

    def add_kwarg(self, l_nodes, s_kwd, n_expr):
        # XXX All this prefix-setting may lose comments (though rarely)
        n_expr.prefix = ""
        n_argument = pytree.Node(self.syms.argument,
                                 (Name(s_kwd),
                                  pytree.Leaf(token.EQUAL, "="),
                                  n_expr))
        if l_nodes:
            l_nodes.append(Comma())
            n_argument.prefix = " "
        l_nodes.append(n_argument)
unlicense
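A quick way to see the fixer above in action is lib2to3's refactoring driver; a minimal sketch, assuming a Python interpreter that still ships lib2to3:

from lib2to3.refactor import RefactoringTool

rt = RefactoringTool(['lib2to3.fixes.fix_print'])
tree = rt.refactor_string('print >>sys.stderr, "error"\n', '<example>')
print(str(tree))  # -> print("error", file=sys.stderr)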
x303597316/hue
desktop/core/ext-py/Django-1.6.10/django/contrib/gis/geos/tests/test_geos.py
89
45668
from __future__ import unicode_literals

import ctypes
import json
import random
from binascii import a2b_hex, b2a_hex
from io import BytesIO

from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis import memoryview
from django.contrib.gis.geometry.test_data import TestDataMixin

from django.utils.encoding import force_bytes
from django.utils import six
from django.utils.six.moves import xrange
from django.utils import unittest
from django.utils.unittest import skipUnless

from .. import HAS_GEOS

if HAS_GEOS:
    from .. import (GEOSException, GEOSIndexError, GEOSGeometry,
        GeometryCollection, Point, MultiPoint, Polygon, MultiPolygon, LinearRing,
        LineString, MultiLineString, fromfile, fromstr, geos_version_info,
        GEOS_PREPARE)
    from ..base import gdal, numpy, GEOSBase


@skipUnless(HAS_GEOS, "Geos is required.")
class GEOSTest(unittest.TestCase, TestDataMixin):

    @property
    def null_srid(self):
        """
        Returns the proper null SRID depending on the GEOS version.
        See the comments in `test_srid` for more details.
        """
        info = geos_version_info()
        if info['version'] == '3.0.0' and info['release_candidate']:
            return -1
        else:
            return None

    def test_base(self):
        "Tests out the GEOSBase class."
        # Testing out GEOSBase class, which provides a `ptr` property
        # that abstracts out access to underlying C pointers.
        class FakeGeom1(GEOSBase):
            pass

        # This one only accepts pointers to floats
        c_float_p = ctypes.POINTER(ctypes.c_float)

        class FakeGeom2(GEOSBase):
            ptr_type = c_float_p

        # Default ptr_type is `c_void_p`.
        fg1 = FakeGeom1()
        # Default ptr_type is C float pointer
        fg2 = FakeGeom2()

        # These assignments are OK -- None is allowed because
        # it's equivalent to the NULL pointer.
        fg1.ptr = ctypes.c_void_p()
        fg1.ptr = None
        fg2.ptr = c_float_p(ctypes.c_float(5.23))
        fg2.ptr = None

        # Because pointers have been set to NULL, an exception should be
        # raised when we try to access it. Raising an exception is
        # preferable to a segmentation fault that commonly occurs when
        # a C method is given a NULL memory reference.
        for fg in (fg1, fg2):
            # Equivalent to `fg.ptr`
            self.assertRaises(GEOSException, fg._get_ptr)

        # Anything that is either not None or the acceptable pointer type will
        # result in a TypeError when trying to assign it to the `ptr` property.
        # Thus, memory addresses (integers) and pointers of the incorrect type
        # (in `bad_ptrs`) will not be allowed.
        bad_ptrs = (5, ctypes.c_char_p(b'foobar'))
        for bad_ptr in bad_ptrs:
            # Equivalent to `fg.ptr = bad_ptr`
            self.assertRaises(TypeError, fg1._set_ptr, bad_ptr)
            self.assertRaises(TypeError, fg2._set_ptr, bad_ptr)

    def test_wkt(self):
        "Testing WKT output."
        for g in self.geometries.wkt_out:
            geom = fromstr(g.wkt)
            if geom.hasz and geos_version_info()['version'] >= '3.3.0':
                self.assertEqual(g.ewkt, geom.wkt)

    def test_hex(self):
        "Testing HEX output."
        for g in self.geometries.hex_wkt:
            geom = fromstr(g.wkt)
            self.assertEqual(g.hex, geom.hex.decode())

    def test_hexewkb(self):
        "Testing (HEX)EWKB output."
        # For testing HEX(EWKB).
        ogc_hex = b'01010000000000000000000000000000000000F03F'
        ogc_hex_3d = b'01010000800000000000000000000000000000F03F0000000000000040'
        # `SELECT ST_AsHEXEWKB(ST_GeomFromText('POINT(0 1)', 4326));`
        hexewkb_2d = b'0101000020E61000000000000000000000000000000000F03F'
        # `SELECT ST_AsHEXEWKB(ST_GeomFromEWKT('SRID=4326;POINT(0 1 2)'));`
        hexewkb_3d = b'01010000A0E61000000000000000000000000000000000F03F0000000000000040'

        pnt_2d = Point(0, 1, srid=4326)
        pnt_3d = Point(0, 1, 2, srid=4326)

        # OGC-compliant HEX will not have SRID value.
        self.assertEqual(ogc_hex, pnt_2d.hex)
        self.assertEqual(ogc_hex_3d, pnt_3d.hex)

        # HEXEWKB should be appropriate for its dimension -- have to use
        # a WKBWriter w/dimension set accordingly, else GEOS will insert
        # garbage into 3D coordinate if there is none. Also, GEOS has
        # a bug in versions prior to 3.1 that puts the X coordinate in
        # place of Z; an exception should be raised on those versions.
        self.assertEqual(hexewkb_2d, pnt_2d.hexewkb)
        if GEOS_PREPARE:
            self.assertEqual(hexewkb_3d, pnt_3d.hexewkb)
            self.assertEqual(True, GEOSGeometry(hexewkb_3d).hasz)
        else:
            try:
                hexewkb = pnt_3d.hexewkb
            except GEOSException:
                pass
            else:
                self.fail('Should have raised GEOSException.')

        # Same for EWKB.
        self.assertEqual(memoryview(a2b_hex(hexewkb_2d)), pnt_2d.ewkb)
        if GEOS_PREPARE:
            self.assertEqual(memoryview(a2b_hex(hexewkb_3d)), pnt_3d.ewkb)
        else:
            try:
                ewkb = pnt_3d.ewkb
            except GEOSException:
                pass
            else:
                self.fail('Should have raised GEOSException')

        # Redundant sanity check.
        self.assertEqual(4326, GEOSGeometry(hexewkb_2d).srid)

    def test_kml(self):
        "Testing KML output."
        for tg in self.geometries.wkt_out:
            geom = fromstr(tg.wkt)
            kml = getattr(tg, 'kml', False)
            if kml:
                self.assertEqual(kml, geom.kml)

    def test_errors(self):
        "Testing the Error handlers."
        # string-based
        for err in self.geometries.errors:
            with self.assertRaises((GEOSException, ValueError)):
                _ = fromstr(err.wkt)

        # Bad WKB
        self.assertRaises(GEOSException, GEOSGeometry, memoryview(b'0'))

        class NotAGeometry(object):
            pass

        # Some other object
        self.assertRaises(TypeError, GEOSGeometry, NotAGeometry())
        # None
        self.assertRaises(TypeError, GEOSGeometry, None)

    def test_wkb(self):
        "Testing WKB output."
        for g in self.geometries.hex_wkt:
            geom = fromstr(g.wkt)
            wkb = geom.wkb
            self.assertEqual(b2a_hex(wkb).decode().upper(), g.hex)

    def test_create_hex(self):
        "Testing creation from HEX."
        for g in self.geometries.hex_wkt:
            geom_h = GEOSGeometry(g.hex)
            # we need to do this so decimal places get normalised
            geom_t = fromstr(g.wkt)
            self.assertEqual(geom_t.wkt, geom_h.wkt)

    def test_create_wkb(self):
        "Testing creation from WKB."
        for g in self.geometries.hex_wkt:
            wkb = memoryview(a2b_hex(g.hex.encode()))
            geom_h = GEOSGeometry(wkb)
            # we need to do this so decimal places get normalised
            geom_t = fromstr(g.wkt)
            self.assertEqual(geom_t.wkt, geom_h.wkt)

    def test_ewkt(self):
        "Testing EWKT."
        srids = (-1, 32140)
        for srid in srids:
            for p in self.geometries.polygons:
                ewkt = 'SRID=%d;%s' % (srid, p.wkt)
                poly = fromstr(ewkt)
                self.assertEqual(srid, poly.srid)
                self.assertEqual(srid, poly.shell.srid)
                self.assertEqual(srid, fromstr(poly.ewkt).srid)  # Checking export

    @skipUnless(HAS_GDAL, "GDAL is required.")
    def test_json(self):
        "Testing GeoJSON input/output (via GDAL)."
        for g in self.geometries.json_geoms:
            geom = GEOSGeometry(g.wkt)
            if not hasattr(g, 'not_equal'):
                # Loading jsons to prevent decimal differences
                self.assertEqual(json.loads(g.json), json.loads(geom.json))
                self.assertEqual(json.loads(g.json), json.loads(geom.geojson))
            self.assertEqual(GEOSGeometry(g.wkt), GEOSGeometry(geom.json))

    def test_fromfile(self):
        "Testing the fromfile() factory."
        ref_pnt = GEOSGeometry('POINT(5 23)')

        wkt_f = BytesIO()
        wkt_f.write(force_bytes(ref_pnt.wkt))
        wkb_f = BytesIO()
        wkb_f.write(bytes(ref_pnt.wkb))

        # Other tests use `fromfile()` on string filenames so those
        # aren't tested here.
        for fh in (wkt_f, wkb_f):
            fh.seek(0)
            pnt = fromfile(fh)
            self.assertEqual(ref_pnt, pnt)

    def test_eq(self):
        "Testing equivalence."
        p = fromstr('POINT(5 23)')
        self.assertEqual(p, p.wkt)
        self.assertNotEqual(p, 'foo')
        ls = fromstr('LINESTRING(0 0, 1 1, 5 5)')
        self.assertEqual(ls, ls.wkt)
        self.assertNotEqual(p, 'bar')

        # Error shouldn't be raised on equivalence testing with
        # an invalid type.
        for g in (p, ls):
            self.assertNotEqual(g, None)
            self.assertNotEqual(g, {'foo' : 'bar'})
            self.assertNotEqual(g, False)

    def test_points(self):
        "Testing Point objects."
        prev = fromstr('POINT(0 0)')
        for p in self.geometries.points:
            # Creating the point from the WKT
            pnt = fromstr(p.wkt)
            self.assertEqual(pnt.geom_type, 'Point')
            self.assertEqual(pnt.geom_typeid, 0)
            self.assertEqual(p.x, pnt.x)
            self.assertEqual(p.y, pnt.y)
            self.assertEqual(True, pnt == fromstr(p.wkt))
            self.assertEqual(False, pnt == prev)

            # Making sure that the point's X, Y components are what we expect
            self.assertAlmostEqual(p.x, pnt.tuple[0], 9)
            self.assertAlmostEqual(p.y, pnt.tuple[1], 9)

            # Testing the third dimension, and getting the tuple arguments
            if hasattr(p, 'z'):
                self.assertEqual(True, pnt.hasz)
                self.assertEqual(p.z, pnt.z)
                self.assertEqual(p.z, pnt.tuple[2], 9)
                tup_args = (p.x, p.y, p.z)
                set_tup1 = (2.71, 3.14, 5.23)
                set_tup2 = (5.23, 2.71, 3.14)
            else:
                self.assertEqual(False, pnt.hasz)
                self.assertEqual(None, pnt.z)
                tup_args = (p.x, p.y)
                set_tup1 = (2.71, 3.14)
                set_tup2 = (3.14, 2.71)

            # Centroid operation on point should be point itself
            self.assertEqual(p.centroid, pnt.centroid.tuple)

            # Now testing the different constructors
            pnt2 = Point(tup_args)  # e.g., Point((1, 2))
            pnt3 = Point(*tup_args)  # e.g., Point(1, 2)
            self.assertEqual(True, pnt == pnt2)
            self.assertEqual(True, pnt == pnt3)

            # Now testing setting the x and y
            pnt.y = 3.14
            pnt.x = 2.71
            self.assertEqual(3.14, pnt.y)
            self.assertEqual(2.71, pnt.x)

            # Setting via the tuple/coords property
            pnt.tuple = set_tup1
            self.assertEqual(set_tup1, pnt.tuple)
            pnt.coords = set_tup2
            self.assertEqual(set_tup2, pnt.coords)

            prev = pnt  # setting the previous geometry

    def test_multipoints(self):
        "Testing MultiPoint objects."
        for mp in self.geometries.multipoints:
            mpnt = fromstr(mp.wkt)
            self.assertEqual(mpnt.geom_type, 'MultiPoint')
            self.assertEqual(mpnt.geom_typeid, 4)

            self.assertAlmostEqual(mp.centroid[0], mpnt.centroid.tuple[0], 9)
            self.assertAlmostEqual(mp.centroid[1], mpnt.centroid.tuple[1], 9)

            self.assertRaises(GEOSIndexError, mpnt.__getitem__, len(mpnt))
            self.assertEqual(mp.centroid, mpnt.centroid.tuple)
            self.assertEqual(mp.coords, tuple(m.tuple for m in mpnt))
            for p in mpnt:
                self.assertEqual(p.geom_type, 'Point')
                self.assertEqual(p.geom_typeid, 0)
                self.assertEqual(p.empty, False)
                self.assertEqual(p.valid, True)

    def test_linestring(self):
        "Testing LineString objects."
        prev = fromstr('POINT(0 0)')
        for l in self.geometries.linestrings:
            ls = fromstr(l.wkt)
            self.assertEqual(ls.geom_type, 'LineString')
            self.assertEqual(ls.geom_typeid, 1)
            self.assertEqual(ls.empty, False)
            self.assertEqual(ls.ring, False)
            if hasattr(l, 'centroid'):
                self.assertEqual(l.centroid, ls.centroid.tuple)
            if hasattr(l, 'tup'):
                self.assertEqual(l.tup, ls.tuple)

            self.assertEqual(True, ls == fromstr(l.wkt))
            self.assertEqual(False, ls == prev)
            self.assertRaises(GEOSIndexError, ls.__getitem__, len(ls))
            prev = ls

            # Creating a LineString from a tuple, list, and numpy array
            self.assertEqual(ls, LineString(ls.tuple))  # tuple
            self.assertEqual(ls, LineString(*ls.tuple))  # as individual arguments
            self.assertEqual(ls, LineString([list(tup) for tup in ls.tuple]))  # as list
            self.assertEqual(ls.wkt, LineString(*tuple(Point(tup) for tup in ls.tuple)).wkt)  # Point individual arguments
            if numpy:
                self.assertEqual(ls, LineString(numpy.array(ls.tuple)))  # as numpy array

    def test_multilinestring(self):
        "Testing MultiLineString objects."
        prev = fromstr('POINT(0 0)')
        for l in self.geometries.multilinestrings:
            ml = fromstr(l.wkt)
            self.assertEqual(ml.geom_type, 'MultiLineString')
            self.assertEqual(ml.geom_typeid, 5)

            self.assertAlmostEqual(l.centroid[0], ml.centroid.x, 9)
            self.assertAlmostEqual(l.centroid[1], ml.centroid.y, 9)

            self.assertEqual(True, ml == fromstr(l.wkt))
            self.assertEqual(False, ml == prev)
            prev = ml

            for ls in ml:
                self.assertEqual(ls.geom_type, 'LineString')
                self.assertEqual(ls.geom_typeid, 1)
                self.assertEqual(ls.empty, False)

            self.assertRaises(GEOSIndexError, ml.__getitem__, len(ml))
            self.assertEqual(ml.wkt, MultiLineString(*tuple(s.clone() for s in ml)).wkt)
            self.assertEqual(ml, MultiLineString(*tuple(LineString(s.tuple) for s in ml)))

    def test_linearring(self):
        "Testing LinearRing objects."
        for rr in self.geometries.linearrings:
            lr = fromstr(rr.wkt)
            self.assertEqual(lr.geom_type, 'LinearRing')
            self.assertEqual(lr.geom_typeid, 2)
            self.assertEqual(rr.n_p, len(lr))
            self.assertEqual(True, lr.valid)
            self.assertEqual(False, lr.empty)

            # Creating a LinearRing from a tuple, list, and numpy array
            self.assertEqual(lr, LinearRing(lr.tuple))
            self.assertEqual(lr, LinearRing(*lr.tuple))
            self.assertEqual(lr, LinearRing([list(tup) for tup in lr.tuple]))
            if numpy:
                self.assertEqual(lr, LinearRing(numpy.array(lr.tuple)))

    def test_polygons_from_bbox(self):
        "Testing `from_bbox` class method."
        bbox = (-180, -90, 180, 90)
        p = Polygon.from_bbox(bbox)
        self.assertEqual(bbox, p.extent)

        # Testing numerical precision
        x = 3.14159265358979323
        bbox = (0, 0, 1, x)
        p = Polygon.from_bbox(bbox)
        y = p.extent[-1]
        self.assertEqual(format(x, '.13f'), format(y, '.13f'))

    def test_polygons(self):
        "Testing Polygon objects."
        prev = fromstr('POINT(0 0)')
        for p in self.geometries.polygons:
            # Creating the Polygon, testing its properties.
            poly = fromstr(p.wkt)
            self.assertEqual(poly.geom_type, 'Polygon')
            self.assertEqual(poly.geom_typeid, 3)
            self.assertEqual(poly.empty, False)
            self.assertEqual(poly.ring, False)
            self.assertEqual(p.n_i, poly.num_interior_rings)
            self.assertEqual(p.n_i + 1, len(poly))  # Testing __len__
            self.assertEqual(p.n_p, poly.num_points)

            # Area & Centroid
            self.assertAlmostEqual(p.area, poly.area, 9)
            self.assertAlmostEqual(p.centroid[0], poly.centroid.tuple[0], 9)
            self.assertAlmostEqual(p.centroid[1], poly.centroid.tuple[1], 9)

            # Testing the geometry equivalence
            self.assertEqual(True, poly == fromstr(p.wkt))
            self.assertEqual(False, poly == prev)  # Should not be equal to previous geometry
            self.assertEqual(True, poly != prev)

            # Testing the exterior ring
            ring = poly.exterior_ring
            self.assertEqual(ring.geom_type, 'LinearRing')
            self.assertEqual(ring.geom_typeid, 2)
            if p.ext_ring_cs:
                self.assertEqual(p.ext_ring_cs, ring.tuple)
                self.assertEqual(p.ext_ring_cs, poly[0].tuple)  # Testing __getitem__

            # Testing __getitem__ and __setitem__ on invalid indices
            self.assertRaises(GEOSIndexError, poly.__getitem__, len(poly))
            self.assertRaises(GEOSIndexError, poly.__setitem__, len(poly), False)
            self.assertRaises(GEOSIndexError, poly.__getitem__, -1 * len(poly) - 1)

            # Testing __iter__
            for r in poly:
                self.assertEqual(r.geom_type, 'LinearRing')
                self.assertEqual(r.geom_typeid, 2)

            # Testing polygon construction.
            self.assertRaises(TypeError, Polygon, 0, [1, 2, 3])
            self.assertRaises(TypeError, Polygon, 'foo')

            # Polygon(shell, (hole1, ... holeN))
            rings = tuple(r for r in poly)
            self.assertEqual(poly, Polygon(rings[0], rings[1:]))

            # Polygon(shell_tuple, hole_tuple1, ... , hole_tupleN)
            ring_tuples = tuple(r.tuple for r in poly)
            self.assertEqual(poly, Polygon(*ring_tuples))

            # Constructing with tuples of LinearRings.
            self.assertEqual(poly.wkt, Polygon(*tuple(r for r in poly)).wkt)
            self.assertEqual(poly.wkt, Polygon(*tuple(LinearRing(r.tuple) for r in poly)).wkt)

    def test_polygon_comparison(self):
        p1 = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
        p2 = Polygon(((0, 0), (0, 1), (1, 0), (0, 0)))
        self.assertTrue(p1 > p2)
        self.assertFalse(p1 < p2)
        self.assertFalse(p2 > p1)
        self.assertTrue(p2 < p1)

        p3 = Polygon(((0, 0), (0, 1), (1, 1), (2, 0), (0, 0)))
        p4 = Polygon(((0, 0), (0, 1), (2, 2), (1, 0), (0, 0)))
        self.assertFalse(p4 < p3)
        self.assertTrue(p3 < p4)
        self.assertTrue(p4 > p3)
        self.assertFalse(p3 > p4)

    def test_multipolygons(self):
        "Testing MultiPolygon objects."
        prev = fromstr('POINT (0 0)')
        for mp in self.geometries.multipolygons:
            mpoly = fromstr(mp.wkt)
            self.assertEqual(mpoly.geom_type, 'MultiPolygon')
            self.assertEqual(mpoly.geom_typeid, 6)
            self.assertEqual(mp.valid, mpoly.valid)

            if mp.valid:
                self.assertEqual(mp.num_geom, mpoly.num_geom)
                self.assertEqual(mp.n_p, mpoly.num_coords)
                self.assertEqual(mp.num_geom, len(mpoly))
                self.assertRaises(GEOSIndexError, mpoly.__getitem__, len(mpoly))
                for p in mpoly:
                    self.assertEqual(p.geom_type, 'Polygon')
                    self.assertEqual(p.geom_typeid, 3)
                    self.assertEqual(p.valid, True)
                self.assertEqual(mpoly.wkt, MultiPolygon(*tuple(poly.clone() for poly in mpoly)).wkt)

    def test_memory_hijinks(self):
        "Testing Geometry __del__() on rings and polygons."
        #### Memory issues with rings and polygons

        # These tests are needed to ensure sanity with writable geometries.

        # Getting a polygon with interior rings, and pulling out the interior rings
        poly = fromstr(self.geometries.polygons[1].wkt)
        ring1 = poly[0]
        ring2 = poly[1]

        # These deletes should be 'harmless' since they are done on child geometries
        del ring1
        del ring2
        ring1 = poly[0]
        ring2 = poly[1]

        # Deleting the polygon
        del poly

        # Access to these rings is OK since they are clones.
        s1, s2 = str(ring1), str(ring2)

    def test_coord_seq(self):
        "Testing Coordinate Sequence objects."
        for p in self.geometries.polygons:
            if p.ext_ring_cs:
                # Constructing the polygon and getting the coordinate sequence
                poly = fromstr(p.wkt)
                cs = poly.exterior_ring.coord_seq

                self.assertEqual(p.ext_ring_cs, cs.tuple)  # done in the Polygon test too.
                self.assertEqual(len(p.ext_ring_cs), len(cs))  # Making sure __len__ works

                # Checks __getitem__ and __setitem__
                for i in xrange(len(p.ext_ring_cs)):
                    c1 = p.ext_ring_cs[i]  # Expected value
                    c2 = cs[i]  # Value from coordseq
                    self.assertEqual(c1, c2)

                    # Constructing the test value to set the coordinate sequence with
                    if len(c1) == 2:
                        tset = (5, 23)
                    else:
                        tset = (5, 23, 8)
                    cs[i] = tset

                    # Making sure every set point matches what we expect
                    for j in range(len(tset)):
                        cs[i] = tset
                        self.assertEqual(tset[j], cs[i][j])

    def test_relate_pattern(self):
        "Testing relate() and relate_pattern()."
        g = fromstr('POINT (0 0)')
        self.assertRaises(GEOSException, g.relate_pattern, 0, 'invalid pattern, yo')
        for rg in self.geometries.relate_geoms:
            a = fromstr(rg.wkt_a)
            b = fromstr(rg.wkt_b)
            self.assertEqual(rg.result, a.relate_pattern(b, rg.pattern))
            self.assertEqual(rg.pattern, a.relate(b))

    def test_intersection(self):
        "Testing intersects() and intersection()."
        for i in xrange(len(self.geometries.topology_geoms)):
            a = fromstr(self.geometries.topology_geoms[i].wkt_a)
            b = fromstr(self.geometries.topology_geoms[i].wkt_b)
            i1 = fromstr(self.geometries.intersect_geoms[i].wkt)
            self.assertEqual(True, a.intersects(b))
            i2 = a.intersection(b)
            self.assertEqual(i1, i2)
            self.assertEqual(i1, a & b)  # __and__ is intersection operator
            a &= b  # testing __iand__
            self.assertEqual(i1, a)

    def test_union(self):
        "Testing union()."
        for i in xrange(len(self.geometries.topology_geoms)):
            a = fromstr(self.geometries.topology_geoms[i].wkt_a)
            b = fromstr(self.geometries.topology_geoms[i].wkt_b)
            u1 = fromstr(self.geometries.union_geoms[i].wkt)
            u2 = a.union(b)
            self.assertEqual(u1, u2)
            self.assertEqual(u1, a | b)  # __or__ is union operator
            a |= b  # testing __ior__
            self.assertEqual(u1, a)

    def test_difference(self):
        "Testing difference()."
        for i in xrange(len(self.geometries.topology_geoms)):
            a = fromstr(self.geometries.topology_geoms[i].wkt_a)
            b = fromstr(self.geometries.topology_geoms[i].wkt_b)
            d1 = fromstr(self.geometries.diff_geoms[i].wkt)
            d2 = a.difference(b)
            self.assertEqual(d1, d2)
            self.assertEqual(d1, a - b)  # __sub__ is difference operator
            a -= b  # testing __isub__
            self.assertEqual(d1, a)

    def test_symdifference(self):
        "Testing sym_difference()."
        for i in xrange(len(self.geometries.topology_geoms)):
            a = fromstr(self.geometries.topology_geoms[i].wkt_a)
            b = fromstr(self.geometries.topology_geoms[i].wkt_b)
            d1 = fromstr(self.geometries.sdiff_geoms[i].wkt)
            d2 = a.sym_difference(b)
            self.assertEqual(d1, d2)
            self.assertEqual(d1, a ^ b)  # __xor__ is symmetric difference operator
            a ^= b  # testing __ixor__
            self.assertEqual(d1, a)

    def test_buffer(self):
        "Testing buffer()."
        for bg in self.geometries.buffer_geoms:
            g = fromstr(bg.wkt)

            # The buffer we expect
            exp_buf = fromstr(bg.buffer_wkt)
            quadsegs = bg.quadsegs
            width = bg.width

            # Can't use a floating-point for the number of quadsegs.
            self.assertRaises(ctypes.ArgumentError, g.buffer, width, float(quadsegs))

            # Constructing our buffer
            buf = g.buffer(width, quadsegs)
            self.assertEqual(exp_buf.num_coords, buf.num_coords)
            self.assertEqual(len(exp_buf), len(buf))

            # Now assuring that each point in the buffer is almost equal
            for j in xrange(len(exp_buf)):
                exp_ring = exp_buf[j]
                buf_ring = buf[j]
                self.assertEqual(len(exp_ring), len(buf_ring))
                for k in xrange(len(exp_ring)):
                    # Asserting the X, Y of each point are almost equal (due to floating point imprecision)
                    self.assertAlmostEqual(exp_ring[k][0], buf_ring[k][0], 9)
                    self.assertAlmostEqual(exp_ring[k][1], buf_ring[k][1], 9)

    def test_srid(self):
        "Testing the SRID property and keyword."
        # Testing SRID keyword on Point
        pnt = Point(5, 23, srid=4326)
        self.assertEqual(4326, pnt.srid)
        pnt.srid = 3084
        self.assertEqual(3084, pnt.srid)
        self.assertRaises(ctypes.ArgumentError, pnt.set_srid, '4326')

        # Testing SRID keyword on fromstr(), and on Polygon rings.
        poly = fromstr(self.geometries.polygons[1].wkt, srid=4269)
        self.assertEqual(4269, poly.srid)
        for ring in poly:
            self.assertEqual(4269, ring.srid)
        poly.srid = 4326
        self.assertEqual(4326, poly.shell.srid)

        # Testing SRID keyword on GeometryCollection
        gc = GeometryCollection(Point(5, 23), LineString((0, 0), (1.5, 1.5), (3, 3)), srid=32021)
        self.assertEqual(32021, gc.srid)
        for i in range(len(gc)):
            self.assertEqual(32021, gc[i].srid)

        # GEOS may get the SRID from HEXEWKB
        # 'POINT(5 23)' at SRID=4326 in hex form -- obtained from PostGIS
        # using `SELECT GeomFromText('POINT (5 23)', 4326);`.
        hex = '0101000020E610000000000000000014400000000000003740'
        p1 = fromstr(hex)
        self.assertEqual(4326, p1.srid)

        # In GEOS 3.0.0rc1-4 when the EWKB and/or HEXEWKB is exported,
        # the SRID information is lost and set to -1 -- this is not a
        # problem on the 3.0.0 version (another reason to upgrade).
        exp_srid = self.null_srid

        p2 = fromstr(p1.hex)
        self.assertEqual(exp_srid, p2.srid)
        p3 = fromstr(p1.hex, srid=-1)  # -1 is intended.
        self.assertEqual(-1, p3.srid)

    @skipUnless(HAS_GDAL, "GDAL is required.")
    def test_custom_srid(self):
        """ Test with a srid unknown from GDAL """
        pnt = Point(111200, 220900, srid=999999)
        self.assertTrue(pnt.ewkt.startswith("SRID=999999;POINT (111200.0"))
        self.assertIsInstance(pnt.ogr, gdal.OGRGeometry)
        self.assertIsNone(pnt.srs)

        # Test conversion from custom to a known srid
        c2w = gdal.CoordTransform(
            gdal.SpatialReference('+proj=mill +lat_0=0 +lon_0=0 +x_0=0 +y_0=0 +R_A +ellps=WGS84 +datum=WGS84 +units=m +no_defs'),
            gdal.SpatialReference(4326))
        new_pnt = pnt.transform(c2w, clone=True)
        self.assertEqual(new_pnt.srid, 4326)
        self.assertAlmostEqual(new_pnt.x, 1, 3)
        self.assertAlmostEqual(new_pnt.y, 2, 3)

    def test_mutable_geometries(self):
        "Testing the mutability of Polygons and Geometry Collections."
        ### Testing the mutability of Polygons ###
        for p in self.geometries.polygons:
            poly = fromstr(p.wkt)

            # Should only be able to use __setitem__ with LinearRing geometries.
            self.assertRaises(TypeError, poly.__setitem__, 0, LineString((1, 1), (2, 2)))

            # Constructing the new shell by adding 500 to every point in the old shell.
            shell_tup = poly.shell.tuple
            new_coords = []
            for point in shell_tup:
                new_coords.append((point[0] + 500., point[1] + 500.))
            new_shell = LinearRing(*tuple(new_coords))

            # Assigning polygon's exterior ring w/the new shell
            poly.exterior_ring = new_shell
            s = str(new_shell)  # new shell is still accessible
            self.assertEqual(poly.exterior_ring, new_shell)
            self.assertEqual(poly[0], new_shell)

        ### Testing the mutability of Geometry Collections
        for tg in self.geometries.multipoints:
            mp = fromstr(tg.wkt)
            for i in range(len(mp)):
                # Creating a random point.
                pnt = mp[i]
                new = Point(random.randint(21, 100), random.randint(21, 100))
                # Testing the assignment
                mp[i] = new
                s = str(new)  # what was used for the assignment is still accessible
                self.assertEqual(mp[i], new)
                self.assertEqual(mp[i].wkt, new.wkt)
                self.assertNotEqual(pnt, mp[i])

        # MultiPolygons involve much more memory management because each
        # Polygon w/in the collection has its own rings.
        for tg in self.geometries.multipolygons:
            mpoly = fromstr(tg.wkt)
            for i in xrange(len(mpoly)):
                poly = mpoly[i]
                old_poly = mpoly[i]
                # Offsetting each ring in the polygon by 500.
                for j in xrange(len(poly)):
                    r = poly[j]
                    for k in xrange(len(r)):
                        r[k] = (r[k][0] + 500., r[k][1] + 500.)
                    poly[j] = r

                self.assertNotEqual(mpoly[i], poly)
                # Testing the assignment
                mpoly[i] = poly
                s = str(poly)  # Still accessible
                self.assertEqual(mpoly[i], poly)
                self.assertNotEqual(mpoly[i], old_poly)

        # Extreme (!!) __setitem__ -- no longer works, have to detect
        # in the first object that __setitem__ is called in the subsequent
        # objects -- maybe mpoly[0, 0, 0] = (3.14, 2.71)?
        #mpoly[0][0][0] = (3.14, 2.71)
        #self.assertEqual((3.14, 2.71), mpoly[0][0][0])
        # Doing it more slowly..
        #self.assertEqual((3.14, 2.71), mpoly[0].shell[0])
        #del mpoly

    def test_threed(self):
        "Testing three-dimensional geometries."
        # Testing a 3D Point
        pnt = Point(2, 3, 8)
        self.assertEqual((2.,3.,8.), pnt.coords)
        self.assertRaises(TypeError, pnt.set_coords, (1.,2.))
        pnt.coords = (1.,2.,3.)
        self.assertEqual((1.,2.,3.), pnt.coords)

        # Testing a 3D LineString
        ls = LineString((2., 3., 8.), (50., 250., -117.))
        self.assertEqual(((2.,3.,8.), (50.,250.,-117.)), ls.tuple)
        self.assertRaises(TypeError, ls.__setitem__, 0, (1.,2.))
        ls[0] = (1.,2.,3.)
        self.assertEqual((1.,2.,3.), ls[0])

    def test_distance(self):
        "Testing the distance() function."
        # Distance to self should be 0.
        pnt = Point(0, 0)
        self.assertEqual(0.0, pnt.distance(Point(0, 0)))

        # Distance should be 1
        self.assertEqual(1.0, pnt.distance(Point(0, 1)))

        # Distance should be ~ sqrt(2)
        self.assertAlmostEqual(1.41421356237, pnt.distance(Point(1, 1)), 11)

        # Distances are from the closest vertex in each geometry --
        # should be 3 (distance from (2, 2) to (5, 2)).
        ls1 = LineString((0, 0), (1, 1), (2, 2))
        ls2 = LineString((5, 2), (6, 1), (7, 0))
        self.assertEqual(3, ls1.distance(ls2))

    def test_length(self):
        "Testing the length property."
        # Points have 0 length.
        pnt = Point(0, 0)
        self.assertEqual(0.0, pnt.length)

        # Should be ~ sqrt(2)
        ls = LineString((0, 0), (1, 1))
        self.assertAlmostEqual(1.41421356237, ls.length, 11)

        # Should be circumference of Polygon
        poly = Polygon(LinearRing((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
        self.assertEqual(4.0, poly.length)

        # Should be sum of each element's length in collection.
        mpoly = MultiPolygon(poly.clone(), poly)
        self.assertEqual(8.0, mpoly.length)

    def test_emptyCollections(self):
        "Testing empty geometries and collections."
        gc1 = GeometryCollection([])
        gc2 = fromstr('GEOMETRYCOLLECTION EMPTY')
        pnt = fromstr('POINT EMPTY')
        ls = fromstr('LINESTRING EMPTY')
        poly = fromstr('POLYGON EMPTY')
        mls = fromstr('MULTILINESTRING EMPTY')
        mpoly1 = fromstr('MULTIPOLYGON EMPTY')
        mpoly2 = MultiPolygon(())

        for g in [gc1, gc2, pnt, ls, poly, mls, mpoly1, mpoly2]:
            self.assertEqual(True, g.empty)

            # Testing len() and num_geom.
            if isinstance(g, Polygon):
                self.assertEqual(1, len(g))  # Has one empty linear ring
                self.assertEqual(1, g.num_geom)
                self.assertEqual(0, len(g[0]))
            elif isinstance(g, (Point, LineString)):
                self.assertEqual(1, g.num_geom)
                self.assertEqual(0, len(g))
            else:
                self.assertEqual(0, g.num_geom)
                self.assertEqual(0, len(g))

            # Testing __getitem__ (doesn't work on Point or Polygon)
            if isinstance(g, Point):
                self.assertRaises(GEOSIndexError, g.get_x)
            elif isinstance(g, Polygon):
                lr = g.shell
                self.assertEqual('LINEARRING EMPTY', lr.wkt)
                self.assertEqual(0, len(lr))
                self.assertEqual(True, lr.empty)
                self.assertRaises(GEOSIndexError, lr.__getitem__, 0)
            else:
                self.assertRaises(GEOSIndexError, g.__getitem__, 0)

    def test_collections_of_collections(self):
        "Testing GeometryCollection handling of other collections."
        # Creating a GeometryCollection WKT string composed of other
        # collections and polygons.
        coll = [mp.wkt for mp in self.geometries.multipolygons if mp.valid]
        coll.extend([mls.wkt for mls in self.geometries.multilinestrings])
        coll.extend([p.wkt for p in self.geometries.polygons])
        coll.extend([mp.wkt for mp in self.geometries.multipoints])
        gc_wkt = 'GEOMETRYCOLLECTION(%s)' % ','.join(coll)

        # Should construct ok from WKT
        gc1 = GEOSGeometry(gc_wkt)

        # Should also construct ok from individual geometry arguments.
        gc2 = GeometryCollection(*tuple(g for g in gc1))

        # And, they should be equal.
        self.assertEqual(gc1, gc2)

    @skipUnless(HAS_GDAL, "GDAL is required.")
    def test_gdal(self):
        "Testing `ogr` and `srs` properties."
        g1 = fromstr('POINT(5 23)')
        self.assertIsInstance(g1.ogr, gdal.OGRGeometry)
        self.assertIsNone(g1.srs)

        if GEOS_PREPARE:
            g1_3d = fromstr('POINT(5 23 8)')
            self.assertIsInstance(g1_3d.ogr, gdal.OGRGeometry)
            self.assertEqual(g1_3d.ogr.z, 8)

        g2 = fromstr('LINESTRING(0 0, 5 5, 23 23)', srid=4326)
        self.assertIsInstance(g2.ogr, gdal.OGRGeometry)
        self.assertIsInstance(g2.srs, gdal.SpatialReference)
        self.assertEqual(g2.hex, g2.ogr.hex)
        self.assertEqual('WGS 84', g2.srs.name)

    def test_copy(self):
        "Testing use with the Python `copy` module."
        import copy
        poly = GEOSGeometry('POLYGON((0 0, 0 23, 23 23, 23 0, 0 0), (5 5, 5 10, 10 10, 10 5, 5 5))')
        cpy1 = copy.copy(poly)
        cpy2 = copy.deepcopy(poly)
        self.assertNotEqual(poly._ptr, cpy1._ptr)
        self.assertNotEqual(poly._ptr, cpy2._ptr)

    @skipUnless(HAS_GDAL, "GDAL is required to transform geometries")
    def test_transform(self):
        "Testing `transform` method."
        orig = GEOSGeometry('POINT (-104.609 38.255)', 4326)
        trans = GEOSGeometry('POINT (992385.4472045 481455.4944650)', 2774)

        # Using a srid, a SpatialReference object, and a CoordTransform object
        # for transformations.
        t1, t2, t3 = orig.clone(), orig.clone(), orig.clone()
        t1.transform(trans.srid)
        t2.transform(gdal.SpatialReference('EPSG:2774'))
        ct = gdal.CoordTransform(gdal.SpatialReference('WGS84'), gdal.SpatialReference(2774))
        t3.transform(ct)

        # Testing use of the `clone` keyword.
        k1 = orig.clone()
        k2 = k1.transform(trans.srid, clone=True)
        self.assertEqual(k1, orig)
        self.assertNotEqual(k1, k2)

        prec = 3
        for p in (t1, t2, t3, k2):
            self.assertAlmostEqual(trans.x, p.x, prec)
            self.assertAlmostEqual(trans.y, p.y, prec)

    @skipUnless(HAS_GDAL, "GDAL is required to transform geometries")
    def test_transform_3d(self):
        p3d = GEOSGeometry('POINT (5 23 100)', 4326)
        p3d.transform(2774)
        if GEOS_PREPARE:
            self.assertEqual(p3d.z, 100)
        else:
            self.assertIsNone(p3d.z)

    @skipUnless(HAS_GDAL, "GDAL is required.")
    def test_transform_noop(self):
        """ Testing `transform` method (SRID match) """
        # transform() should no-op if source & dest SRIDs match,
        # regardless of whether GDAL is available.
        if gdal.HAS_GDAL:
            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            gt = g.tuple
            g.transform(4326)
            self.assertEqual(g.tuple, gt)
            self.assertEqual(g.srid, 4326)

            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            g1 = g.transform(4326, clone=True)
            self.assertEqual(g1.tuple, g.tuple)
            self.assertEqual(g1.srid, 4326)
            self.assertTrue(g1 is not g, "Clone didn't happen")

        old_has_gdal = gdal.HAS_GDAL
        try:
            gdal.HAS_GDAL = False

            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            gt = g.tuple
            g.transform(4326)
            self.assertEqual(g.tuple, gt)
            self.assertEqual(g.srid, 4326)

            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            g1 = g.transform(4326, clone=True)
            self.assertEqual(g1.tuple, g.tuple)
            self.assertEqual(g1.srid, 4326)
            self.assertTrue(g1 is not g, "Clone didn't happen")
        finally:
            gdal.HAS_GDAL = old_has_gdal

    def test_transform_nosrid(self):
        """ Testing `transform` method (no SRID or negative SRID) """
        g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
        self.assertRaises(GEOSException, g.transform, 2774)

        g = GEOSGeometry('POINT (-104.609 38.255)', srid=None)
        self.assertRaises(GEOSException, g.transform, 2774, clone=True)

        g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
        self.assertRaises(GEOSException, g.transform, 2774)

        g = GEOSGeometry('POINT (-104.609 38.255)', srid=-1)
        self.assertRaises(GEOSException, g.transform, 2774, clone=True)

    @skipUnless(HAS_GDAL, "GDAL is required.")
    def test_transform_nogdal(self):
        """ Testing `transform` method (GDAL not available) """
        old_has_gdal = gdal.HAS_GDAL
        try:
            gdal.HAS_GDAL = False

            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            self.assertRaises(GEOSException, g.transform, 2774)

            g = GEOSGeometry('POINT (-104.609 38.255)', 4326)
            self.assertRaises(GEOSException, g.transform, 2774, clone=True)
        finally:
            gdal.HAS_GDAL = old_has_gdal

    def test_extent(self):
        "Testing `extent` method."
        # The xmin, ymin, xmax, ymax of the MultiPoint should be returned.
        mp = MultiPoint(Point(5, 23), Point(0, 0), Point(10, 50))
        self.assertEqual((0.0, 0.0, 10.0, 50.0), mp.extent)
        pnt = Point(5.23, 17.8)
        # Extent of points is just the point itself repeated.
        self.assertEqual((5.23, 17.8, 5.23, 17.8), pnt.extent)
        # Testing on the 'real world' Polygon.
        poly = fromstr(self.geometries.polygons[3].wkt)
        ring = poly.shell
        x, y = ring.x, ring.y
        xmin, ymin = min(x), min(y)
        xmax, ymax = max(x), max(y)
        self.assertEqual((xmin, ymin, xmax, ymax), poly.extent)

    def test_pickle(self):
        "Testing pickling and unpickling support."
        # Using both pickle and cPickle -- just 'cause.
        from django.utils.six.moves import cPickle
        import pickle

        # Creating a list of test geometries for pickling,
        # and setting the SRID on some of them.
        def get_geoms(lst, srid=None):
            return [GEOSGeometry(tg.wkt, srid) for tg in lst]
        tgeoms = get_geoms(self.geometries.points)
        tgeoms.extend(get_geoms(self.geometries.multilinestrings, 4326))
        tgeoms.extend(get_geoms(self.geometries.polygons, 3084))
        tgeoms.extend(get_geoms(self.geometries.multipolygons, 900913))

        # The SRID won't be exported in GEOS 3.0 release candidates.
        no_srid = self.null_srid == -1
        for geom in tgeoms:
            s1, s2 = cPickle.dumps(geom), pickle.dumps(geom)
            g1, g2 = cPickle.loads(s1), pickle.loads(s2)
            for tmpg in (g1, g2):
                self.assertEqual(geom, tmpg)
                if not no_srid:
                    self.assertEqual(geom.srid, tmpg.srid)

    @skipUnless(HAS_GEOS and GEOS_PREPARE, "geos >= 3.1.0 is required")
    def test_prepared(self):
        "Testing PreparedGeometry support."
        # Creating a simple multipolygon and getting a prepared version.
        mpoly = GEOSGeometry('MULTIPOLYGON(((0 0,0 5,5 5,5 0,0 0)),((5 5,5 10,10 10,10 5,5 5)))')
        prep = mpoly.prepared

        # A set of test points.
        pnts = [Point(5, 5), Point(7.5, 7.5), Point(2.5, 7.5)]
        covers = [True, True, False]  # No `covers` op for regular GEOS geoms.
        for pnt, c in zip(pnts, covers):
            # Results should be the same (but faster)
            self.assertEqual(mpoly.contains(pnt), prep.contains(pnt))
            self.assertEqual(mpoly.intersects(pnt), prep.intersects(pnt))
            self.assertEqual(c, prep.covers(pnt))

        # Original geometry deletion should not crash the prepared one (#21662)
        del mpoly
        self.assertTrue(prep.covers(Point(5, 5)))

    def test_line_merge(self):
        "Testing line merge support"
        ref_geoms = (fromstr('LINESTRING(1 1, 1 1, 3 3)'),
                     fromstr('MULTILINESTRING((1 1, 3 3), (3 3, 4 2))'),
                     )
        ref_merged = (fromstr('LINESTRING(1 1, 3 3)'),
                      fromstr('LINESTRING (1 1, 3 3, 4 2)'),
                      )
        for geom, merged in zip(ref_geoms, ref_merged):
            self.assertEqual(merged, geom.merged)

    @skipUnless(HAS_GEOS and GEOS_PREPARE, "geos >= 3.1.0 is required")
    def test_valid_reason(self):
        "Testing IsValidReason support"

        g = GEOSGeometry("POINT(0 0)")
        self.assertTrue(g.valid)
        self.assertIsInstance(g.valid_reason, six.string_types)
        self.assertEqual(g.valid_reason, "Valid Geometry")

        g = GEOSGeometry("LINESTRING(0 0, 0 0)")

        self.assertFalse(g.valid)
        self.assertIsInstance(g.valid_reason, six.string_types)
        self.assertTrue(g.valid_reason.startswith("Too few points in geometry component"))

    @skipUnless(HAS_GEOS and geos_version_info()['version'] >= '3.2.0', "geos >= 3.2.0 is required")
    def test_linearref(self):
        "Testing linear referencing"

        ls = fromstr('LINESTRING(0 0, 0 10, 10 10, 10 0)')
        mls = fromstr('MULTILINESTRING((0 0, 0 10), (10 0, 10 10))')

        self.assertEqual(ls.project(Point(0, 20)), 10.0)
        self.assertEqual(ls.project(Point(7, 6)), 24)
        self.assertEqual(ls.project_normalized(Point(0, 20)), 1.0/3)

        self.assertEqual(ls.interpolate(10), Point(0, 10))
        self.assertEqual(ls.interpolate(24), Point(10, 6))
        self.assertEqual(ls.interpolate_normalized(1.0/3), Point(0, 10))

        self.assertEqual(mls.project(Point(0, 20)), 10)
        self.assertEqual(mls.project(Point(7, 6)), 16)

        self.assertEqual(mls.interpolate(9), Point(0, 9))
        self.assertEqual(mls.interpolate(17), Point(10, 7))

    def test_geos_version(self):
        """Testing the GEOS version regular expression."""
        from django.contrib.gis.geos.libgeos import version_regex
        versions = [('3.0.0rc4-CAPI-1.3.3', '3.0.0', '1.3.3'),
                    ('3.0.0-CAPI-1.4.1', '3.0.0', '1.4.1'),
                    ('3.4.0dev-CAPI-1.8.0', '3.4.0', '1.8.0'),
                    ('3.4.0dev-CAPI-1.8.0 r0', '3.4.0', '1.8.0')]
        for v_init, v_geos, v_capi in versions:
            m = version_regex.match(v_init)
            self.assertTrue(m, msg="Unable to parse the version string '%s'" % v_init)
            self.assertEqual(m.group('version'), v_geos)
            self.assertEqual(m.group('capi_version'), v_capi)
apache-2.0
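The transform tests above hinge on the clone=True contract: a clone is reprojected while the original keeps its SRID. A minimal standalone sketch of that round trip, assuming django.contrib.gis is installed with working GEOS and GDAL libraries (SRIDs 4326 and 2774 as in the tests):

# Minimal sketch of GEOSGeometry.transform(), assuming GEOS/GDAL are available.
from django.contrib.gis.geos import GEOSGeometry

pnt = GEOSGeometry('POINT (-104.609 38.255)', srid=4326)

# clone=True returns a reprojected copy and leaves the original untouched.
clone = pnt.transform(2774, clone=True)
assert pnt.srid == 4326
assert clone.srid == 2774

# Without clone=True the geometry is reprojected in place.
pnt.transform(2774)
assert pnt.srid == 2774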
hynnet/hiwifi-openwrt-HC5661-HC5761
staging_dir/host/lib/python2.7/test/test_errno.py
91
1184
#! /usr/bin/env python """Test the errno module Roger E. Masse """ import errno from test import test_support import unittest std_c_errors = frozenset(['EDOM', 'ERANGE']) class ErrnoAttributeTests(unittest.TestCase): def test_for_improper_attributes(self): # The standard C error codes should be present on the module. for error_code in std_c_errors: self.assertTrue(hasattr(errno, error_code), "errno is missing %s" % error_code) def test_using_errorcode(self): # Every value in errno.errorcode should be an attribute of the module. for value in errno.errorcode.itervalues(): self.assertTrue(hasattr(errno, value), 'no %s attr in errno' % value) class ErrorcodeTests(unittest.TestCase): def test_attributes_in_errorcode(self): for attribute in errno.__dict__.iterkeys(): if attribute.isupper(): self.assertIn(getattr(errno, attribute), errno.errorcode, 'no %s attr in errno.errorcode' % attribute) def test_main(): test_support.run_unittest(ErrnoAttributeTests, ErrorcodeTests) if __name__ == '__main__': test_main()
gpl-2.0
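The checks above revolve around the two-way mapping between the module's EXXX attributes and the errno.errorcode dictionary. A minimal sketch of that mapping (print-call style so it runs under the Python 2 interpreter this test targets as well as Python 3):

# Minimal sketch of the errno <-> errorcode mapping exercised by the tests.
import errno
import os

print(errno.EDOM)                    # the numeric error code
print(errno.errorcode[errno.EDOM])   # the reverse lookup: 'EDOM'
print(os.strerror(errno.EDOM))       # a human-readable message for the code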
wxgeo/geophar
wxgeometrie/modules/tablatex/tests/test_tabsign.py
1
12983
# -*- coding: utf-8 -*- from wxgeometrie.modules.tablatex.tests.tabtestlib import assert_tableau from wxgeometrie.modules.tablatex.tabsign import tabsign from pytest import XFAIL def assert_tabsign(chaine, code_latex, **options): assert_tableau(tabsign, chaine, code_latex, **options) def test_mode_manuel(): s = "x: -oo;+oo// 2x+1: -- -1/2 ++// 3-x: ++ 3 --// f(x)" tab = \ r"""\begin{center} \begin{tabular}{|c|ccccccc|} \hline $x$ & $-\infty$ & & $-\frac{1}{2}$ & & $3$ & & $+\infty$ \\ \hline $2x+1$ & & $-$ & 0 & + & & + & \\ \hline $3-x$ & & + & & + & 0 & $-$ & \\ \hline $f(x)$ & & $-$ & 0 & + & 0 & $-$ & \\ \hline \end{tabular} \end{center} % x: -oo;+oo// 2x+1: -- -1/2 ++// 3-x: ++ 3 --// f(x) """ assert_tabsign(s, tab) def test_mode_auto(): s = 'g(x)=(x-7/2)(x+7/2)' tab = \ r'''\begin{center} \begin{tabular}{|c|ccccccc|} \hline $x$ & $-\infty$ & & $-\frac{7}{2}$ & & $\frac{7}{2}$ & & $+\infty$ \\ \hline $x-\frac{7}{2}$ & & $-$ & & $-$ & 0 & + & \\ \hline $x+\frac{7}{2}$ & & $-$ & 0 & + & & + & \\ \hline $g(x)$ & & + & 0 & $-$ & 0 & + & \\ \hline \end{tabular} \end{center} % x: -oo;+oo// x-7/2: -- 7/2 ++ // x+7/2: -- -7/2 ++ // g(x) % g(x)=(x-7/2)(x+7/2) ''' assert_tabsign(s, tab) def test_polynomes(): s= 'f(x)=x^3-30x^2+112' tab = \ r"""\begin{center} \begin{tabular}{|c|ccccccccc|} \hline $x$ & $-\infty$ & & $-6 \sqrt{7}+14$ & & $2$ & & $14+6 \sqrt{7}$ & & $+\infty$ \\ \hline $f(x)$ & & $-$ & 0 & + & 0 & $-$ & 0 & + & \\ \hline \end{tabular} \end{center} % x: -oo;+oo// x^3-30 x^2+112: -- -6*sqrt(7) + 14 ++ 2 -- 14 + 6*sqrt(7) ++ // f(x) % f(x)=x^3-30x^2+112 """ assert_tabsign(s, tab) s = '- 6 x^{2} - 12 x + 4' tab = \ r'''\begin{center} \begin{tabular}{|c|ccccccc|} \hline $x$ & $-\infty$ & & $-\frac{\sqrt{15}}{3}-1$ & & $-1+\frac{\sqrt{15}}{3}$ & & $+\infty$ \\ \hline $-6 x^{2}-12 x+4$ & & $-$ & 0 & + & 0 & $-$ & \\ \hline \end{tabular} \end{center} % x: -oo;+oo// -6 x^(2)-12 x+4: -- -sqrt(15)/3 - 1 ++ -1 + sqrt(15)/3 -- // - 6 x^{2} - 12 x + 4 % - 6 x^{2} - 12 x + 4 ''' assert_tabsign(s, tab) def test_quotients(): s = '(3x-2)/((x-1)^2)' tab = \ r'''\providecommand{\geopharDB}[1]{$\left|\vphantom{\text{#1}}\right|$} \begin{center} \begin{tabular}{|c|ccccccc|} \hline $x$ & $-\infty$ & & $\frac{2}{3}$ & & $1$ & & $+\infty$ \\ \hline $3 x-2$ & & $-$ & 0 & + & & + & \\ \hline $(x-1)^{2}$ & & + & & + & 0 & + & \\ \hline $\frac{3x-2}{(x-1)^{2}}$ & & $-$ & 0 & + & \geopharDB{$\frac{3x-2}{(x-1)^{2}}$} & + & \\ \hline \end{tabular} \end{center} % x: -oo;!1: !1;+oo// 3 x-2: -- 2/3 ++ // !(x-1)^2: ++ 1 ++ // (3x-2)/((x-1)^2) % (3x-2)/((x-1)^2) ''' assert_tabsign(s, tab) s = '(3x-2)/(x-1)^2' tab = \ r'''\providecommand{\geopharDB}[1]{$\left|\vphantom{\text{#1}}\right|$} \begin{center} \begin{tabular}{|c|ccccccc|} \hline $x$ & $-\infty$ & & $\frac{2}{3}$ & & $1$ & & $+\infty$ \\ \hline $3 x-2$ & & $-$ & 0 & + & & + & \\ \hline $(x-1)^{2}$ & & + & & + & 0 & + & \\ \hline $\frac{3x-2}{(x-1)^{2}}$ & & $-$ & 0 & + & \geopharDB{$\frac{3x-2}{(x-1)^{2}}$} & + & \\ \hline \end{tabular} \end{center} % x: -oo;!1: !1;+oo// 3 x-2: -- 2/3 ++ // !(x-1)^2: ++ 1 ++ // (3x-2)/(x-1)^2 % (3x-2)/(x-1)^2 ''' assert_tabsign(s, tab) def test_latex(): s = '\dfrac{3x-2}{(x-1)^2}' tab = \ r'''\providecommand{\geopharDB}[1]{$\left|\vphantom{\text{#1}}\right|$} \begin{center} \begin{tabular}{|c|ccccccc|} \hline $x$ & $-\infty$ & & $\frac{2}{3}$ & & $1$ & & $+\infty$ \\ \hline $3 x-2$ & & $-$ & 0 & + & & + & \\ \hline $(x-1)^{2}$ & & + & & + & 0 & + & \\ \hline $\dfrac{3x-2}{(x-1)^{2}}$ & & $-$ & 0 & + & 
\geopharDB{$\dfrac{3x-2}{(x-1)^{2}}$} & + & \\ \hline \end{tabular} \end{center} % x: -oo;!1: !1;+oo// 3 x-2: -- 2/3 ++ // !(x-1)^2: ++ 1 ++ // \dfrac{3x-2}{(x-1)^2} % \dfrac{3x-2}{(x-1)^2} ''' assert_tabsign(s, tab) s = "g(x)=\dfrac{-x+1}{\e^{x}}" tab = \ r'''\begin{center} \begin{tabular}{|c|ccccc|} \hline $x$ & $-\infty$ & & $1$ & & $+\infty$ \\ \hline $-x+1$ & & + & 0 & $-$ & \\ \hline $\e^{x}$ & & + & & + & \\ \hline $g(x)$ & & + & 0 & $-$ & \\ \hline \end{tabular} \end{center} % x: -oo;+oo// -x+1: ++ 1 -- // e^(x): ++ // g(x) % g(x)=\dfrac{-x+1}{\e^{x}} ''' assert_tabsign(s, tab) s= "f'(x)=1-\e^{-x+2}" tab = \ r'''\begin{center} \begin{tabular}{|c|ccccc|} \hline $x$ & $-\infty$ & & $2$ & & $+\infty$ \\ \hline $f'(x)$ & & $-$ & 0 & + & \\ \hline \end{tabular} \end{center} % x: -oo;+oo// 1-e^(-x+2): -- 2 ++ // f'(x) % f'(x)=1-\e^{-x+2} ''' assert_tabsign(s, tab) def test_intervalle(): s = "x^2 sur [1;+oo[" tab = \ r'''\begin{center} \begin{tabular}{|c|ccc|} \hline $x$ & $1$ & & $+\infty$ \\ \hline $x^{2}$ & & + & \\ \hline \end{tabular} \end{center} % x: 1;+oo// x^2: ++ // x^2 % x^2 sur [1;+\infty[ ''' assert_tabsign(s, tab) s = "u(x)=1-x sur ]0;+oo[" tab = \ r'''\providecommand{\geopharDB}[1]{$\left|\vphantom{\text{#1}}\right|$} \begin{center} \begin{tabular}{|c|ccccc|} \hline $x$ & $0$ & & $1$ & & $+\infty$ \\ \hline $u(x)$ & \geopharDB{$u(x)$} & + & 0 & $-$ & \\ \hline \end{tabular} \end{center} % x: !0;+oo// 1-x: ++ 1 -- // u(x) % u(x)=1-x sur ]0;+\infty[ ''' assert_tabsign(s, tab) s = "u(x)=x(1-x) sur ]-1;0[U]0;4[" tab = \ r'''\providecommand{\geopharDB}[1]{$\left|\vphantom{\text{#1}}\right|$} \begin{center} \begin{tabular}{|c|ccccccc|} \hline $x$ & $-1$ & & $0$ & & $1$ & & $4$ \\ \hline $x$ & & $-$ & 0 & + & & + & \\ \hline $1-x$ & & + & & + & 0 & $-$ & \\ \hline $u(x)$ & \geopharDB{$u(x)$} & $-$ & \geopharDB{$u(x)$} & + & 0 & $-$ & \geopharDB{$u(x)$} \\ \hline \end{tabular} \end{center} % x: !-1;!0: !0;!4// !x: -- 0 ++ // 1-x: ++ 1 -- // u(x) % u(x)=x(1-x) sur ]-1;0[U]0;4[ ''' assert_tabsign(s, tab) s = "u(x)=(1+x)(1-x)/x sur ]-3;2[U]2;4]" tab = \ r'''\providecommand{\geopharDB}[1]{$\left|\vphantom{\text{#1}}\right|$} \begin{center} \begin{tabular}{|c|ccccccccccc|} \hline $x$ & $-3$ & & $-1$ & & $0$ & & $1$ & & $2$ & & $4$ \\ \hline $1+x$ & & $-$ & 0 & + & & + & & + & & + & \\ \hline $1-x$ & & + & & + & & + & 0 & $-$ & & $-$ & \\ \hline $x$ & & $-$ & & $-$ & 0 & + & & + & & + & \\ \hline $u(x)$ & \geopharDB{$u(x)$} & + & 0 & $-$ & \geopharDB{$u(x)$} & + & 0 & $-$ & \geopharDB{$u(x)$} & $-$ & \\ \hline \end{tabular} \end{center} % x: !-3;!0: !0;!2: !2;4// 1+x: -- -1 ++ // 1-x: ++ 1 -- // !x: -- 0 ++ // u(x) % u(x)=(1+x)(1-x)/x sur ]-3;2[U]2;4] ''' assert_tabsign(s, tab) def test_issue_173(): s = "(1 - x)\e^{ 2x}" tab = \ r'''\begin{center} \begin{tabular}{|c|ccccc|} \hline $x$ & $-\infty$ & & $1$ & & $+\infty$ \\ \hline $1-x$ & & + & 0 & $-$ & \\ \hline $\e^{2 x}$ & & + & & + & \\ \hline $(1-x)\e^{ 2x}$ & & + & 0 & $-$ & \\ \hline \end{tabular} \end{center} % x: -oo;+oo// 1-x: ++ 1 -- // e^(2 x): ++ // (1 - x)\e^{ 2x} % (1 - x)\e^{ 2x} ''' assert_tabsign(s, tab) def test_issue_200(): s = 'f(x)=x^2-3' tab = \ r'''\begin{center} \begin{tabular}{|c|ccccccc|} \hline $x$ & $-\infty$ & & $-\sqrt{3}$ & & $\sqrt{3}$ & & $+\infty$ \\ \hline $f(x)$ & & + & 0 & $-$ & 0 & + & \\ \hline \end{tabular} \end{center} % x: -oo;+oo// x^2-3: ++ -sqrt(3) -- sqrt(3) ++ // f(x) % f(x)=x^2-3 ''' assert_tabsign(s, tab) def test_issue_189(): # Tableaux de signes et de variation avec des décimaux s = 
'2-0.25x' options = {'cellspace': True} tab = \ r'''\begin{center} \begin{tabular}{|Sc|ScScScScSc|} \hline $x$ & $-\infty$ & & $8$ & & $+\infty$ \\ \hline $2-0.25x$ & & + & 0 & $-$ & \\ \hline \end{tabular} \end{center} % x: -oo;+oo// 2-0.25 x: ++ 8 -- // 2-0.25x % 2-0.25x ''' assert_tabsign(s, tab, **options) def test_intervalle_virgule(): s = 'h(x)=x^2-x/2-3 sur [-2,5;3,5]' options = {'cellspace': True} tab = \ r'''\begin{center} \begin{tabular}{|Sc|ScScScScScScSc|} \hline $x$ & $-2,5$ & & $-\frac{3}{2}$ & & $2$ & & $3,5$ \\ \hline $h(x)$ & & + & 0 & $-$ & 0 & + & \\ \hline \end{tabular} \end{center} % x: -2,5;3,5// x^2-x/2-3: ++ -3/2 -- 2 ++ // h(x) % h(x)=x^2-x/2-3 sur [-2,5;3,5] ''' assert_tabsign(s, tab, **options) def test_constante(): s = 'f(x)=5' tab = \ r'''\begin{center} \begin{tabular}{|c|ccc|} \hline $x$ & $-\infty$ & & $+\infty$ \\ \hline $f(x)$ & & + & \\ \hline \end{tabular} \end{center} % x: -oo;+oo// 5: ++ // f(x) % f(x)=5 ''' assert_tabsign(s, tab) def test_issue_247(): "FS#247 - Accepter la syntaxe suivant : 'f(x): -- -8 ++ -2 -- 5 ++'." s = "f(x): -- -8 ++ -2 -- 5 ++" tab = \ r'''\begin{center} \begin{tabular}{|c|ccccccccc|} \hline $x$ & $-\infty$ & & $-8$ & & $-2$ & & $5$ & & $+\infty$ \\ \hline $f(x)$ & & $-$ & 0 & + & 0 & $-$ & 0 & + & \\ \hline \end{tabular} \end{center} % f(x): -- -8 ++ -2 -- 5 ++ ''' assert_tabsign(s, tab) def test_mix_numeric_and_symbolic_values(): s = 'f(x): -- x_1 ++ 5 ++ x_2 -- 7 --' tab = \ r'''\begin{center} \begin{tabular}{|c|ccccccccccc|} \hline $x$ & $-\infty$ & & $x_1$ & & $5$ & & $x_2$ & & $7$ & & $+\infty$ \\ \hline $f(x)$ & & $-$ & 0 & + & 0 & + & 0 & $-$ & 0 & $-$ & \\ \hline \end{tabular} \end{center} % f(x): -- x_1 ++ 5 ++ x_2 -- 7 -- ''' assert_tabsign(s, tab) s = r'x:-oo;+oo // f(x): -- 5 ++ // g(x): ++ \alpha=2,1 --' tab = \ r'''\begin{center} \begin{tabular}{|c|ccccccc|} \hline $x$ & $-\infty$ & & $\alpha$ & & $5$ & & $+\infty$ \\ \hline $f(x)$ & & $-$ & & $-$ & 0 & + & \\ \hline $g(x)$ & & + & 0 & $-$ & & $-$ & \\ \hline $f(x)g(x)$ & & $-$ & 0 & + & 0 & $-$ & \\ \hline \end{tabular} \end{center} % x:-oo;+oo // f(x): -- 5 ++ // g(x): ++ \alpha=2,1 -- ''' assert_tabsign(s, tab) def test_approche(): s = "f(x)=x^2-3x-5" tab = \ r'''\begin{center} \begin{tabular}{|c|ccccccc|} \hline $x$ & $-\infty$ & & $-\frac{\sqrt{29}}{2}+\frac{3}{2}$ & & $\frac{3}{2}+\frac{\sqrt{29}}{2}$ & & $+\infty$ \\ \hline $f(x)$ & & + & 0 & $-$ & 0 & + & \\ \hline \end{tabular} \end{center} % x: -oo;+oo// x^2-3 x-5: ++ -sqrt(29)/2 + 3/2 -- 3/2 + sqrt(29)/2 ++ // f(x) % f(x)=x^2-3x-5 ''' assert_tabsign(s, tab) options = {'approche': True, "decimales": 2} tab = \ r'''\begin{center} \begin{tabular}{|c|ccccccc|} \hline $x$ & $-\infty$ & & $-1,19$ & & $4,19$ & & $+\infty$ \\ \hline $f(x)$ & & + & 0 & $-$ & 0 & + & \\ \hline \end{tabular} \end{center} % x: -oo;+oo// x^2-3 x-5: ++ -1,19 -- 4,19 ++ // f(x) % f(x)=x^2-3x-5 ''' assert_tabsign(s, tab, **options)
gpl-2.0
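Each test feeds tabsign() a specification string and compares the LaTeX it produces against a reference table. A minimal sketch of calling it directly, assuming the wxgeometrie package is importable and that tabsign() returns the LaTeX source as a string, which is what the assert_tableau(tabsign, chaine, code_latex) helper implies:

# Minimal sketch of driving tabsign() directly; wxgeometrie must be importable.
from wxgeometrie.modules.tablatex.tabsign import tabsign

# Automatic mode: the expression is factored and the signs derived.
print(tabsign('f(x)=x^2-3'))

# Manual mode: the sign of each factor is spelled out around its roots.
print(tabsign('x: -oo;+oo// 2x+1: -- -1/2 ++// 3-x: ++ 3 --// f(x)'))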
mihail911/nupic
examples/opf/experiments/anomaly/spatial/2field_many_novelAtEnd/description.py
40
15840
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ Template file used by the OPF Experiment Generator to generate the actual description.py file by replacing $XXXXXXXX tokens with desired values. This description.py file was generated by: '~/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py' """ from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI from nupic.frameworks.opf.expdescriptionhelpers import ( updateConfigFromSubConfig, applyValueGettersToContainer, DeferredDictLookup) from nupic.frameworks.opf.clamodelcallbacks import * from nupic.frameworks.opf.metrics import MetricSpec from nupic.frameworks.opf.opfutils import (InferenceType, InferenceElement) from nupic.support import aggregationDivide from nupic.frameworks.opf.opftaskdriver import ( IterationPhaseSpecLearnOnly, IterationPhaseSpecInferOnly, IterationPhaseSpecLearnAndInfer) # Model Configuration Dictionary: # # Define the model parameters and adjust for any modifications if imported # from a sub-experiment. # # These fields might be modified by a sub-experiment; this dict is passed # between the sub-experiment and base experiment # # # NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements # within the config dictionary may be assigned futures derived from the # ValueGetterBase class, such as DeferredDictLookup. # This facility is particularly handy for enabling substitution of values in # the config dictionary from other values in the config dictionary, which is # needed by permutation.py-based experiments. These values will be resolved # during the call to applyValueGettersToContainer(), # which we call after the base experiment's config dictionary is updated from # the sub-experiment. See ValueGetterBase and # DeferredDictLookup for more details about value-getters. # # For each custom encoder parameter to be exposed to the sub-experiment/ # permutation overrides, define a variable in this section, using key names # beginning with a single underscore character to avoid collisions with # pre-defined keys (e.g., _dsEncoderFieldName2_N). # # Example: # config = dict( # _dsEncoderFieldName2_N = 70, # _dsEncoderFieldName2_W = 5, # dsEncoderSchema = [ # base=dict( # fieldname='Name2', type='ScalarEncoder', # name='Name2', minval=0, maxval=270, clipInput=True, # n=DeferredDictLookup('_dsEncoderFieldName2_N'), # w=DeferredDictLookup('_dsEncoderFieldName2_W')), # ], # ) # updateConfigFromSubConfig(config) # applyValueGettersToContainer(config) config = { # Type of model that the rest of these parameters apply to. 
'model': "CLA", # Version that specifies the format of the config. 'version': 1, # Intermediate variables used to compute fields in modelParams and also # referenced from the control section. 'aggregationInfo': { 'fields': [ ('numericFieldNameA', 'mean'), ('numericFieldNameB', 'sum'), ('categoryFieldNameC', 'first')], 'hours': 0}, 'predictAheadTime': None, # Model parameter dictionary. 'modelParams': { # The type of inference that this model will perform 'inferenceType': 'NontemporalAnomaly', 'sensorParams': { # Sensor diagnostic output verbosity control; # if > 0: sensor region will print out on screen what it's sensing # at each step 0: silent; >=1: some info; >=2: more info; # >=3: even more info (see compute() in py/regions/RecordSensor.py) 'verbosity' : 0, # Example: # dsEncoderSchema = [ # DeferredDictLookup('__field_name_encoder'), # ], # # (value generated from DS_ENCODER_SCHEMA) 'encoders': { 'f0': dict(fieldname='f0', n=100, name='f0', type='SDRCategoryEncoder', w=21), 'f1': dict(fieldname='f1', n=100, name='f1', type='SDRCategoryEncoder', w=21), }, # A dictionary specifying the period for automatically-generated # resets from a RecordSensor; # # None = disable automatically-generated resets (also disabled if # all of the specified values evaluate to 0). # Valid keys is the desired combination of the following: # days, hours, minutes, seconds, milliseconds, microseconds, weeks # # Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12), # # (value generated from SENSOR_AUTO_RESET) 'sensorAutoReset' : None, }, 'spEnable': True, 'spParams': { # SP diagnostic output verbosity control; # 0: silent; >=1: some info; >=2: more info; 'spVerbosity' : 0, 'globalInhibition': 1, # Number of cell columns in the cortical region (same number for # SP and TP) # (see also tpNCellsPerCol) 'columnCount': 2048, 'inputWidth': 0, # SP inhibition control (absolute value); # Maximum number of active columns in the SP region's output (when # there are more, the weaker ones are suppressed) 'numActiveColumnsPerInhArea': 40, 'seed': 1956, # potentialPct # What percent of the columns's receptive field is available # for potential synapses. At initialization time, we will # choose potentialPct * (2*potentialRadius+1)^2 'potentialPct': 0.5, # The default connected threshold. Any synapse whose # permanence value is above the connected threshold is # a "connected synapse", meaning it can contribute to the # cell's firing. Typical value is 0.10. Cells whose activity # level before inhibition falls below minDutyCycleBeforeInh # will have their own internal synPermConnectedCell # threshold set below this default value. # (This concept applies to both SP and TP and so 'cells' # is correct here as opposed to 'columns') 'synPermConnected': 0.1, 'synPermActiveInc': 0.1, 'synPermInactiveDec': 0.01, }, # Controls whether TP is enabled or disabled; # TP is necessary for making temporal predictions, such as predicting # the next inputs. Without TP, the model is only capable of # reconstructing missing sensor inputs (via SP). 'tpEnable' : True, 'tpParams': { # TP diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity # (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py) 'verbosity': 0, # Number of cell columns in the cortical region (same number for # SP and TP) # (see also tpNCellsPerCol) 'columnCount': 2048, # The number of cells (i.e., states), allocated per column. 
'cellsPerColumn': 32, 'inputWidth': 2048, 'seed': 1960, # Temporal Pooler implementation selector (see _getTPClass in # CLARegion.py). 'temporalImp': 'cpp', # New Synapse formation count # NOTE: If None, use spNumActivePerInhArea # # TODO: need better explanation 'newSynapseCount': 20, # Maximum number of synapses per segment # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # # TODO: for Ron: once the appropriate value is placed in TP # constructor, see if we should eliminate this parameter from # description.py. 'maxSynapsesPerSegment': 32, # Maximum number of segments per cell # > 0 for fixed-size CLA # -1 for non-fixed-size CLA # # TODO: for Ron: once the appropriate value is placed in TP # constructor, see if we should eliminate this parameter from # description.py. 'maxSegmentsPerCell': 128, # Initial Permanence # TODO: need better explanation 'initialPerm': 0.21, # Permanence Increment 'permanenceInc': 0.1, # Permanence Decrement # If set to None, will automatically default to tpPermanenceInc # value. 'permanenceDec' : 0.1, 'globalDecay': 0.0, 'maxAge': 0, # Minimum number of active synapses for a segment to be considered # during search for the best-matching segments. # None=use default # Replaces: tpMinThreshold 'minThreshold': 12, # Segment activation threshold. # A segment is active if it has >= tpSegmentActivationThreshold # connected synapses that are active due to infActiveState # None=use default # Replaces: tpActivationThreshold 'activationThreshold': 16, 'outputType': 'normal', # "Pay Attention Mode" length. This tells the TP how many new # elements to append to the end of a learned sequence at a time. # Smaller values are better for datasets with short sequences, # higher values are better for datasets with long sequences. 'pamLength': 1, }, 'clParams': { # Classifier implementation selection. 'implementation': 'cpp', 'regionName' : 'CLAClassifierRegion', # Classifier diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity 'clVerbosity' : 0, # This controls how fast the classifier learns/forgets. Higher values # make it adapt faster and forget older patterns faster. 'alpha': 0.001, # This is set after the call to updateConfigFromSubConfig and is # computed from the aggregationInfo and predictAheadTime. 'steps': '1', }, 'trainSPNetOnlyIfRequested': False, }, } # end of config dictionary # Adjust base config dictionary for any modifications if imported from a # sub-experiment updateConfigFromSubConfig(config) # Compute predictionSteps based on the predictAheadTime and the aggregation # period, which may be permuted over. if config['predictAheadTime'] is not None: predictionSteps = int(round(aggregationDivide( config['predictAheadTime'], config['aggregationInfo']))) assert (predictionSteps >= 1) config['modelParams']['clParams']['steps'] = str(predictionSteps) # Adjust config by applying ValueGetterBase-derived # futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order # to support value-getter-based substitutions from the sub-experiment (if any) applyValueGettersToContainer(config) # [optional] A sequence of one or more tasks that describe what to do with the # model. Each task consists of a task label, an input spec., iteration count, # and a task-control spec per opfTaskSchema.json # # NOTE: The tasks are intended for OPF clients that make use of OPFTaskDriver. # Clients that interact with OPFExperiment directly do not make use of # the tasks specification. 
# control = dict( environment='opfExperiment', tasks = [ { # Task label; this label string may be used for diagnostic logging and for # constructing filenames or directory pathnames for task-specific files, etc. 'taskLabel' : "Anomaly", # Input stream specification per py/nupic/cluster/database/StreamDef.json. # 'dataset' : { 'info': 'test_NoProviders', 'version': 1, 'streams': [ { 'columns': ['*'], 'info': 'my simple dataset', 'source': 'file://'+os.path.join(os.path.dirname(__file__), 'data.csv'), } ], # TODO: Aggregation is not supported yet by run_opf_experiment.py #'aggregation' : config['aggregationInfo'] }, # Iteration count: maximum number of iterations. Each iteration corresponds # to one record from the (possibly aggregated) dataset. The task is # terminated when either number of iterations reaches iterationCount or # all records in the (possibly aggregated) database have been processed, # whichever occurs first. # # iterationCount of -1 = iterate over the entire dataset 'iterationCount' : -1, # Task Control parameters for OPFTaskDriver (per opfTaskControlSchema.json) 'taskControl' : { # Iteration cycle list consisting of opftaskdriver.IterationPhaseSpecXXXXX # instances. 'iterationCycle' : [ #IterationPhaseSpecLearnOnly(1000), IterationPhaseSpecLearnAndInfer(1000, inferenceArgs=None), #IterationPhaseSpecInferOnly(10, inferenceArgs=None), ], 'metrics' : [ ], # Logged Metrics: A sequence of regular expressions that specify which of # the metrics from the Inference Specifications section MUST be logged for # every prediction. The regex's correspond to the automatically generated # metric labels. This is similar to the way the optimization metric is # specified in permutations.py. 'loggedMetrics': ['.*nupicScore.*'], # Callbacks for experimentation/research (optional) 'callbacks' : { # Callbacks to be called at the beginning of a task, before model iterations. # Signature: callback(<reference to OPFExperiment>); returns nothing # 'setup' : [claModelControlEnableSPLearningCb, claModelControlEnableTPLearningCb], # 'setup' : [claModelControlDisableTPLearningCb], 'setup' : [], # Callbacks to be called after every learning/inference iteration # Signature: callback(<reference to OPFExperiment>); returns nothing 'postIter' : [], # Callbacks to be called when the experiment task is finished # Signature: callback(<reference to OPFExperiment>); returns nothing 'finish' : [] } } # End of taskControl }, # End of task ] ) descriptionInterface = ExperimentDescriptionAPI(modelConfig=config, control=control)
gpl-3.0
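The long comment block in this description.py explains the deferred value-getter mechanism: config fields hold DeferredDictLookup futures that applyValueGettersToContainer() later resolves, so sub-experiment overrides applied before that call take effect. A minimal sketch of the pattern in isolation, assuming the NuPIC OPF helpers are importable (the '_n'/'_w' keys are illustrative only):

# Minimal sketch of the DeferredDictLookup pattern described in the comments.
from nupic.frameworks.opf.expdescriptionhelpers import (
    DeferredDictLookup, applyValueGettersToContainer)

config = {
    '_n': 100,   # value a sub-experiment could override
    '_w': 21,
    'encoder': dict(type='SDRCategoryEncoder',
                    n=DeferredDictLookup('_n'),   # resolved from config['_n']
                    w=DeferredDictLookup('_w')),  # resolved from config['_w']
}

# Futures stay unresolved until this call, so overrides applied before it win.
applyValueGettersToContainer(config)
assert config['encoder']['n'] == 100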
kevmccor/openemr
phpmyadmin/doc/_ext/configext.py
141
6618
from sphinx.domains import Domain, ObjType from sphinx.roles import XRefRole from sphinx.domains.std import GenericObject, StandardDomain from sphinx.directives import ObjectDescription from sphinx.util.nodes import clean_astext, make_refnode from sphinx.util import ws_re from sphinx import addnodes from sphinx.util.docfields import Field from docutils import nodes def get_id_from_cfg(text): ''' Formats anchor ID from config option. ''' if text[:6] == '$cfg[\'': text = text[6:] if text[-2:] == '\']': text = text[:-2] text = text.replace('[$i]', '') parts = text.split("']['") return parts class ConfigOption(ObjectDescription): indextemplate = 'configuration option; %s' parse_node = None has_arguments = True doc_field_types = [ Field('default', label='Default value', has_arg=False, names=('default', )), Field('type', label='Type', has_arg=False, names=('type',)), ] def handle_signature(self, sig, signode): signode.clear() signode += addnodes.desc_name(sig, sig) # normalize whitespace like XRefRole does name = ws_re.sub('', sig) return name def add_target_and_index(self, name, sig, signode): targetparts = get_id_from_cfg(name) targetname = 'cfg_%s' % '_'.join(targetparts) signode['ids'].append(targetname) self.state.document.note_explicit_target(signode) indextype = 'single' # Generic index entries indexentry = self.indextemplate % (name,) self.indexnode['entries'].append((indextype, indexentry, targetname, targetname)) self.indexnode['entries'].append((indextype, name, targetname, targetname)) # Server section if targetparts[0] == 'Servers' and len(targetparts) > 1: indexname = ', '.join(targetparts[1:]) self.indexnode['entries'].append((indextype, 'server configuration; %s' % indexname, targetname, targetname)) self.indexnode['entries'].append((indextype, indexname, targetname, targetname)) else: indexname = ', '.join(targetparts) self.indexnode['entries'].append((indextype, indexname, targetname, targetname)) self.env.domaindata['config']['objects'][self.objtype, name] = \ self.env.docname, targetname class ConfigSectionXRefRole(XRefRole): """ Cross-referencing role for configuration sections (adds an index entry). 
""" def result_nodes(self, document, env, node, is_ref): if not is_ref: return [node], [] varname = node['reftarget'] tgtid = 'index-%s' % env.new_serialno('index') indexnode = addnodes.index() indexnode['entries'] = [ ('single', varname, tgtid, varname), ('single', 'configuration section; %s' % varname, tgtid, varname) ] targetnode = nodes.target('', '', ids=[tgtid]) document.note_explicit_target(targetnode) return [indexnode, targetnode, node], [] class ConfigSection(ObjectDescription): indextemplate = 'configuration section; %s' parse_node = None def handle_signature(self, sig, signode): if self.parse_node: name = self.parse_node(self.env, sig, signode) else: signode.clear() signode += addnodes.desc_name(sig, sig) # normalize whitespace like XRefRole does name = ws_re.sub('', sig) return name def add_target_and_index(self, name, sig, signode): targetname = '%s-%s' % (self.objtype, name) signode['ids'].append(targetname) self.state.document.note_explicit_target(signode) if self.indextemplate: colon = self.indextemplate.find(':') if colon != -1: indextype = self.indextemplate[:colon].strip() indexentry = self.indextemplate[colon+1:].strip() % (name,) else: indextype = 'single' indexentry = self.indextemplate % (name,) self.indexnode['entries'].append((indextype, indexentry, targetname, targetname)) self.env.domaindata['config']['objects'][self.objtype, name] = \ self.env.docname, targetname class ConfigOptionXRefRole(XRefRole): """ Cross-referencing role for configuration options (adds an index entry). """ def result_nodes(self, document, env, node, is_ref): if not is_ref: return [node], [] varname = node['reftarget'] tgtid = 'index-%s' % env.new_serialno('index') indexnode = addnodes.index() indexnode['entries'] = [ ('single', varname, tgtid, varname), ('single', 'configuration option; %s' % varname, tgtid, varname) ] targetnode = nodes.target('', '', ids=[tgtid]) document.note_explicit_target(targetnode) return [indexnode, targetnode, node], [] class ConfigFileDomain(Domain): name = 'config' label = 'Config' object_types = { 'option': ObjType('config option', 'option'), 'section': ObjType('config section', 'section'), } directives = { 'option': ConfigOption, 'section': ConfigSection, } roles = { 'option': ConfigOptionXRefRole(), 'section': ConfigSectionXRefRole(), } initial_data = { 'objects': {}, # (type, name) -> docname, labelid } def clear_doc(self, docname): for key, (fn, _) in self.data['objects'].items(): if fn == docname: del self.data['objects'][key] def resolve_xref(self, env, fromdocname, builder, typ, target, node, contnode): docname, labelid = self.data['objects'].get((typ, target), ('', '')) if not docname: return None else: return make_refnode(builder, fromdocname, docname, labelid, contnode) def get_objects(self): for (type, name), info in self.data['objects'].items(): yield (name, name, type, info[0], info[1], self.object_types[type].attrs['searchprio']) def setup(app): app.add_domain(ConfigFileDomain)
gpl-3.0
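setup() registers a 'config' Sphinx domain whose 'option' and 'section' directives and roles build anchors and index entries from phpMyAdmin-style $cfg[...] names. A minimal sketch of wiring it into a Sphinx build, assuming this file sits in a '_ext' directory beside conf.py (the path is illustrative):

# Minimal sketch of a conf.py fragment enabling the domain defined above.
import os
import sys

sys.path.insert(0, os.path.abspath('_ext'))  # make configext importable
extensions = ['configext']

# reStructuredText sources can then declare and cross-reference options:
#   .. config:option:: $cfg['Servers'][$i]['host']
#   ... see :config:option:`$cfg['Servers'][$i]['host']` elsewhere ...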
xuleiboy1234/autoTitle
tensorflow/tensorflow/tools/ci_build/update_version.py
4
12685
#!/usr/bin/python # Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # # Automatically update TensorFlow version in source files # # Usage: # ./tensorflow/tools/ci_build/update_version.py --version 1.4.0-rc0 # ./tensorflow/tools/ci_build/update_version.py --nightly # """Update version of TensorFlow script.""" # pylint: disable=superfluous-parens import argparse import fileinput import os import re import subprocess import time # File parameters TF_SRC_DIR = "tensorflow" VERSION_H = "%s/core/public/version.h" % TF_SRC_DIR SETUP_PY = "%s/tools/pip_package/setup.py" % TF_SRC_DIR README_MD = "./README.md" DEVEL_DOCKERFILE = "%s/tools/docker/Dockerfile.devel" % TF_SRC_DIR GPU_DEVEL_DOCKERFILE = "%s/tools/docker/Dockerfile.devel-gpu" % TF_SRC_DIR RELEVANT_FILES = [TF_SRC_DIR, VERSION_H, SETUP_PY, README_MD, DEVEL_DOCKERFILE, GPU_DEVEL_DOCKERFILE] # Version type parameters NIGHTLY_VERSION = 1 REGULAR_VERSION = 0 def replace_line(old_line, new_line, filename): """Replace a line in a file.""" for line in fileinput.input(filename, inplace=True): print(line.rstrip().replace(old_line, new_line)) def check_existence(filename): """Check the existence of file or dir.""" if not os.path.exists(filename): raise RuntimeError("%s not found. Are you under the TensorFlow source root" " directory?" % filename) def check_all_files(): """Check all relevant files necessary for upgrade.""" for file_name in RELEVANT_FILES: check_existence(file_name) def replace_with_sed(query, filename): """Replace with sed when regex is required.""" subprocess.check_call("sed -i -r -e \"%s\" \"%s\"" % (query, filename), shell=True) class Version(object): """Version class object that stores SemVer version information.""" def __init__(self, major, minor, patch, identifier_string, version_type): """Constructor. Args: major: major string eg. (1) minor: minor string eg. (3) patch: patch string eg. (1) identifier_string: extension string eg. (-rc0) version_type: version parameter ((REGULAR|NIGHTLY)_VERSION) """ self.string = "%s.%s.%s%s" % (major, minor, patch, identifier_string) self.major = major self.minor = minor self.patch = patch self.identifier_string = identifier_string self.version_type = version_type def __str__(self): return self.string @property def pep_440_str(self): if self.version_type == REGULAR_VERSION: return_string = "%s.%s.%s%s" % (self.major, self.minor, self.patch, self.identifier_string) return return_string.replace("-", "") else: return_string = "%s.%s.%s" % (self.major, self.minor, self.identifier_string) return return_string.replace("-", "") @staticmethod def parse_from_string(string, version_type): """Returns version object from Semver string. Args: string: version string version_type: version parameter Raises: RuntimeError: If the version string is not valid. 
""" # Check validity of new version string if not re.search(r"[0-9]+\.[0-9]+\.[a-zA-Z0-9]+", string): raise RuntimeError("Invalid version string: %s" % string) major, minor, extension = string.split(".", 2) # Isolate patch and identifier string if identifier string exists extension_split = extension.split("-", 1) patch = extension_split[0] if len(extension_split) == 2: identifier_string = "-" + extension_split[1] else: identifier_string = "" return Version(major, minor, patch, identifier_string, version_type) def get_current_semver_version(): """Returns a Version object of current version. Returns: version: Version object of current SemVer string based on information from core/public/version.h """ # Get current version information version_file = open(VERSION_H, "r") for line in version_file: major_match = re.search("^#define TF_MAJOR_VERSION ([0-9]+)", line) minor_match = re.search("^#define TF_MINOR_VERSION ([0-9]+)", line) patch_match = re.search("^#define TF_PATCH_VERSION ([0-9]+)", line) extension_match = re.search("^#define TF_VERSION_SUFFIX \"(.*)\"", line) if major_match: old_major = major_match.group(1) if minor_match: old_minor = minor_match.group(1) if patch_match: old_patch_num = patch_match.group(1) if extension_match: old_extension = extension_match.group(1) break if "dev" in old_extension: version_type = NIGHTLY_VERSION else: version_type = REGULAR_VERSION return Version(old_major, old_minor, old_patch_num, old_extension, version_type) def update_version_h(old_version, new_version): """Update tensorflow/core/public/version.h.""" replace_line("#define TF_MAJOR_VERSION %s" % old_version.major, "#define TF_MAJOR_VERSION %s" % new_version.major, VERSION_H) replace_line("#define TF_MINOR_VERSION %s" % old_version.minor, "#define TF_MINOR_VERSION %s" % new_version.minor, VERSION_H) replace_line("#define TF_PATCH_VERSION %s" % old_version.patch, "#define TF_PATCH_VERSION %s" % new_version.patch, VERSION_H) replace_line("#define TF_VERSION_SUFFIX \"%s\"" % old_version.identifier_string, "#define TF_VERSION_SUFFIX \"%s\"" % new_version.identifier_string, VERSION_H) def update_setup_dot_py(old_version, new_version): """Update setup.py.""" replace_line("_VERSION = '%s'" % old_version.string, "_VERSION = '%s'" % new_version.string, SETUP_PY) def update_readme(old_version, new_version): """Update README.""" pep_440_str = new_version.pep_440_str replace_with_sed(r"s/%s\.%s\.([[:alnum:]]+)-/%s-/g" % (old_version.major, old_version.minor, pep_440_str), README_MD) def update_md_files(old_version, new_version): """Update the md doc files. 
Args: old_version: Version object of current version new_version: Version object of new version """ old_pep_version = old_version.pep_440_str new_pep_version = new_version.pep_440_str for filename in ["linux", "mac", "windows", "sources"]: filepath = "%s/docs_src/install/install_%s.md" % (TF_SRC_DIR, filename) replace_with_sed("s/tensorflow-%s/tensorflow-%s/g" % (old_pep_version, new_pep_version), filepath) replace_with_sed("s/tensorflow_gpu-%s/tensorflow_gpu-%s/g" % (old_pep_version, new_pep_version), filepath) replace_with_sed("s/TensorFlow %s/TensorFlow %s/g" % (old_pep_version, new_pep_version), filepath) for filename in ["java", "go", "c"]: filepath = "%s/docs_src/install/install_%s.md" % (TF_SRC_DIR, filename) replace_with_sed(r"s/x86_64-%s/x86_64-%s/g" % (old_version, new_version), filepath) replace_with_sed(r"s/libtensorflow-%s.jar/libtensorflow-%s.jar/g" % (old_version, new_version), filepath) replace_with_sed(r"s/<version>%s<\/version>/<version>%s<\/version>/g" % (old_version, new_version), filepath) def major_minor_change(old_version, new_version): """Check if a major or minor change occurred.""" major_mismatch = old_version.major != new_version.major minor_mismatch = old_version.minor != new_version.minor if major_mismatch or minor_mismatch: return True return False def update_dockerfiles(old_version, new_version): """Update dockerfiles if there was a major change.""" if major_minor_change(old_version, new_version): old_r_major_minor = r"r%s\.%s" % (old_version.major, old_version.minor) old_r_major_minor_string = old_r_major_minor.replace("\\", "") r_major_minor = r"r%s\.%s" % (new_version.major, new_version.minor) r_major_minor_string = r_major_minor.replace("\\", "") print("Detected Major.Minor change.") print("Updating pattern %s to %s in additional files" % (old_r_major_minor_string, r_major_minor_string)) # Update dockerfiles replace_with_sed("s/%s/%s/g" % (old_r_major_minor, r_major_minor), DEVEL_DOCKERFILE) replace_with_sed("s/%s/%s/g" % (old_r_major_minor, r_major_minor), GPU_DEVEL_DOCKERFILE) def check_for_lingering_string(lingering_string): """Check for given lingering strings.""" formatted_string = lingering_string.replace(".", r"\.") try: linger_strs = subprocess.check_output("grep -rnoH \"%s\" \"%s\"" % (formatted_string, TF_SRC_DIR), shell=True).split("\n") except subprocess.CalledProcessError: linger_strs = [] if linger_strs: print("WARNING: Below are potentially instances of lingering old version " "string \"%s\" in source directory \"%s/\" that are not " "updated by this script. Please check them manually!" % (lingering_string, TF_SRC_DIR)) for linger_str in linger_strs: print(linger_str) else: print("No lingering old version strings \"%s\" found in source directory" " \"%s/\". Good." % (lingering_string, TF_SRC_DIR)) def check_for_old_version(old_version, new_version): """Check for old version references.""" for old_ver in [old_version.string, old_version.pep_440_str]: check_for_lingering_string(old_ver) if major_minor_change(old_version, new_version): old_r_major_minor = "r%s.%s" % (old_version.major, old_version.minor) check_for_lingering_string(old_r_major_minor) def main(): """This script updates all instances of version in the tensorflow directory. 
Requirements: version: The version tag OR nightly: Create a nightly tag with current date Raises: RuntimeError: If the script is not being run from tf source dir """ parser = argparse.ArgumentParser(description="Update the TensorFlow version in source files.") group = parser.add_mutually_exclusive_group(required=True) # Arg information group.add_argument("--version", help="<new_major_ver>.<new_minor_ver>.<new_patch_ver>", default="") group.add_argument("--nightly", help="create a nightly version stamped with the current date", action="store_true") args = parser.parse_args() check_all_files() old_version = get_current_semver_version() if args.nightly: new_version = Version(old_version.major, old_version.minor, old_version.patch, "-dev" + time.strftime("%Y%m%d"), NIGHTLY_VERSION) else: new_version = Version.parse_from_string(args.version, REGULAR_VERSION) update_version_h(old_version, new_version) update_setup_dot_py(old_version, new_version) update_readme(old_version, new_version) update_md_files(old_version, new_version) update_dockerfiles(old_version, new_version) # Print transition details print("Major: %s -> %s" % (old_version.major, new_version.major)) print("Minor: %s -> %s" % (old_version.minor, new_version.minor)) print("Patch: %s -> %s\n" % (old_version.patch, new_version.patch)) check_for_old_version(old_version, new_version) if __name__ == "__main__": main()
mit
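The Version helper above carries both the SemVer string and its PEP 440 form (dash stripped; for nightlies the patch is dropped in favor of the -devYYYYMMDD suffix). A minimal sketch of the conversions, assuming the script imports cleanly as a module named update_version (it only runs main() under __main__):

# Minimal sketch of the Version conversions defined in update_version.py.
from update_version import Version, REGULAR_VERSION, NIGHTLY_VERSION

v = Version.parse_from_string('1.4.0-rc0', REGULAR_VERSION)
print(str(v))           # '1.4.0-rc0' -- the SemVer form used in version.h
print(v.pep_440_str)    # '1.4.0rc0'  -- dash stripped for pip/PEP 440

nightly = Version('1', '4', '0', '-dev20170801', NIGHTLY_VERSION)
print(nightly.pep_440_str)  # '1.4.dev20170801' -- patch dropped for nightlies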
vikatory/kbengine
kbe/res/scripts/common/Lib/distutils/command/bdist_msi.py
152
35217
# Copyright (C) 2005, 2006 Martin von Löwis # Licensed to PSF under a Contributor Agreement. # The bdist_wininst command proper # based on bdist_wininst """ Implements the bdist_msi command. """ import sys, os from distutils.core import Command from distutils.dir_util import remove_tree from distutils.sysconfig import get_python_version from distutils.version import StrictVersion from distutils.errors import DistutilsOptionError from distutils.util import get_platform from distutils import log import msilib from msilib import schema, sequence, text from msilib import Directory, Feature, Dialog, add_data class PyDialog(Dialog): """Dialog class with a fixed layout: controls at the top, then a ruler, then a list of buttons: back, next, cancel. Optionally a bitmap at the left.""" def __init__(self, *args, **kw): """Dialog(database, name, x, y, w, h, attributes, title, first, default, cancel, bitmap=true)""" Dialog.__init__(self, *args) ruler = self.h - 36 bmwidth = 152*ruler/328 #if kw.get("bitmap", True): # self.bitmap("Bitmap", 0, 0, bmwidth, ruler, "PythonWin") self.line("BottomLine", 0, ruler, self.w, 0) def title(self, title): "Set the title text of the dialog at the top." # name, x, y, w, h, flags=Visible|Enabled|Transparent|NoPrefix, # text, in VerdanaBold10 self.text("Title", 15, 10, 320, 60, 0x30003, r"{\VerdanaBold10}%s" % title) def back(self, title, next, name = "Back", active = 1): """Add a back button with a given title, the tab-next button, its name in the Control table, possibly initially disabled. Return the button, so that events can be associated""" if active: flags = 3 # Visible|Enabled else: flags = 1 # Visible return self.pushbutton(name, 180, self.h-27 , 56, 17, flags, title, next) def cancel(self, title, next, name = "Cancel", active = 1): """Add a cancel button with a given title, the tab-next button, its name in the Control table, possibly initially disabled. Return the button, so that events can be associated""" if active: flags = 3 # Visible|Enabled else: flags = 1 # Visible return self.pushbutton(name, 304, self.h-27, 56, 17, flags, title, next) def next(self, title, next, name = "Next", active = 1): """Add a Next button with a given title, the tab-next button, its name in the Control table, possibly initially disabled. Return the button, so that events can be associated""" if active: flags = 3 # Visible|Enabled else: flags = 1 # Visible return self.pushbutton(name, 236, self.h-27, 56, 17, flags, title, next) def xbutton(self, name, title, next, xpos): """Add a button with a given title, the tab-next button, its name in the Control table, giving its x position; the y-position is aligned with the other buttons. 
Return the button, so that events can be associated""" return self.pushbutton(name, int(self.w*xpos - 28), self.h-27, 56, 17, 3, title, next) class bdist_msi(Command): description = "create a Microsoft Installer (.msi) binary distribution" user_options = [('bdist-dir=', None, "temporary directory for creating the distribution"), ('plat-name=', 'p', "platform name to embed in generated filenames " "(default: %s)" % get_platform()), ('keep-temp', 'k', "keep the pseudo-installation tree around after " + "creating the distribution archive"), ('target-version=', None, "require a specific python version" + " on the target system"), ('no-target-compile', 'c', "do not compile .py to .pyc on the target system"), ('no-target-optimize', 'o', "do not compile .py to .pyo (optimized)" "on the target system"), ('dist-dir=', 'd', "directory to put final built distributions in"), ('skip-build', None, "skip rebuilding everything (for testing/debugging)"), ('install-script=', None, "basename of installation script to be run after" "installation or before deinstallation"), ('pre-install-script=', None, "Fully qualified filename of a script to be run before " "any files are installed. This script need not be in the " "distribution"), ] boolean_options = ['keep-temp', 'no-target-compile', 'no-target-optimize', 'skip-build'] all_versions = ['2.0', '2.1', '2.2', '2.3', '2.4', '2.5', '2.6', '2.7', '2.8', '2.9', '3.0', '3.1', '3.2', '3.3', '3.4', '3.5', '3.6', '3.7', '3.8', '3.9'] other_version = 'X' def initialize_options(self): self.bdist_dir = None self.plat_name = None self.keep_temp = 0 self.no_target_compile = 0 self.no_target_optimize = 0 self.target_version = None self.dist_dir = None self.skip_build = None self.install_script = None self.pre_install_script = None self.versions = None def finalize_options(self): self.set_undefined_options('bdist', ('skip_build', 'skip_build')) if self.bdist_dir is None: bdist_base = self.get_finalized_command('bdist').bdist_base self.bdist_dir = os.path.join(bdist_base, 'msi') short_version = get_python_version() if (not self.target_version) and self.distribution.has_ext_modules(): self.target_version = short_version if self.target_version: self.versions = [self.target_version] if not self.skip_build and self.distribution.has_ext_modules()\ and self.target_version != short_version: raise DistutilsOptionError( "target version can only be %s, or the '--skip-build'" " option must be specified" % (short_version,)) else: self.versions = list(self.all_versions) self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'), ('plat_name', 'plat_name'), ) if self.pre_install_script: raise DistutilsOptionError( "the pre-install-script feature is not yet implemented") if self.install_script: for script in self.distribution.scripts: if self.install_script == os.path.basename(script): break else: raise DistutilsOptionError( "install_script '%s' not found in scripts" % self.install_script) self.install_script_key = None def run(self): if not self.skip_build: self.run_command('build') install = self.reinitialize_command('install', reinit_subcommands=1) install.prefix = self.bdist_dir install.skip_build = self.skip_build install.warn_dir = 0 install_lib = self.reinitialize_command('install_lib') # we do not want to include pyc or pyo files install_lib.compile = 0 install_lib.optimize = 0 if self.distribution.has_ext_modules(): # If we are building an installer for a Python version other # than the one we are currently running, then we need to ensure # our build_lib reflects the other Python 
version rather than ours. # Note that for target_version!=sys.version, we must have skipped the # build step, so there is no issue with enforcing the build of this # version. target_version = self.target_version if not target_version: assert self.skip_build, "Should have already checked this" target_version = sys.version[0:3] plat_specifier = ".%s-%s" % (self.plat_name, target_version) build = self.get_finalized_command('build') build.build_lib = os.path.join(build.build_base, 'lib' + plat_specifier) log.info("installing to %s", self.bdist_dir) install.ensure_finalized() # avoid warning of 'install_lib' about installing # into a directory not in sys.path sys.path.insert(0, os.path.join(self.bdist_dir, 'PURELIB')) install.run() del sys.path[0] self.mkpath(self.dist_dir) fullname = self.distribution.get_fullname() installer_name = self.get_installer_filename(fullname) installer_name = os.path.abspath(installer_name) if os.path.exists(installer_name): os.unlink(installer_name) metadata = self.distribution.metadata author = metadata.author if not author: author = metadata.maintainer if not author: author = "UNKNOWN" version = metadata.get_version() # ProductVersion must be strictly numeric # XXX need to deal with prerelease versions sversion = "%d.%d.%d" % StrictVersion(version).version # Prefix ProductName with Python x.y, so that # it sorts together with the other Python packages # in Add-Remove-Programs (APR) fullname = self.distribution.get_fullname() if self.target_version: product_name = "Python %s %s" % (self.target_version, fullname) else: product_name = "Python %s" % (fullname) self.db = msilib.init_database(installer_name, schema, product_name, msilib.gen_uuid(), sversion, author) msilib.add_tables(self.db, sequence) props = [('DistVersion', version)] email = metadata.author_email or metadata.maintainer_email if email: props.append(("ARPCONTACT", email)) if metadata.url: props.append(("ARPURLINFOABOUT", metadata.url)) if props: add_data(self.db, 'Property', props) self.add_find_python() self.add_files() self.add_scripts() self.add_ui() self.db.Commit() if hasattr(self.distribution, 'dist_files'): tup = 'bdist_msi', self.target_version or 'any', fullname self.distribution.dist_files.append(tup) if not self.keep_temp: remove_tree(self.bdist_dir, dry_run=self.dry_run) def add_files(self): db = self.db cab = msilib.CAB("distfiles") rootdir = os.path.abspath(self.bdist_dir) root = Directory(db, cab, None, rootdir, "TARGETDIR", "SourceDir") f = Feature(db, "Python", "Python", "Everything", 0, 1, directory="TARGETDIR") items = [(f, root, '')] for version in self.versions + [self.other_version]: target = "TARGETDIR" + version name = default = "Python" + version desc = "Everything" if version is self.other_version: title = "Python from another location" level = 2 else: title = "Python %s from registry" % version level = 1 f = Feature(db, name, title, desc, 1, level, directory=target) dir = Directory(db, cab, root, rootdir, target, default) items.append((f, dir, version)) db.Commit() seen = {} for feature, dir, version in items: todo = [dir] while todo: dir = todo.pop() for file in os.listdir(dir.absolute): afile = os.path.join(dir.absolute, file) if os.path.isdir(afile): short = "%s|%s" % (dir.make_short(file), file) default = file + version newdir = Directory(db, cab, dir, file, default, short) todo.append(newdir) else: if not dir.component: dir.start_component(dir.logical, feature, 0) if afile not in seen: key = seen[afile] = dir.add_file(file) if file==self.install_script: if 
self.install_script_key: raise DistutilsOptionError( "Multiple files with name %s" % file) self.install_script_key = '[#%s]' % key else: key = seen[afile] add_data(self.db, "DuplicateFile", [(key + version, dir.component, key, None, dir.logical)]) db.Commit() cab.commit(db) def add_find_python(self): """Adds code to the installer to compute the location of Python. Properties PYTHON.MACHINE.X.Y and PYTHON.USER.X.Y will be set from the registry for each version of Python. Properties TARGETDIRX.Y will be set from PYTHON.USER.X.Y if defined, else from PYTHON.MACHINE.X.Y. Properties PYTHONX.Y will be set to TARGETDIRX.Y\\python.exe""" start = 402 for ver in self.versions: install_path = r"SOFTWARE\Python\PythonCore\%s\InstallPath" % ver machine_reg = "python.machine." + ver user_reg = "python.user." + ver machine_prop = "PYTHON.MACHINE." + ver user_prop = "PYTHON.USER." + ver machine_action = "PythonFromMachine" + ver user_action = "PythonFromUser" + ver exe_action = "PythonExe" + ver target_dir_prop = "TARGETDIR" + ver exe_prop = "PYTHON" + ver if msilib.Win64: # type: msidbLocatorTypeRawValue + msidbLocatorType64bit Type = 2+16 else: Type = 2 add_data(self.db, "RegLocator", [(machine_reg, 2, install_path, None, Type), (user_reg, 1, install_path, None, Type)]) add_data(self.db, "AppSearch", [(machine_prop, machine_reg), (user_prop, user_reg)]) add_data(self.db, "CustomAction", [(machine_action, 51+256, target_dir_prop, "[" + machine_prop + "]"), (user_action, 51+256, target_dir_prop, "[" + user_prop + "]"), (exe_action, 51+256, exe_prop, "[" + target_dir_prop + "]\\python.exe"), ]) add_data(self.db, "InstallExecuteSequence", [(machine_action, machine_prop, start), (user_action, user_prop, start + 1), (exe_action, None, start + 2), ]) add_data(self.db, "InstallUISequence", [(machine_action, machine_prop, start), (user_action, user_prop, start + 1), (exe_action, None, start + 2), ]) add_data(self.db, "Condition", [("Python" + ver, 0, "NOT TARGETDIR" + ver)]) start += 4 assert start < 500 def add_scripts(self): if self.install_script: start = 6800 for ver in self.versions + [self.other_version]: install_action = "install_script." + ver exe_prop = "PYTHON" + ver add_data(self.db, "CustomAction", [(install_action, 50, exe_prop, self.install_script_key)]) add_data(self.db, "InstallExecuteSequence", [(install_action, "&Python%s=3" % ver, start)]) start += 1 # XXX pre-install scripts are currently refused in finalize_options() # but if this feature is completed, it will also need to add # entries for each version as the above code does if self.pre_install_script: scriptfn = os.path.join(self.bdist_dir, "preinstall.bat") f = open(scriptfn, "w") # The batch file will be executed with [PYTHON], so that %1 # is the path to the Python interpreter; %0 will be the path # of the batch file. 
# rem =""" # %1 %0 # exit # """ # <actual script> f.write('rem ="""\n%1 %0\nexit\n"""\n') f.write(open(self.pre_install_script).read()) f.close() add_data(self.db, "Binary", [("PreInstall", msilib.Binary(scriptfn)) ]) add_data(self.db, "CustomAction", [("PreInstall", 2, "PreInstall", None) ]) add_data(self.db, "InstallExecuteSequence", [("PreInstall", "NOT Installed", 450)]) def add_ui(self): db = self.db x = y = 50 w = 370 h = 300 title = "[ProductName] Setup" # see "Dialog Style Bits" modal = 3 # visible | modal modeless = 1 # visible track_disk_space = 32 # UI customization properties add_data(db, "Property", # See "DefaultUIFont Property" [("DefaultUIFont", "DlgFont8"), # See "ErrorDialog Style Bit" ("ErrorDialog", "ErrorDlg"), ("Progress1", "Install"), # modified in maintenance type dlg ("Progress2", "installs"), ("MaintenanceForm_Action", "Repair"), # possible values: ALL, JUSTME ("WhichUsers", "ALL") ]) # Fonts, see "TextStyle Table" add_data(db, "TextStyle", [("DlgFont8", "Tahoma", 9, None, 0), ("DlgFontBold8", "Tahoma", 8, None, 1), #bold ("VerdanaBold10", "Verdana", 10, None, 1), ("VerdanaRed9", "Verdana", 9, 255, 0), ]) # UI Sequences, see "InstallUISequence Table", "Using a Sequence Table" # Numbers indicate sequence; see sequence.py for how these action integrate add_data(db, "InstallUISequence", [("PrepareDlg", "Not Privileged or Windows9x or Installed", 140), ("WhichUsersDlg", "Privileged and not Windows9x and not Installed", 141), # In the user interface, assume all-users installation if privileged. ("SelectFeaturesDlg", "Not Installed", 1230), # XXX no support for resume installations yet #("ResumeDlg", "Installed AND (RESUME OR Preselected)", 1240), ("MaintenanceTypeDlg", "Installed AND NOT RESUME AND NOT Preselected", 1250), ("ProgressDlg", None, 1280)]) add_data(db, 'ActionText', text.ActionText) add_data(db, 'UIText', text.UIText) ##################################################################### # Standard dialogs: FatalError, UserExit, ExitDialog fatal=PyDialog(db, "FatalError", x, y, w, h, modal, title, "Finish", "Finish", "Finish") fatal.title("[ProductName] Installer ended prematurely") fatal.back("< Back", "Finish", active = 0) fatal.cancel("Cancel", "Back", active = 0) fatal.text("Description1", 15, 70, 320, 80, 0x30003, "[ProductName] setup ended prematurely because of an error. Your system has not been modified. To install this program at a later time, please run the installation again.") fatal.text("Description2", 15, 155, 320, 20, 0x30003, "Click the Finish button to exit the Installer.") c=fatal.next("Finish", "Cancel", name="Finish") c.event("EndDialog", "Exit") user_exit=PyDialog(db, "UserExit", x, y, w, h, modal, title, "Finish", "Finish", "Finish") user_exit.title("[ProductName] Installer was interrupted") user_exit.back("< Back", "Finish", active = 0) user_exit.cancel("Cancel", "Back", active = 0) user_exit.text("Description1", 15, 70, 320, 80, 0x30003, "[ProductName] setup was interrupted. Your system has not been modified. 
" "To install this program at a later time, please run the installation again.") user_exit.text("Description2", 15, 155, 320, 20, 0x30003, "Click the Finish button to exit the Installer.") c = user_exit.next("Finish", "Cancel", name="Finish") c.event("EndDialog", "Exit") exit_dialog = PyDialog(db, "ExitDialog", x, y, w, h, modal, title, "Finish", "Finish", "Finish") exit_dialog.title("Completing the [ProductName] Installer") exit_dialog.back("< Back", "Finish", active = 0) exit_dialog.cancel("Cancel", "Back", active = 0) exit_dialog.text("Description", 15, 235, 320, 20, 0x30003, "Click the Finish button to exit the Installer.") c = exit_dialog.next("Finish", "Cancel", name="Finish") c.event("EndDialog", "Return") ##################################################################### # Required dialog: FilesInUse, ErrorDlg inuse = PyDialog(db, "FilesInUse", x, y, w, h, 19, # KeepModeless|Modal|Visible title, "Retry", "Retry", "Retry", bitmap=False) inuse.text("Title", 15, 6, 200, 15, 0x30003, r"{\DlgFontBold8}Files in Use") inuse.text("Description", 20, 23, 280, 20, 0x30003, "Some files that need to be updated are currently in use.") inuse.text("Text", 20, 55, 330, 50, 3, "The following applications are using files that need to be updated by this setup. Close these applications and then click Retry to continue the installation or Cancel to exit it.") inuse.control("List", "ListBox", 20, 107, 330, 130, 7, "FileInUseProcess", None, None, None) c=inuse.back("Exit", "Ignore", name="Exit") c.event("EndDialog", "Exit") c=inuse.next("Ignore", "Retry", name="Ignore") c.event("EndDialog", "Ignore") c=inuse.cancel("Retry", "Exit", name="Retry") c.event("EndDialog","Retry") # See "Error Dialog". See "ICE20" for the required names of the controls. error = Dialog(db, "ErrorDlg", 50, 10, 330, 101, 65543, # Error|Minimize|Modal|Visible title, "ErrorText", None, None) error.text("ErrorText", 50,9,280,48,3, "") #error.control("ErrorIcon", "Icon", 15, 9, 24, 24, 5242881, None, "py.ico", None, None) error.pushbutton("N",120,72,81,21,3,"No",None).event("EndDialog","ErrorNo") error.pushbutton("Y",240,72,81,21,3,"Yes",None).event("EndDialog","ErrorYes") error.pushbutton("A",0,72,81,21,3,"Abort",None).event("EndDialog","ErrorAbort") error.pushbutton("C",42,72,81,21,3,"Cancel",None).event("EndDialog","ErrorCancel") error.pushbutton("I",81,72,81,21,3,"Ignore",None).event("EndDialog","ErrorIgnore") error.pushbutton("O",159,72,81,21,3,"Ok",None).event("EndDialog","ErrorOk") error.pushbutton("R",198,72,81,21,3,"Retry",None).event("EndDialog","ErrorRetry") ##################################################################### # Global "Query Cancel" dialog cancel = Dialog(db, "CancelDlg", 50, 10, 260, 85, 3, title, "No", "No", "No") cancel.text("Text", 48, 15, 194, 30, 3, "Are you sure you want to cancel [ProductName] installation?") #cancel.control("Icon", "Icon", 15, 15, 24, 24, 5242881, None, # "py.ico", None, None) c=cancel.pushbutton("Yes", 72, 57, 56, 17, 3, "Yes", "No") c.event("EndDialog", "Exit") c=cancel.pushbutton("No", 132, 57, 56, 17, 3, "No", "Yes") c.event("EndDialog", "Return") ##################################################################### # Global "Wait for costing" dialog costing = Dialog(db, "WaitForCostingDlg", 50, 10, 260, 85, modal, title, "Return", "Return", "Return") costing.text("Text", 48, 15, 194, 30, 3, "Please wait while the installer finishes determining your disk space requirements.") c = costing.pushbutton("Return", 102, 57, 56, 17, 3, "Return", None) c.event("EndDialog", "Exit") 
##################################################################### # Preparation dialog: no user input except cancellation prep = PyDialog(db, "PrepareDlg", x, y, w, h, modeless, title, "Cancel", "Cancel", "Cancel") prep.text("Description", 15, 70, 320, 40, 0x30003, "Please wait while the Installer prepares to guide you through the installation.") prep.title("Welcome to the [ProductName] Installer") c=prep.text("ActionText", 15, 110, 320, 20, 0x30003, "Pondering...") c.mapping("ActionText", "Text") c=prep.text("ActionData", 15, 135, 320, 30, 0x30003, None) c.mapping("ActionData", "Text") prep.back("Back", None, active=0) prep.next("Next", None, active=0) c=prep.cancel("Cancel", None) c.event("SpawnDialog", "CancelDlg") ##################################################################### # Feature (Python directory) selection seldlg = PyDialog(db, "SelectFeaturesDlg", x, y, w, h, modal, title, "Next", "Next", "Cancel") seldlg.title("Select Python Installations") seldlg.text("Hint", 15, 30, 300, 20, 3, "Select the Python locations where %s should be installed." % self.distribution.get_fullname()) seldlg.back("< Back", None, active=0) c = seldlg.next("Next >", "Cancel") order = 1 c.event("[TARGETDIR]", "[SourceDir]", ordering=order) for version in self.versions + [self.other_version]: order += 1 c.event("[TARGETDIR]", "[TARGETDIR%s]" % version, "FEATURE_SELECTED AND &Python%s=3" % version, ordering=order) c.event("SpawnWaitDialog", "WaitForCostingDlg", ordering=order + 1) c.event("EndDialog", "Return", ordering=order + 2) c = seldlg.cancel("Cancel", "Features") c.event("SpawnDialog", "CancelDlg") c = seldlg.control("Features", "SelectionTree", 15, 60, 300, 120, 3, "FEATURE", None, "PathEdit", None) c.event("[FEATURE_SELECTED]", "1") ver = self.other_version install_other_cond = "FEATURE_SELECTED AND &Python%s=3" % ver dont_install_other_cond = "FEATURE_SELECTED AND &Python%s<>3" % ver c = seldlg.text("Other", 15, 200, 300, 15, 3, "Provide an alternate Python location") c.condition("Enable", install_other_cond) c.condition("Show", install_other_cond) c.condition("Disable", dont_install_other_cond) c.condition("Hide", dont_install_other_cond) c = seldlg.control("PathEdit", "PathEdit", 15, 215, 300, 16, 1, "TARGETDIR" + ver, None, "Next", None) c.condition("Enable", install_other_cond) c.condition("Show", install_other_cond) c.condition("Disable", dont_install_other_cond) c.condition("Hide", dont_install_other_cond) ##################################################################### # Disk cost cost = PyDialog(db, "DiskCostDlg", x, y, w, h, modal, title, "OK", "OK", "OK", bitmap=False) cost.text("Title", 15, 6, 200, 15, 0x30003, "{\DlgFontBold8}Disk Space Requirements") cost.text("Description", 20, 20, 280, 20, 0x30003, "The disk space required for the installation of the selected features.") cost.text("Text", 20, 53, 330, 60, 3, "The highlighted volumes (if any) do not have enough disk space " "available for the currently selected features. You can either " "remove some files from the highlighted volumes, or choose to " "install less features onto local drive(s), or select different " "destination drive(s).") cost.control("VolumeList", "VolumeCostList", 20, 100, 330, 150, 393223, None, "{120}{70}{70}{70}{70}", None, None) cost.xbutton("OK", "Ok", None, 0.5).event("EndDialog", "Return") ##################################################################### # WhichUsers Dialog. Only available on NT, and for privileged users. 
# This must be run before FindRelatedProducts, because that will # take into account whether the previous installation was per-user # or per-machine. We currently don't support going back to this # dialog after "Next" was selected; to support this, we would need to # find how to reset the ALLUSERS property, and how to re-run # FindRelatedProducts. # On Windows9x, the ALLUSERS property is ignored on the command line # and in the Property table, but installer fails according to the documentation # if a dialog attempts to set ALLUSERS. whichusers = PyDialog(db, "WhichUsersDlg", x, y, w, h, modal, title, "AdminInstall", "Next", "Cancel") whichusers.title("Select whether to install [ProductName] for all users of this computer.") # A radio group with two options: allusers, justme g = whichusers.radiogroup("AdminInstall", 15, 60, 260, 50, 3, "WhichUsers", "", "Next") g.add("ALL", 0, 5, 150, 20, "Install for all users") g.add("JUSTME", 0, 25, 150, 20, "Install just for me") whichusers.back("Back", None, active=0) c = whichusers.next("Next >", "Cancel") c.event("[ALLUSERS]", "1", 'WhichUsers="ALL"', 1) c.event("EndDialog", "Return", ordering = 2) c = whichusers.cancel("Cancel", "AdminInstall") c.event("SpawnDialog", "CancelDlg") ##################################################################### # Installation Progress dialog (modeless) progress = PyDialog(db, "ProgressDlg", x, y, w, h, modeless, title, "Cancel", "Cancel", "Cancel", bitmap=False) progress.text("Title", 20, 15, 200, 15, 0x30003, "{\DlgFontBold8}[Progress1] [ProductName]") progress.text("Text", 35, 65, 300, 30, 3, "Please wait while the Installer [Progress2] [ProductName]. " "This may take several minutes.") progress.text("StatusLabel", 35, 100, 35, 20, 3, "Status:") c=progress.text("ActionText", 70, 100, w-70, 20, 3, "Pondering...") c.mapping("ActionText", "Text") #c=progress.text("ActionData", 35, 140, 300, 20, 3, None) #c.mapping("ActionData", "Text") c=progress.control("ProgressBar", "ProgressBar", 35, 120, 300, 10, 65537, None, "Progress done", None, None) c.mapping("SetProgress", "Progress") progress.back("< Back", "Next", active=False) progress.next("Next >", "Cancel", active=False) progress.cancel("Cancel", "Back").event("SpawnDialog", "CancelDlg") ################################################################### # Maintenance type: repair/uninstall maint = PyDialog(db, "MaintenanceTypeDlg", x, y, w, h, modal, title, "Next", "Next", "Cancel") maint.title("Welcome to the [ProductName] Setup Wizard") maint.text("BodyText", 15, 63, 330, 42, 3, "Select whether you want to repair or remove [ProductName].") g=maint.radiogroup("RepairRadioGroup", 15, 108, 330, 60, 3, "MaintenanceForm_Action", "", "Next") #g.add("Change", 0, 0, 200, 17, "&Change [ProductName]") g.add("Repair", 0, 18, 200, 17, "&Repair [ProductName]") g.add("Remove", 0, 36, 200, 17, "Re&move [ProductName]") maint.back("< Back", None, active=False) c=maint.next("Finish", "Cancel") # Change installation: Change progress dialog to "Change", then ask # for feature selection #c.event("[Progress1]", "Change", 'MaintenanceForm_Action="Change"', 1) #c.event("[Progress2]", "changes", 'MaintenanceForm_Action="Change"', 2) # Reinstall: Change progress dialog to "Repair", then invoke reinstall # Also set list of reinstalled features to "ALL" c.event("[REINSTALL]", "ALL", 'MaintenanceForm_Action="Repair"', 5) c.event("[Progress1]", "Repairing", 'MaintenanceForm_Action="Repair"', 6) c.event("[Progress2]", "repairs", 'MaintenanceForm_Action="Repair"', 7) c.event("Reinstall", 
"ALL", 'MaintenanceForm_Action="Repair"', 8) # Uninstall: Change progress to "Remove", then invoke uninstall # Also set list of removed features to "ALL" c.event("[REMOVE]", "ALL", 'MaintenanceForm_Action="Remove"', 11) c.event("[Progress1]", "Removing", 'MaintenanceForm_Action="Remove"', 12) c.event("[Progress2]", "removes", 'MaintenanceForm_Action="Remove"', 13) c.event("Remove", "ALL", 'MaintenanceForm_Action="Remove"', 14) # Close dialog when maintenance action scheduled c.event("EndDialog", "Return", 'MaintenanceForm_Action<>"Change"', 20) #c.event("NewDialog", "SelectFeaturesDlg", 'MaintenanceForm_Action="Change"', 21) maint.cancel("Cancel", "RepairRadioGroup").event("SpawnDialog", "CancelDlg") def get_installer_filename(self, fullname): # Factored out to allow overriding in subclasses if self.target_version: base_name = "%s.%s-py%s.msi" % (fullname, self.plat_name, self.target_version) else: base_name = "%s.%s.msi" % (fullname, self.plat_name) installer_name = os.path.join(self.dist_dir, base_name) return installer_name
lgpl-3.0
suyashphadtare/sajil-final-erp
erpnext/startup/report_data_map.py
21
8867
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals # mappings for table dumps # "remember to add indexes!" data_map = { "Company": { "columns": ["name"], "conditions": ["docstatus < 2"] }, "Fiscal Year": { "columns": ["name", "year_start_date", "year_end_date"], "conditions": ["docstatus < 2"], }, # Accounts "Account": { "columns": ["name", "parent_account", "lft", "rgt", "report_type", "company", "group_or_ledger"], "conditions": ["docstatus < 2"], "order_by": "lft", "links": { "company": ["Company", "name"], } }, "Cost Center": { "columns": ["name", "lft", "rgt"], "conditions": ["docstatus < 2"], "order_by": "lft" }, "GL Entry": { "columns": ["name", "account", "posting_date", "cost_center", "debit", "credit", "is_opening", "company", "voucher_type", "voucher_no", "remarks"], "order_by": "posting_date, account", "links": { "account": ["Account", "name"], "company": ["Company", "name"], "cost_center": ["Cost Center", "name"] } }, # Stock "Item": { "columns": ["name", "if(item_name=name, '', item_name) as item_name", "description", "item_group as parent_item_group", "stock_uom", "brand", "valuation_method", "re_order_level", "re_order_qty"], # "conditions": ["docstatus < 2"], "order_by": "name", "links": { "parent_item_group": ["Item Group", "name"], "brand": ["Brand", "name"] } }, "Item Group": { "columns": ["name", "parent_item_group"], # "conditions": ["docstatus < 2"], "order_by": "lft" }, "Brand": { "columns": ["name"], "conditions": ["docstatus < 2"], "order_by": "name" }, "Project": { "columns": ["name"], "conditions": ["docstatus < 2"], "order_by": "name" }, "Warehouse": { "columns": ["name"], "conditions": ["docstatus < 2"], "order_by": "name" }, "Stock Ledger Entry": { "columns": ["name", "posting_date", "posting_time", "item_code", "warehouse", "actual_qty as qty", "voucher_type", "voucher_no", "project", "ifnull(incoming_rate,0) as incoming_rate", "stock_uom", "serial_no", "qty_after_transaction", "valuation_rate"], "order_by": "posting_date, posting_time, name", "links": { "item_code": ["Item", "name"], "warehouse": ["Warehouse", "name"], "project": ["Project", "name"] }, "force_index": "posting_sort_index" }, "Serial No": { "columns": ["name", "purchase_rate as incoming_rate"], "conditions": ["docstatus < 2"], "order_by": "name" }, "Stock Entry": { "columns": ["name", "purpose"], "conditions": ["docstatus=1"], "order_by": "posting_date, posting_time, name", }, "Production Order": { "columns": ["name", "production_item as item_code", "(ifnull(qty, 0) - ifnull(produced_qty, 0)) as qty", "fg_warehouse as warehouse"], "conditions": ["docstatus=1", "status != 'Stopped'", "ifnull(fg_warehouse, '')!=''", "ifnull(qty, 0) > ifnull(produced_qty, 0)"], "links": { "item_code": ["Item", "name"], "warehouse": ["Warehouse", "name"] }, }, "Material Request Item": { "columns": ["item.name as name", "item_code", "warehouse", "(ifnull(qty, 0) - ifnull(ordered_qty, 0)) as qty"], "from": "`tabMaterial Request Item` item, `tabMaterial Request` main", "conditions": ["item.parent = main.name", "main.docstatus=1", "main.status != 'Stopped'", "ifnull(warehouse, '')!=''", "ifnull(qty, 0) > ifnull(ordered_qty, 0)"], "links": { "item_code": ["Item", "name"], "warehouse": ["Warehouse", "name"] }, }, "Purchase Order Item": { "columns": ["item.name as name", "item_code", "warehouse", "(ifnull(qty, 0) - ifnull(received_qty, 0)) as qty"], "from": "`tabPurchase Order Item` item, `tabPurchase 
Order` main", "conditions": ["item.parent = main.name", "main.docstatus=1", "main.status != 'Stopped'", "ifnull(warehouse, '')!=''", "ifnull(qty, 0) > ifnull(received_qty, 0)"], "links": { "item_code": ["Item", "name"], "warehouse": ["Warehouse", "name"] }, }, "Sales Order Item": { "columns": ["item.name as name", "item_code", "(ifnull(qty, 0) - ifnull(delivered_qty, 0)) as qty", "warehouse"], "from": "`tabSales Order Item` item, `tabSales Order` main", "conditions": ["item.parent = main.name", "main.docstatus=1", "main.status != 'Stopped'", "ifnull(warehouse, '')!=''", "ifnull(qty, 0) > ifnull(delivered_qty, 0)"], "links": { "item_code": ["Item", "name"], "warehouse": ["Warehouse", "name"] }, }, # Sales "Customer": { "columns": ["name", "if(customer_name=name, '', customer_name) as customer_name", "customer_group as parent_customer_group", "territory as parent_territory"], "conditions": ["docstatus < 2"], "order_by": "name", "links": { "parent_customer_group": ["Customer Group", "name"], "parent_territory": ["Territory", "name"], } }, "Customer Group": { "columns": ["name", "parent_customer_group"], "conditions": ["docstatus < 2"], "order_by": "lft" }, "Territory": { "columns": ["name", "parent_territory"], "conditions": ["docstatus < 2"], "order_by": "lft" }, "Sales Invoice": { "columns": ["name", "customer", "posting_date", "company"], "conditions": ["docstatus=1"], "order_by": "posting_date", "links": { "customer": ["Customer", "name"], "company":["Company", "name"] } }, "Sales Invoice Item": { "columns": ["name", "parent", "item_code", "qty", "base_amount"], "conditions": ["docstatus=1", "ifnull(parent, '')!=''"], "order_by": "parent", "links": { "parent": ["Sales Invoice", "name"], "item_code": ["Item", "name"] } }, "Sales Order": { "columns": ["name", "customer", "transaction_date as posting_date", "company"], "conditions": ["docstatus=1"], "order_by": "transaction_date", "links": { "customer": ["Customer", "name"], "company":["Company", "name"] } }, "Sales Order Item[Sales Analytics]": { "columns": ["name", "parent", "item_code", "qty", "base_amount"], "conditions": ["docstatus=1", "ifnull(parent, '')!=''"], "order_by": "parent", "links": { "parent": ["Sales Order", "name"], "item_code": ["Item", "name"] } }, "Delivery Note": { "columns": ["name", "customer", "posting_date", "company"], "conditions": ["docstatus=1"], "order_by": "posting_date", "links": { "customer": ["Customer", "name"], "company":["Company", "name"] } }, "Delivery Note Item[Sales Analytics]": { "columns": ["name", "parent", "item_code", "qty", "base_amount"], "conditions": ["docstatus=1", "ifnull(parent, '')!=''"], "order_by": "parent", "links": { "parent": ["Delivery Note", "name"], "item_code": ["Item", "name"] } }, "Supplier": { "columns": ["name", "if(supplier_name=name, '', supplier_name) as supplier_name", "supplier_type as parent_supplier_type"], "conditions": ["docstatus < 2"], "order_by": "name", "links": { "parent_supplier_type": ["Supplier Type", "name"], } }, "Supplier Type": { "columns": ["name"], "conditions": ["docstatus < 2"], "order_by": "name" }, "Purchase Invoice": { "columns": ["name", "supplier", "posting_date", "company"], "conditions": ["docstatus=1"], "order_by": "posting_date", "links": { "supplier": ["Supplier", "name"], "company":["Company", "name"] } }, "Purchase Invoice Item": { "columns": ["name", "parent", "item_code", "qty", "base_amount"], "conditions": ["docstatus=1", "ifnull(parent, '')!=''"], "order_by": "parent", "links": { "parent": ["Purchase Invoice", "name"], "item_code": 
["Item", "name"] } }, "Purchase Order": { "columns": ["name", "supplier", "transaction_date as posting_date", "company"], "conditions": ["docstatus=1"], "order_by": "posting_date", "links": { "supplier": ["Supplier", "name"], "company":["Company", "name"] } }, "Purchase Order Item[Purchase Analytics]": { "columns": ["name", "parent", "item_code", "qty", "base_amount"], "conditions": ["docstatus=1", "ifnull(parent, '')!=''"], "order_by": "parent", "links": { "parent": ["Purchase Order", "name"], "item_code": ["Item", "name"] } }, "Purchase Receipt": { "columns": ["name", "supplier", "posting_date", "company"], "conditions": ["docstatus=1"], "order_by": "posting_date", "links": { "supplier": ["Supplier", "name"], "company":["Company", "name"] } }, "Purchase Receipt Item[Purchase Analytics]": { "columns": ["name", "parent", "item_code", "qty", "base_amount"], "conditions": ["docstatus=1", "ifnull(parent, '')!=''"], "order_by": "parent", "links": { "parent": ["Purchase Receipt", "name"], "item_code": ["Item", "name"] } }, # Support "Support Ticket": { "columns": ["name","status","creation","resolution_date","first_responded_on"], "conditions": ["docstatus < 2"], "order_by": "creation" } }
agpl-3.0
tovmeod/anaf
anaf/knowledge/forms.py
1
4996
""" Knowledge base model forms """ from django.forms import ModelForm, Form, ChoiceField from models import KnowledgeFolder, KnowledgeItem, KnowledgeCategory from anaf.core.models import Object from anaf.core.decorators import preprocess_form from django.utils.translation import ugettext as _ from django.core.urlresolvers import reverse preprocess_form() class MassActionForm(Form): """ Mass action form for Reports """ delete = ChoiceField(label=_("Delete"), choices=(('', '-----'), ('delete', _('Delete Completely')), ('trash', _('Move to Trash'))), required=False) instance = None def __init__(self, user, *args, **kwargs): if 'instance' in kwargs: self.instance = kwargs['instance'] del kwargs['instance'] super(MassActionForm, self).__init__(*args, **kwargs) self.fields['delete'] = ChoiceField(label=_("Delete"), choices=(('', '-----'), ('delete', _( 'Delete Completely')), ('trash', _('Move to Trash'))), required=False) def save(self, *args, **kwargs): "Process form" if self.instance and self.is_valid() and self.cleaned_data['delete']: if self.cleaned_data['delete'] == 'delete': self.instance.delete() if self.cleaned_data['delete'] == 'trash': self.instance.trash = True self.instance.save() class KnowledgeFolderForm(ModelForm): """ Knowledge folder form """ def __init__(self, user, knowledgeType_id, *args, **kwargs): super(KnowledgeFolderForm, self).__init__(*args, **kwargs) self.fields['name'].label = _("Name") self.fields['parent'].label = _("Parent") self.fields['parent'].queryset = KnowledgeFolder.objects self.fields['parent'].queryset = Object.filter_permitted( user, KnowledgeFolder.objects, mode='x') if knowledgeType_id: self.fields['parent'].initial = knowledgeType_id self.fields['details'].label = _("Details") class Meta: "KnowledgeFolder" model = KnowledgeFolder fields = ('name', 'parent', 'details') class KnowledgeItemForm(ModelForm): """ Knowledge item form """ def __init__(self, user, knowledgeType_id, *args, **kwargs): super(KnowledgeItemForm, self).__init__(*args, **kwargs) self.fields['name'].label = _("Name") self.fields['folder'].label = _("Folder") self.fields['folder'].queryset = Object.filter_permitted( user, KnowledgeFolder.objects, mode='x') self.fields['folder'].widget.attrs.update( {'popuplink': reverse('knowledge_folder_add')}) if knowledgeType_id: self.fields['folder'].initial = knowledgeType_id self.fields['category'].label = _("Category") self.fields['category'].queryset = Object.filter_permitted( user, KnowledgeCategory.objects, mode='x') self.fields['category'].widget.attrs.update( {'popuplink': reverse('knowledge_category_add')}) self.fields['body'].label = _("Body") self.fields['body'].widget.attrs.update({'class': 'full-editor'}) class Meta: "KnowledgeItem" model = KnowledgeItem fields = ('name', 'folder', 'category', 'body') class KnowledgeCategoryForm(ModelForm): """ Knowledge category form """ def __init__(self, *args, **kwargs): super(KnowledgeCategoryForm, self).__init__(*args, **kwargs) self.fields['name'].label = _("Name") self.fields['details'].label = _("Details") class Meta: "KnowledgeCategory" model = KnowledgeCategory fields = ('name', 'details') class FilterForm(ModelForm): """ Filter form definition """ def __init__(self, user, skip=None, *args, **kwargs): if skip is None: skip = [] super(FilterForm, self).__init__(*args, **kwargs) if 'folder' in skip: del self.fields['folder'] else: self.fields['folder'].queryset = Object.filter_permitted( user, KnowledgeFolder.objects, mode='x') # self.fields['folder'].required = False 
self.fields['folder'].label = _("Folder") if 'category' in skip: del self.fields['category'] else: self.fields['category'].queryset = Object.filter_permitted(user, KnowledgeCategory.objects, mode='x') self.fields['category'].required = False self.fields['category'].label = _("Category") class Meta: "Filter" model = KnowledgeItem fields = ('folder', 'category')
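# A hedged usage sketch for the forms above (the view function, the exact
# user object anaf passes as 'user', and the redirect target are
# placeholders):
from django.shortcuts import redirect

def knowledge_item_add(request, folder_id=None):
    form = KnowledgeItemForm(request.user.profile, folder_id,
                             request.POST or None)
    if request.POST and form.is_valid():
        item = form.save()
        return redirect('knowledge_item_view', item.id)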
bsd-3-clause
newemailjdm/scipy
scipy/weave/examples/object.py
100
1679
""" Attribute and method access on Python objects from C++. Note: std::cout type operations currently crash python... Not sure what is up with this... """ from __future__ import absolute_import, print_function import scipy.weave as weave #---------------------------------------------------------------------------- # get/set attribute and call methods example #---------------------------------------------------------------------------- class Foo(object): def __init__(self): self.val = 1 def inc(self,amount): self.val += amount return self.val obj = Foo() code = """ py::tuple result(3); int i = obj.attr("val"); result[0] = i; py::tuple args(1); args[0] = 2; i = obj.mcall("inc",args); result[1] = i; obj.set_attr("val",5); i = obj.attr("val"); result[2] = i; return_val = result; """ print('initial, inc(2), set(5)/get:', weave.inline(code,['obj'])) #---------------------------------------------------------------------------- # indexing of values. #---------------------------------------------------------------------------- from UserList import UserList obj = UserList([1,[1,2],"hello"]) code = """ int i; // find obj length and access each of its items //std::cout << "UserList items: "; //for(i = 0; i < obj.length(); i++) // std::cout << obj[i].str() << " "; //std::cout << std::endl; // assign new values to each of its items for(i = 0; i < obj.length(); i++) obj[i] = "goodbye"; """ weave.inline(code,['obj']) print("obj with new values:", obj)
bsd-3-clause
salfab/CouchPotatoServer
libs/elixir/ext/perform_ddl.py
29
3315
'''
DDL statements for Elixir.

Entities having the perform_ddl statement will automatically execute the
given DDL statement, at the given moment: either before or after the table
creation in SQL.

The 'when' argument can be either 'before-create' or 'after-create'.

The 'statement' argument can be one of:

- a single string statement
- a list of string statements, in which case, each of them will be executed
  in turn.
- a callable which should take no argument and return either a single string
  or a list of strings.

In each string statement, you may use the special '%(fullname)s' construct,
that will be replaced with the real table name including schema, if unknown
to you. Also, the self-explained '%(table)s' and '%(schema)s' may be used
here.

You would use this extension to handle non-Elixir SQL statements, like
triggers etc.

.. sourcecode:: python

    class Movie(Entity):
        title = Field(Unicode(30), primary_key=True)
        year = Field(Integer)

        perform_ddl('after-create',
                    "insert into %(fullname)s values ('Alien', 1979)")

preload_data is a more specific statement meant to preload data in your
entity table from a list of tuples (of field values for each row).

.. sourcecode:: python

    class Movie(Entity):
        title = Field(Unicode(30), primary_key=True)
        year = Field(Integer)

        preload_data(('title', 'year'),
                     [(u'Alien', 1979), (u'Star Wars', 1977)])
        preload_data(('year', 'title'), [(1982, u'Blade Runner')])
        preload_data(data=[(u'Batman', 1966)])
'''
from elixir.statements import Statement
from elixir.properties import EntityBuilder
from sqlalchemy import DDL

__all__ = ['perform_ddl', 'preload_data']
__doc_all__ = []

#
# the perform_ddl statement
#
class PerformDDLEntityBuilder(EntityBuilder):

    def __init__(self, entity, when, statement, on=None, context=None):
        self.entity = entity
        self.when = when
        self.statement = statement
        self.on = on
        self.context = context

    def after_table(self):
        statement = self.statement
        if hasattr(statement, '__call__'):
            statement = statement()
        if not isinstance(statement, list):
            statement = [statement]
        for s in statement:
            ddl = DDL(s, self.on, self.context)
            ddl.execute_at(self.when, self.entity.table)

perform_ddl = Statement(PerformDDLEntityBuilder)

#
# the preload_data statement
#
class PreloadDataEntityBuilder(EntityBuilder):

    def __init__(self, entity, columns=None, data=None):
        self.entity = entity
        self.columns = columns
        self.data = data

    def after_table(self):
        all_columns = [col.name for col in self.entity.table.columns]

        def onload(event, schema_item, connection):
            columns = self.columns
            if columns is None:
                columns = all_columns
            data = self.data
            if hasattr(data, '__call__'):
                data = data()
            insert = schema_item.insert()
            connection.execute(insert,
                [dict(zip(columns, values)) for values in data])

        self.entity.table.append_ddl_listener('after-create', onload)

preload_data = Statement(PreloadDataEntityBuilder)
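# A sketch of the callable form described in the docstring above (the index
# statement and the entity are illustrative; Entity/Field come from elixir,
# as in the docstring examples):
def movie_indexes():
    return ["create index idx_movie_year on %(fullname)s (year)"]

class IndexedMovie(Entity):
    title = Field(Unicode(30), primary_key=True)
    year = Field(Integer)

    perform_ddl('after-create', movie_indexes)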
gpl-3.0
googleapis/python-talent
google/cloud/talent_v4/services/tenant_service/transports/base.py
1
9826
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc from typing import Awaitable, Callable, Dict, Optional, Sequence, Union import packaging.version import pkg_resources import google.auth # type: ignore import google.api_core # type: ignore from google.api_core import exceptions as core_exceptions # type: ignore from google.api_core import gapic_v1 # type: ignore from google.api_core import retry as retries # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore from google.cloud.talent_v4.types import tenant from google.cloud.talent_v4.types import tenant as gct_tenant from google.cloud.talent_v4.types import tenant_service from google.protobuf import empty_pb2 # type: ignore try: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=pkg_resources.get_distribution("google-cloud-talent",).version, ) except pkg_resources.DistributionNotFound: DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo() try: # google.auth.__version__ was added in 1.26.0 _GOOGLE_AUTH_VERSION = google.auth.__version__ except AttributeError: try: # try pkg_resources if it is available _GOOGLE_AUTH_VERSION = pkg_resources.get_distribution("google-auth").version except pkg_resources.DistributionNotFound: # pragma: NO COVER _GOOGLE_AUTH_VERSION = None class TenantServiceTransport(abc.ABC): """Abstract transport class for TenantService.""" AUTH_SCOPES = ( "https://www.googleapis.com/auth/cloud-platform", "https://www.googleapis.com/auth/jobs", ) DEFAULT_HOST: str = "jobs.googleapis.com" def __init__( self, *, host: str = DEFAULT_HOST, credentials: ga_credentials.Credentials = None, credentials_file: Optional[str] = None, scopes: Optional[Sequence[str]] = None, quota_project_id: Optional[str] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, always_use_jwt_access: Optional[bool] = False, **kwargs, ) -> None: """Instantiate the transport. Args: host (Optional[str]): The hostname to connect to. credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none are specified, the client will attempt to ascertain the credentials from the environment. credentials_file (Optional[str]): A file with credentials that can be loaded with :func:`google.auth.load_credentials_from_file`. This argument is mutually exclusive with credentials. scopes (Optional[Sequence[str]]): A list of scopes. quota_project_id (Optional[str]): An optional project to use for billing and quota. client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. Generally, you only need to set this if you're developing your own client library. always_use_jwt_access (Optional[bool]): Whether self signed JWT should be used for service account credentials. 
""" # Save the hostname. Default to port 443 (HTTPS) if none is specified. if ":" not in host: host += ":443" self._host = host scopes_kwargs = self._get_scopes_kwargs(self._host, scopes) # Save the scopes. self._scopes = scopes # If no credentials are provided, then determine the appropriate # defaults. if credentials and credentials_file: raise core_exceptions.DuplicateCredentialArgs( "'credentials_file' and 'credentials' are mutually exclusive" ) if credentials_file is not None: credentials, _ = google.auth.load_credentials_from_file( credentials_file, **scopes_kwargs, quota_project_id=quota_project_id ) elif credentials is None: credentials, _ = google.auth.default( **scopes_kwargs, quota_project_id=quota_project_id ) # If the credentials is service account credentials, then always try to use self signed JWT. if ( always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access") ): credentials = credentials.with_always_use_jwt_access(True) # Save the credentials. self._credentials = credentials # TODO(busunkim): This method is in the base transport # to avoid duplicating code across the transport classes. These functions # should be deleted once the minimum required versions of google-auth is increased. # TODO: Remove this function once google-auth >= 1.25.0 is required @classmethod def _get_scopes_kwargs( cls, host: str, scopes: Optional[Sequence[str]] ) -> Dict[str, Optional[Sequence[str]]]: """Returns scopes kwargs to pass to google-auth methods depending on the google-auth version""" scopes_kwargs = {} if _GOOGLE_AUTH_VERSION and ( packaging.version.parse(_GOOGLE_AUTH_VERSION) >= packaging.version.parse("1.25.0") ): scopes_kwargs = {"scopes": scopes, "default_scopes": cls.AUTH_SCOPES} else: scopes_kwargs = {"scopes": scopes or cls.AUTH_SCOPES} return scopes_kwargs def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { self.create_tenant: gapic_v1.method.wrap_method( self.create_tenant, default_timeout=30.0, client_info=client_info, ), self.get_tenant: gapic_v1.method.wrap_method( self.get_tenant, default_retry=retries.Retry( initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable, ), deadline=30.0, ), default_timeout=30.0, client_info=client_info, ), self.update_tenant: gapic_v1.method.wrap_method( self.update_tenant, default_timeout=30.0, client_info=client_info, ), self.delete_tenant: gapic_v1.method.wrap_method( self.delete_tenant, default_retry=retries.Retry( initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable, ), deadline=30.0, ), default_timeout=30.0, client_info=client_info, ), self.list_tenants: gapic_v1.method.wrap_method( self.list_tenants, default_retry=retries.Retry( initial=0.1, maximum=60.0, multiplier=1.3, predicate=retries.if_exception_type( core_exceptions.DeadlineExceeded, core_exceptions.ServiceUnavailable, ), deadline=30.0, ), default_timeout=30.0, client_info=client_info, ), } @property def create_tenant( self, ) -> Callable[ [tenant_service.CreateTenantRequest], Union[gct_tenant.Tenant, Awaitable[gct_tenant.Tenant]], ]: raise NotImplementedError() @property def get_tenant( self, ) -> Callable[ [tenant_service.GetTenantRequest], Union[tenant.Tenant, Awaitable[tenant.Tenant]], ]: raise NotImplementedError() @property def update_tenant( self, ) -> Callable[ [tenant_service.UpdateTenantRequest], Union[gct_tenant.Tenant, Awaitable[gct_tenant.Tenant]], ]: raise NotImplementedError() @property def delete_tenant( self, ) -> Callable[ [tenant_service.DeleteTenantRequest], Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]], ]: raise NotImplementedError() @property def list_tenants( self, ) -> Callable[ [tenant_service.ListTenantsRequest], Union[ tenant_service.ListTenantsResponse, Awaitable[tenant_service.ListTenantsResponse], ], ]: raise NotImplementedError() __all__ = ("TenantServiceTransport",)
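# A sketch of what _get_scopes_kwargs() above returns, depending on the
# installed google-auth generation ('scopes' here is the caller-supplied
# value, None in this call):
kwargs = TenantServiceTransport._get_scopes_kwargs("jobs.googleapis.com", None)
# google-auth >= 1.25.0:
#   {"scopes": None, "default_scopes": TenantServiceTransport.AUTH_SCOPES}
# older google-auth:
#   {"scopes": TenantServiceTransport.AUTH_SCOPES}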
apache-2.0
thedep2/CouchPotatoServer
libs/guessit/transfo/__init__.py
94
4117
#!/usr/bin/env python # -*- coding: utf-8 -*- # # GuessIt - A library for guessing information from filenames # Copyright (c) 2012 Nicolas Wack <[email protected]> # # GuessIt is free software; you can redistribute it and/or modify it under # the terms of the Lesser GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # GuessIt is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # Lesser GNU General Public License for more details. # # You should have received a copy of the Lesser GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # from __future__ import unicode_literals from guessit import base_text_type, Guess from guessit.patterns import canonical_form from guessit.textutils import clean_string import logging log = logging.getLogger(__name__) def found_property(node, name, confidence): node.guess = Guess({name: node.clean_value}, confidence=confidence, raw=node.value) log.debug('Found with confidence %.2f: %s' % (confidence, node.guess)) def format_guess(guess): """Format all the found values to their natural type. For instance, a year would be stored as an int value, etc... Note that this modifies the dictionary given as input. """ for prop, value in guess.items(): if prop in ('season', 'episodeNumber', 'year', 'cdNumber', 'cdNumberTotal', 'bonusNumber', 'filmNumber'): guess[prop] = int(guess[prop]) elif isinstance(value, base_text_type): if prop in ('edition',): value = clean_string(value) guess[prop] = canonical_form(value).replace('\\', '') return guess def find_and_split_node(node, strategy, logger): string = ' %s ' % node.value # add sentinels for matcher, confidence, args, kwargs in strategy: all_args = [string] if getattr(matcher, 'use_node', False): all_args.append(node) if args: all_args.append(args) if kwargs: result, span = matcher(*all_args, **kwargs) else: result, span = matcher(*all_args) if result: # readjust span to compensate for sentinels span = (span[0] - 1, span[1] - 1) if isinstance(result, Guess): if confidence is None: confidence = result.confidence(list(result.keys())[0]) else: if confidence is None: confidence = 1.0 guess = format_guess(Guess(result, confidence=confidence, raw=string[span[0] + 1:span[1] + 1])) msg = 'Found with confidence %.2f: %s' % (confidence, guess) (logger or log).debug(msg) node.partition(span) absolute_span = (span[0] + node.offset, span[1] + node.offset) for child in node.children: if child.span == absolute_span: child.guess = guess else: find_and_split_node(child, strategy, logger) return class SingleNodeGuesser(object): def __init__(self, guess_func, confidence, logger, *args, **kwargs): self.guess_func = guess_func self.confidence = confidence self.logger = logger self.args = args self.kwargs = kwargs def process(self, mtree): # strategy is a list of pairs (guesser, confidence) # - if the guesser returns a guessit.Guess and confidence is specified, # it will override it, otherwise it will leave the guess confidence # - if the guesser returns a simple dict as a guess and confidence is # specified, it will use it, or 1.0 otherwise strategy = [ (self.guess_func, self.confidence, self.args, self.kwargs) ] for node in mtree.unidentified_leaves(): find_and_split_node(node, strategy, self.logger)
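# A sketch of a matcher usable with SingleNodeGuesser above: it receives the
# sentinel-padded string and returns (properties, span); returning
# (None, None) means no match. The regex and the 'year' property are
# illustrative.
import re

def guess_year(string):
    match = re.search(r'\b(19\d{2}|20\d{2})\b', string)
    if match:
        return {'year': match.group(1)}, match.span(1)
    return None, None

# SingleNodeGuesser(guess_year, 1.0, log).process(mtree)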
gpl-3.0
freedesktop-unofficial-mirror/papyon
papyon/service/ContentRoaming/scenario/store_profile.py
6
3579
# -*- coding: utf-8 -*- # # Copyright (C) 2007 Johann Prieur <[email protected]> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # from base import * from papyon.util.async import * __all__ = ['StoreProfileScenario'] class StoreProfileScenario(BaseScenario): def __init__(self, storage, callback, errback, cid, profile_id, expression_profile_id, display_picture_id, display_name='', personal_message='', display_picture=''): """Updates the roaming profile stored on the server @param storage: the storage service @param callback: tuple(callable, *args) @param errback: tuple(callable, *args) """ BaseScenario.__init__(self, 'RoamingIdentityChanged', callback, errback) self.__storage = storage self.__cid = cid self.__profile_id = profile_id self.__expression_profile_id = expression_profile_id self.__display_picture_id = display_picture_id self.display_name = display_name self.personal_message = personal_message self.display_picture = display_picture def execute(self): self.__storage.UpdateProfile((self.__update_profile_callback,), self._errback, self._scenario, self.__profile_id, self.display_name, self.personal_message, 0) def __update_profile_callback(self): if not self.display_picture or not self.__display_picture_id: run(self._callback) elif not self.__cid: self.__delete_relationship_profile_callback() else: self.__storage.DeleteRelationships( (self.__delete_relationship_profile_callback,), self._errback, self._scenario, self.__display_picture_id, self.__cid, None) def __delete_relationship_profile_callback(self): if not self.__expression_profile_id: self.__delete_relationship_expression_callback() else: self.__storage.DeleteRelationships( (self.__delete_relationship_expression_callback,), self._errback, self._scenario, self.__display_picture_id, None, self.__expression_profile_id) def __delete_relationship_expression_callback(self): # FIXME : add support for dp name self.__storage.CreateDocument( (self.__create_document_callback,), self._errback, self._scenario, self.__cid, "roaming", self.display_picture[0], self.display_picture[1].encode('base64')) def __create_document_callback(self, document_rid): self.__storage.CreateRelationships(self._callback, self._errback, self._scenario, self.__expression_profile_id, document_rid)
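# Hedged usage sketch (the storage service and identifiers are placeholders;
# callback/errback follow the tuple(callable, *args) convention from the
# docstring):
def on_stored():
    print "profile stored"

def on_error(error):
    print "store failed:", error

StoreProfileScenario(storage, (on_stored,), (on_error,),
                     cid, profile_id, expression_profile_id,
                     display_picture_id, display_name='Alice',
                     personal_message='Hello').execute()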
gpl-2.0
google/deepvariant
deepvariant/model_train.py
1
11333
# Copyright 2017 Google LLC. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """Trains the DeepVariant model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import sys if 'google' in sys.modules and 'google.protobuf' not in sys.modules: del sys.modules['google'] import json import os from absl import flags from absl import logging import tensorflow as tf from third_party.nucleus.util import proto_utils from deepvariant import data_providers from deepvariant import logging_level from deepvariant import modeling from deepvariant import tf_utils FLAGS = flags.FLAGS # Data set selection parameters flags.DEFINE_string('dataset_config_pbtxt', None, 'The path to the dataset config file.') flags.DEFINE_string('model_name', 'inception_v3', 'The name of the model to use for predictions.') flags.DEFINE_integer('batch_size', 4096, 'The number of samples in each batch.') # Cloud TPU Cluster Resolvers flags.DEFINE_string( 'gcp_project', None, 'Project name for the Cloud TPU-enabled project. If not specified, we ' 'will attempt to automatically detect the GCE project from metadata.') flags.DEFINE_string( 'tpu_zone', None, 'GCE zone where the Cloud TPU is located in. If not specified, we ' 'will attempt to automatically detect the GCE project from metadata.') flags.DEFINE_string( 'tpu_name', None, 'Name of the Cloud TPU for Cluster Resolvers. You must specify either ' 'this flag or --master.') flags.DEFINE_string( 'master', None, 'GRPC URL of the master (e.g. grpc://ip.address.of.tpu:8470). You ' 'must specify either this flag or --tpu_name.') flags.DEFINE_string('train_dir', '/tmp/deepvariant/', 'Directory where to write event logs.') flags.DEFINE_boolean('use_tpu', False, 'use tpu if available') flags.DEFINE_integer('worker_replicas', 1, 'Number of worker replicas.') flags.DEFINE_integer( 'ps_tasks', 0, 'The number of parameter servers. 
If the value is 0, then the parameters ' 'are handled locally by the worker.') flags.DEFINE_integer('task', 0, 'Task id of the replica running the training.') flags.DEFINE_integer('number_of_steps', 8000000, 'Maximum number of global steps to take when training.') flags.DEFINE_integer( 'num_retries', 0, 'The number of times to retry on InternalError or UnavailableError.') flags.DEFINE_integer( 'max_examples', None, 'The maximum number of examples to use in training. If None, all examples ' 'will be used. If not None, the first max_examples examples from the ' 'dataset will be used, with those same examples repeating over and over.') # Pre-trained model parameters flags.DEFINE_string( 'start_from_checkpoint', 'model_default', 'A path to a checkpoint of model weights to initialize our model at the ' 'start of training. If None or "", the model will start from random weights' '. The special value "model_default" will use the default pretrained ' 'path for the selected model.') flags.DEFINE_integer( 'max_checkpoints_to_keep', 10, 'Number of last checkpoints to keep during training. ' 'Passing "0" preserves all checkpoints.') flags.DEFINE_string( 'kmp_blocktime', '0', 'Value to set the KMP_BLOCKTIME environment variable to for efficient MKL ' 'training. See https://www.tensorflow.org/performance/performance_guide ' 'for more information. The default value is 0, which provides the best ' 'performance in our tests. Set this flag to "" to not set the variable.') flags.DEFINE_integer( 'random_seed', 400620758, 'Random seed value to use for TensorFlow. Providing a value != 0 will ' 'result in a call to tf.set_random_seed(FLAGS.random_seed), making ' 'training more deterministic. If set to 0, the TensorFlow random seed ' 'will not be set at all, and TensorFlow will assign it a pseudo-random ' 'value each time model_train is run.') def loss(logits, one_hot_labels, label_smoothing): """Creates a loss function for training logits against one_hot_labels. Args: logits: tensor. logits of the model we want to train. one_hot_labels: One-hot encoded truth labels that we want to train this model to predict. label_smoothing: float. label_smoothing value for softmax_cross_entropy. Returns: A `Tensor` whose value represents the total loss. """ tf.compat.v1.losses.softmax_cross_entropy( logits, one_hot_labels, label_smoothing=label_smoothing, weights=1.0) return tf.compat.v1.losses.get_total_loss() def run(target, unused_is_chief, device_fn, use_tpu): """Run training. Args: target: The target of the TensorFlow standard server to use. Can be the empty string to run locally using an inprocess server. device_fn: Device function used to assign ops to devices. use_tpu: turn on tpu code path. """ if not FLAGS.dataset_config_pbtxt: logging.error('Need to specify --dataset_config_pbtxt') return g = tf.Graph() with g.as_default(): with tf.device(device_fn): # If ps_tasks is zero, the local device is used. When using multiple # (non-local) replicas, the ReplicaDeviceSetter distributes the variables # across the different devices. 
      tf_dataset = data_providers.get_input_fn_from_dataset(
          dataset_config_filename=FLAGS.dataset_config_pbtxt,
          mode=tf.estimator.ModeKeys.TRAIN,
          max_examples=FLAGS.max_examples,
          use_tpu=use_tpu)
      model = modeling.get_model(FLAGS.model_name)
      logging.info('Running training on %s with model %s and tpu %s',
                   tf_dataset, FLAGS.model_name, use_tpu)

      batches_per_epoch = tf_dataset.num_examples // FLAGS.batch_size
      logging.info('Batches per epoch %s', batches_per_epoch)
      params = dict(batches_per_epoch=batches_per_epoch,)
      estimator = model.make_estimator(
          batch_size=FLAGS.batch_size,
          model_dir=FLAGS.train_dir,
          params=params,
          use_tpu=use_tpu,
          master=target,
          start_from_checkpoint=FLAGS.start_from_checkpoint,
      )

      estimator.train(
          input_fn=tf_dataset, max_steps=FLAGS.number_of_steps, hooks=None)


def parse_and_run():
  """Parse TF_CONFIG to cluster_spec and call run().

  TF_CONFIG environment variable is available when running using
  gcloud either locally or on cloud. It has all the information required
  to create a ClusterSpec which is important for running distributed code.

  Raises:
    ValueError: If flags are invalid.
  """
  tf_config = os.environ.get('TF_CONFIG')
  logging.info('TF_CONFIG %s', tf_config)

  for name in ['master', 'task', 'ps_tasks']:
    if getattr(FLAGS, name) and tf_config:
      raise ValueError(
          'Either the flag --%s or the environment variable TF_CONFIG can be'
          ' set but not both.' % name)

  # redacted
  #
  # If TF_CONFIG is not available we are either running locally in Cloud
  # or distributed inside Google. On Cloud the default values of
  # FLAGS.master and FLAGS.task correspond to running training locally.
  # Inside Google they will be set as needed to configure local or distributed
  # training. Inside Google we don't need to explicitly set worker_device
  # in replica_device_setter because this will be set automatically based
  # on various flags.
if not tf_config: device_fn = tf.compat.v1.train.replica_device_setter(FLAGS.ps_tasks) # pylint: disable=g-long-ternary master = tf_utils.resolve_master(FLAGS.master, FLAGS.tpu_name, FLAGS.tpu_zone, FLAGS.gcp_project) if FLAGS.use_tpu else '' return run( master, FLAGS.task == 0, device_fn=device_fn, use_tpu=FLAGS.use_tpu) tf_config_json = json.loads(tf_config) cluster = tf_config_json.get('cluster') job_name = tf_config_json.get('task', {}).get('type') task_index = tf_config_json.get('task', {}).get('index') # If cluster information is empty run local if job_name is None or task_index is None: device_fn = tf.compat.v1.train.replica_device_setter(0) return run('', True, device_fn=device_fn, use_tpu=FLAGS.use_tpu) ps = cluster.get('ps', []) num_ps = len(ps) cluster_spec = tf.train.ClusterSpec(cluster) server = tf.distribute.Server( cluster_spec, job_name=job_name, task_index=task_index) if job_name == 'ps': server.join() return elif job_name in ['master', 'worker']: device_fn = tf.compat.v1.train.replica_device_setter( num_ps, worker_device='/job:%s/task:%d' % (job_name, task_index), cluster=cluster_spec) return run( server.target, job_name == 'master', device_fn=device_fn, use_tpu=FLAGS.use_tpu) def main(_): """Run and handle retryable errors.""" proto_utils.uses_fast_cpp_protos_or_die() logging_level.set_from_flag() if FLAGS.random_seed: logging.info('Setting tf.random_seed to %d', FLAGS.random_seed) tf.compat.v1.set_random_seed(FLAGS.random_seed) else: logging.info('Not setting tf.random_seed, will be assigned a random value') if FLAGS.kmp_blocktime: os.environ['KMP_BLOCKTIME'] = FLAGS.kmp_blocktime logging.info('Set KMP_BLOCKTIME to %s', os.environ['KMP_BLOCKTIME']) for _ in range(FLAGS.num_retries + 1): try: parse_and_run() return except tf.errors.UnavailableError as e: # An UnavailableError indicates a gRPC error, typically this is # retryable. logging.error('Caught UnavailableError %s; will retry.', e) except tf.errors.InternalError as e: # Retry on an InternalError. logging.error('Caught InternalError %s; will retry.', e) if __name__ == '__main__': tf.compat.v1.app.run()
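# A sketch of the TF_CONFIG layout parse_and_run() reads (hosts and the task
# assignment are placeholders):
import json
import os

os.environ['TF_CONFIG'] = json.dumps({
    'cluster': {'master': ['host0:2222'],
                'worker': ['host1:2222'],
                'ps': ['host2:2222']},
    'task': {'type': 'worker', 'index': 0},
})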
bsd-3-clause
avlach/univbris-ocf
optin_manager/src/python/openflow/optin_manager/sfa/rspecs/.oldversions/sfav1.py
2
8905
from copy import deepcopy from lxml import etree from openflow.optin_manager.sfa.util.sfalogging import logger from openflow.optin_manager.sfa.util.xrn import hrn_to_urn, urn_to_hrn from openflow.optin_manager.sfa.rspecs.version import RSpecVersion from openflow.optin_manager.sfa.rspecs.elements.element import Element from openflow.optin_manager.sfa.rspecs.elements.versions.pgv2Link import PGv2Link from openflow.optin_manager.sfa.rspecs.elements.versions.sfav1Node import SFAv1Node from openflow.optin_manager.sfa.rspecs.elements.versions.sfav1Sliver import SFAv1Sliver from openflow.optin_manager.sfa.rspecs.elements.versions.sfav1Lease import SFAv1Lease class SFAv1(RSpecVersion): enabled = True type = 'SFA' content_type = '*' version = '1' schema = None namespace = None extensions = {} namespaces = None template = '<RSpec type="%s"></RSpec>' % type # Network def get_networks(self): network_elems = self.xml.xpath('//network') networks = [network_elem.get_instance(fields=['name', 'slice']) for \ network_elem in network_elems] return networks def add_network(self, network): network_tags = self.xml.xpath('//network[@name="%s"]' % network) if not network_tags: network_tag = self.xml.add_element('network', name=network) else: network_tag = network_tags[0] return network_tag # Nodes def get_nodes(self, filter=None): return SFAv1Node.get_nodes(self.xml, filter) def get_nodes_with_slivers(self): return SFAv1Node.get_nodes_with_slivers(self.xml) def add_nodes(self, nodes, network = None, no_dupes=False): SFAv1Node.add_nodes(self.xml, nodes) def merge_node(self, source_node_tag, network, no_dupes=False): if no_dupes and self.get_node_element(node['hostname']): # node already exists return network_tag = self.add_network(network) network_tag.append(deepcopy(source_node_tag)) # Slivers def add_slivers(self, hostnames, attributes=[], sliver_urn=None, append=False): # add slice name to network tag network_tags = self.xml.xpath('//network') if network_tags: network_tag = network_tags[0] network_tag.set('slice', urn_to_hrn(sliver_urn)[0]) # add slivers sliver = {'name':sliver_urn, 'pl_tags': attributes} for hostname in hostnames: if sliver_urn: sliver['name'] = sliver_urn node_elems = self.get_nodes({'component_id': '*%s*' % hostname}) if not node_elems: continue node_elem = node_elems[0] SFAv1Sliver.add_slivers(node_elem.element, sliver) # remove all nodes without slivers if not append: for node_elem in self.get_nodes(): if not node_elem['slivers']: parent = node_elem.element.getparent() parent.remove(node_elem.element) def remove_slivers(self, slivers, network=None, no_dupes=False): SFAv1Node.remove_slivers(self.xml, slivers) def get_slice_attributes(self, network=None): attributes = [] nodes_with_slivers = self.get_nodes_with_slivers() for default_attribute in self.get_default_sliver_attributes(network): attribute = default_attribute.copy() attribute['node_id'] = None attributes.append(attribute) for node in nodes_with_slivers: nodename=node['component_name'] sliver_attributes = self.get_sliver_attributes(nodename, network) for sliver_attribute in sliver_attributes: sliver_attribute['node_id'] = nodename attributes.append(sliver_attribute) return attributes def add_sliver_attribute(self, component_id, name, value, network=None): nodes = self.get_nodes({'component_id': '*%s*' % component_id}) if nodes is not None and isinstance(nodes, list) and len(nodes) > 0: node = nodes[0] slivers = SFAv1Sliver.get_slivers(node) if slivers: sliver = slivers[0] SFAv1Sliver.add_sliver_attribute(sliver, name, value) else: # 
should this be an assert / raise an exception? logger.error("WARNING: failed to find component_id %s" % component_id) def get_sliver_attributes(self, component_id, network=None): nodes = self.get_nodes({'component_id': '*%s*' % component_id}) attribs = [] if nodes is not None and isinstance(nodes, list) and len(nodes) > 0: node = nodes[0] slivers = SFAv1Sliver.get_slivers(node.element) if slivers is not None and isinstance(slivers, list) and len(slivers) > 0: sliver = slivers[0] attribs = SFAv1Sliver.get_sliver_attributes(sliver.element) return attribs def remove_sliver_attribute(self, component_id, name, value, network=None): attribs = self.get_sliver_attributes(component_id) for attrib in attribs: if attrib['name'] == name and attrib['value'] == value: #attrib.element.delete() parent = attrib.element.getparent() parent.remove(attrib.element) def add_default_sliver_attribute(self, name, value, network=None): if network: defaults = self.xml.xpath("//network[@name='%s']/sliver_defaults" % network) else: defaults = self.xml.xpath("//sliver_defaults") if not defaults: if network: network_tag = self.xml.xpath("//network[@name='%s']" % network) else: network_tag = self.xml.xpath("//network") if isinstance(network_tag, list): network_tag = network_tag[0] defaults = network_tag.add_element('sliver_defaults') elif isinstance(defaults, list): defaults = defaults[0] SFAv1Sliver.add_sliver_attribute(defaults, name, value) def get_default_sliver_attributes(self, network=None): if network: defaults = self.xml.xpath("//network[@name='%s']/sliver_defaults" % network) else: defaults = self.xml.xpath("//sliver_defaults") if not defaults: return [] return SFAv1Sliver.get_sliver_attributes(defaults[0]) def remove_default_sliver_attribute(self, name, value, network=None): attribs = self.get_default_sliver_attributes(network) for attrib in attribs: if attrib['name'] == name and attrib['value'] == value: #attrib.element.delete() parent = attrib.element.getparent() parent.remove(attrib.element) # Links def get_links(self, network=None): return PGv2Link.get_links(self.xml) def get_link_requests(self): return PGv2Link.get_link_requests(self.xml) def add_links(self, links): networks = self.get_networks() if len(networks) > 0: xml = networks[0].element else: xml = self.xml PGv2Link.add_links(xml, links) def add_link_requests(self, links): PGv2Link.add_link_requests(self.xml, links) # utility def merge(self, in_rspec): """ Merge contents for specified rspec with current rspec """ if not in_rspec: return from openflow.optin_manager.sfa.rspecs.rspec import RSpec if isinstance(in_rspec, RSpec): rspec = in_rspec else: rspec = RSpec(in_rspec) if rspec.version.type.lower() == 'protogeni': from openflow.optin_manager.sfa.rspecs.rspec_converter import RSpecConverter in_rspec = RSpecConverter.to_sfa_rspec(rspec.toxml()) rspec = RSpec(in_rspec) # just copy over all networks current_networks = self.get_networks() networks = rspec.version.get_networks() for network in networks: current_network = network.get('name') if current_network and current_network not in current_networks: self.xml.append(network.element) current_networks.append(current_network) # Leases def get_leases(self, filter=None): return SFAv1Lease.get_leases(self.xml, filter) def add_leases(self, leases, network = None, no_dupes=False): SFAv1Lease.add_leases(self.xml, leases) if __name__ == '__main__': from openflow.optin_manager.sfa.rspecs.rspec import RSpec from openflow.optin_manager.sfa.rspecs.rspec_elements import * r = RSpec('/tmp/resources.rspec') 
r.load_rspec_elements(SFAv1.elements) print r.get(RSpecElements.NODE)
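# A further sketch building on the __main__ block above (the network name
# and attribute values are placeholders; 'r.version' is the SFAv1 instance
# backing the loaded rspec):
r.version.add_default_sliver_attribute('vref', 'f14', network='plc')
print r.version.get_default_sliver_attributes('plc')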
bsd-3-clause
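A minimal usage sketch for the sliver-attribute and merge helpers in the rspec version class above. The file paths and the 'initscript' attribute name are illustrative only, and RSpec is assumed to accept a file path as in the __main__ block of that module:

from openflow.optin_manager.sfa.rspecs.rspec import RSpec

base = RSpec('/tmp/base.rspec')  # hypothetical SFA v1 rspec on disk
# record a default sliver attribute under <sliver_defaults>
base.version.add_default_sliver_attribute('initscript', 'epilogue.sh')
# defaults read back as dict-like objects with 'name'/'value' keys
for attrib in base.version.get_default_sliver_attributes():
    print attrib['name'], attrib['value']
# fold the networks of a second rspec into this one; merge() skips
# networks whose name is already present
base.version.merge('/tmp/other.rspec')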
popazerty/openblackhole-SH4
lib/python/Screens/ServiceScan.py
6
4074
import Screens.InfoBar from enigma import eServiceReference from Screens.Screen import Screen from Components.ServiceScan import ServiceScan as CScan from Components.ProgressBar import ProgressBar from Components.Label import Label from Components.ActionMap import ActionMap from Components.FIFOList import FIFOList from Components.Sources.FrontendInfo import FrontendInfo from Components.config import config class ServiceScanSummary(Screen): skin = """ <screen position="0,0" size="132,64"> <widget name="Title" position="6,4" size="120,42" font="Regular;16" transparent="1" /> <widget name="scan_progress" position="6,50" zPosition="1" borderWidth="1" size="56,12" backgroundColor="dark" /> <widget name="Service" position="6,22" size="120,26" font="Regular;12" transparent="1" /> </screen>""" def __init__(self, session, parent, showStepSlider = True): Screen.__init__(self, session, parent) self["Title"] = Label(parent.title or _("Service scan")) self["Service"] = Label(_("No service")) self["scan_progress"] = ProgressBar() def updateProgress(self, value): self["scan_progress"].setValue(value) def updateService(self, name): self["Service"].setText(name) class ServiceScan(Screen): def ok(self): if self["scan"].isDone(): if self.currentInfobar.__class__.__name__ == "InfoBar": selectedService = self["servicelist"].getCurrentSelection() if selectedService and self.currentServiceList is not None: self.currentServiceList.setTvMode() bouquets = self.currentServiceList.getBouquetList() last_scanned_bouquet = bouquets and next((x[1] for x in bouquets if x[0] == "Last Scanned"), None) if last_scanned_bouquet: self.currentServiceList.enterUserbouquet(last_scanned_bouquet) self.currentServiceList.setCurrentSelection(eServiceReference(selectedService[1])) service = self.currentServiceList.getCurrentSelection() if not self.session.postScanService or service != self.session.postScanService: self.session.postScanService = service self.currentServiceList.addToHistory(service) config.servicelist.lastmode.save() self.currentServiceList.saveChannel(service) self.doCloseRecursive() self.cancel() def cancel(self): self.exit(False) def doCloseRecursive(self): self.exit(True) def exit(self, returnValue): if self.currentInfobar.__class__.__name__ == "InfoBar": self.close(returnValue) self.close() def __init__(self, session, scanList): Screen.__init__(self, session) self["Title"] = Label(_("Scanning...")) self.scanList = scanList if hasattr(session, 'infobar'): self.currentInfobar = Screens.InfoBar.InfoBar.instance if self.currentInfobar: self.currentServiceList = self.currentInfobar.servicelist if self.session.pipshown and self.currentServiceList: if self.currentServiceList.dopipzap: self.currentServiceList.togglePipzap() if hasattr(self.session, 'pip'): del self.session.pip self.session.pipshown = False else: self.currentInfobar = None self.session.nav.stopService() self["scan_progress"] = ProgressBar() self["scan_state"] = Label(_("scan state")) self["network"] = Label() self["transponder"] = Label() self["pass"] = Label("") self["servicelist"] = FIFOList() self["FrontendInfo"] = FrontendInfo() self["key_red"] = Label(_("Cancel")) self["key_green"] = Label(_("OK")) self["actions"] = ActionMap(["SetupActions", "MenuActions"], { "ok": self.ok, "save": self.ok, "cancel": self.cancel, "menu": self.doCloseRecursive }, -2) self.setTitle("Service scan") self.onFirstExecBegin.append(self.doServiceScan) def doServiceScan(self): self["servicelist"].len = self["servicelist"].instance.size().height() / 
self["servicelist"].l.getItemSize().height() self["scan"] = CScan(self["scan_progress"], self["scan_state"], self["servicelist"], self["pass"], self.scanList, self["network"], self["transponder"], self["FrontendInfo"], self.session.summary) def createSummary(self): return ServiceScanSummary
gpl-2.0
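The "Last Scanned" lookup in ok() above leans on next() with a default, which yields None instead of raising StopIteration when no bouquet matches; a standalone illustration with made-up bouquet tuples:

bouquets = [("Favourites", "ref1"), ("Last Scanned", "ref2")]
match = next((ref for name, ref in bouquets if name == "Last Scanned"), None)
print match  # "ref2" here; None whenever no bouquet is named "Last Scanned"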
jabesq/home-assistant
tests/components/mobile_app/test_entity.py
2
4044
"""Entity tests for mobile_app.""" # pylint: disable=redefined-outer-name,unused-import import logging _LOGGER = logging.getLogger(__name__) async def test_sensor(hass, create_registrations, webhook_client): # noqa: F401, F811, E501 """Test that sensors can be registered and updated.""" webhook_id = create_registrations[1]['webhook_id'] webhook_url = '/api/webhook/{}'.format(webhook_id) reg_resp = await webhook_client.post( webhook_url, json={ 'type': 'register_sensor', 'data': { 'attributes': { 'foo': 'bar' }, 'device_class': 'battery', 'icon': 'mdi:battery', 'name': 'Battery State', 'state': 100, 'type': 'sensor', 'unique_id': 'battery_state', 'unit_of_measurement': '%' } } ) assert reg_resp.status == 201 json = await reg_resp.json() assert json == {'success': True} await hass.async_block_till_done() entity = hass.states.get('sensor.battery_state') assert entity is not None assert entity.attributes['device_class'] == 'battery' assert entity.attributes['icon'] == 'mdi:battery' assert entity.attributes['unit_of_measurement'] == '%' assert entity.attributes['foo'] == 'bar' assert entity.domain == 'sensor' assert entity.name == 'Battery State' assert entity.state == '100' update_resp = await webhook_client.post( webhook_url, json={ 'type': 'update_sensor_states', 'data': [ { 'icon': 'mdi:battery-unknown', 'state': 123, 'type': 'sensor', 'unique_id': 'battery_state' } ] } ) assert update_resp.status == 200 updated_entity = hass.states.get('sensor.battery_state') assert updated_entity.state == '123' async def test_sensor_must_register(hass, create_registrations, # noqa: F401, F811, E501 webhook_client): # noqa: F401, F811, E501 """Test that sensors must be registered before updating.""" webhook_id = create_registrations[1]['webhook_id'] webhook_url = '/api/webhook/{}'.format(webhook_id) resp = await webhook_client.post( webhook_url, json={ 'type': 'update_sensor_states', 'data': [ { 'state': 123, 'type': 'sensor', 'unique_id': 'battery_state' } ] } ) assert resp.status == 200 json = await resp.json() assert json['battery_state']['success'] is False assert json['battery_state']['error']['code'] == 'not_registered' async def test_sensor_id_no_dupes(hass, create_registrations, # noqa: F401, F811, E501 webhook_client): # noqa: F401, F811, E501 """Test that sensors must have a unique ID.""" webhook_id = create_registrations[1]['webhook_id'] webhook_url = '/api/webhook/{}'.format(webhook_id) payload = { 'type': 'register_sensor', 'data': { 'attributes': { 'foo': 'bar' }, 'device_class': 'battery', 'icon': 'mdi:battery', 'name': 'Battery State', 'state': 100, 'type': 'sensor', 'unique_id': 'battery_state', 'unit_of_measurement': '%' } } reg_resp = await webhook_client.post(webhook_url, json=payload) assert reg_resp.status == 201 reg_json = await reg_resp.json() assert reg_json == {'success': True} dupe_resp = await webhook_client.post(webhook_url, json=payload) assert dupe_resp.status == 409 dupe_json = await dupe_resp.json() assert dupe_json['success'] is False assert dupe_json['error']['code'] == 'duplicate_unique_id'
apache-2.0
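The registration and update flow those tests drive through the test client could be exercised against a live instance from any HTTP client. A hedged sketch using the requests library; the host and webhook id are placeholders, and the payload shapes are taken directly from the tests above:

import requests

url = 'http://hass.local:8123/api/webhook/YOUR_WEBHOOK_ID'  # placeholder

# register the sensor first; the server answers 201 on success
register = {
    'type': 'register_sensor',
    'data': {
        'device_class': 'battery',
        'icon': 'mdi:battery',
        'name': 'Battery State',
        'state': 100,
        'type': 'sensor',
        'unique_id': 'battery_state',
        'unit_of_measurement': '%',
    },
}
assert requests.post(url, json=register).status_code == 201

# subsequent state pushes reference the same unique_id
update = {
    'type': 'update_sensor_states',
    'data': [{'icon': 'mdi:battery-80', 'state': 80,
              'type': 'sensor', 'unique_id': 'battery_state'}],
}
assert requests.post(url, json=update).status_code == 200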
yodalee/servo
tests/wpt/web-platform-tests/encrypted-media/polyfill/make-polyfill-tests.py
16
1180
#!/usr/bin/python import os, re, os.path, glob head = re.compile( r"^(\s*</head>)", re.MULTILINE ) runtest = re.compile( r"runTest\(\s*(\S.*?)\s*\)", re.DOTALL ) scripts = ''' <!-- Polyfill files (NOTE: These are added by auto-generation script) --> <script src=/encrypted-media/polyfill/chrome-polyfill.js></script> <script src=/encrypted-media/polyfill/firefox-polyfill.js></script> <script src=/encrypted-media/polyfill/edge-polyfill.js></script> <script src=/encrypted-media/polyfill/clearkey-polyfill.js></script>''' def process_file( infile, outfile ) : with open( outfile, "w" ) as output : with open( infile, "r" ) as input : output.write( runtest.sub( r"runTest( \1, 'polyfill: ' )", head.sub( scripts + r"\1", input.read() ) ) ) if __name__ == '__main__' : if (not os.getcwd().endswith('polyfill')) : print "Please run from polyfill directory" exit( 1 ) for infile in glob.glob( "../*.html" ) : process_file( infile, os.path.basename( infile ) ) for infile in glob.glob( "../resources/*.html" ) : process_file( infile, os.path.join( "resources", os.path.basename( infile ) ) )
mpl-2.0
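What the two substitutions in that generator do, shown on a tiny inline page; the HTML snippet and script name are invented for illustration:

import re

head = re.compile(r"^(\s*</head>)", re.MULTILINE)
runtest = re.compile(r"runTest\(\s*(\S.*?)\s*\)", re.DOTALL)

page = "<head>\n</head>\n<script>runTest( config )</script>"
# polyfill script tags land just before the closing </head>
page = head.sub("<script src=poly.js></script>\n" + r"\1", page)
# every runTest(...) call gains a 'polyfill: ' name-prefix argument
page = runtest.sub(r"runTest( \1, 'polyfill: ' )", page)
print page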
NixaSoftware/CVis
venv/lib/python2.7/site-packages/pandas/tests/test_sorting.py
4
17560
import pytest from itertools import product from collections import defaultdict import warnings from datetime import datetime import numpy as np from numpy import nan import pandas as pd from pandas.core import common as com from pandas import DataFrame, MultiIndex, merge, concat, Series, compat from pandas.util import testing as tm from pandas.util.testing import assert_frame_equal, assert_series_equal from pandas.core.sorting import (is_int64_overflow_possible, decons_group_index, get_group_index, nargsort, lexsort_indexer, safe_sort) class TestSorting(object): @pytest.mark.slow def test_int64_overflow(self): B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500))) A = np.arange(2500) df = DataFrame({'A': A, 'B': B, 'C': A, 'D': B, 'E': A, 'F': B, 'G': A, 'H': B, 'values': np.random.randn(2500)}) lg = df.groupby(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H']) rg = df.groupby(['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A']) left = lg.sum()['values'] right = rg.sum()['values'] exp_index, _ = left.index.sortlevel() tm.assert_index_equal(left.index, exp_index) exp_index, _ = right.index.sortlevel(0) tm.assert_index_equal(right.index, exp_index) tups = list(map(tuple, df[['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H' ]].values)) tups = com._asarray_tuplesafe(tups) expected = df.groupby(tups).sum()['values'] for k, v in compat.iteritems(expected): assert left[k] == right[k[::-1]] assert left[k] == v assert len(left) == len(right) def test_int64_overflow_moar(self): # GH9096 values = range(55109) data = pd.DataFrame.from_dict({'a': values, 'b': values, 'c': values, 'd': values}) grouped = data.groupby(['a', 'b', 'c', 'd']) assert len(grouped) == len(values) arr = np.random.randint(-1 << 12, 1 << 12, (1 << 15, 5)) i = np.random.choice(len(arr), len(arr) * 4) arr = np.vstack((arr, arr[i])) # add some duplicate rows i = np.random.permutation(len(arr)) arr = arr[i] # shuffle rows df = DataFrame(arr, columns=list('abcde')) df['jim'], df['joe'] = np.random.randn(2, len(df)) * 10 gr = df.groupby(list('abcde')) # verify this is testing what it is supposed to test!
assert is_int64_overflow_possible(gr.grouper.shape) # manually compute groupings jim, joe = defaultdict(list), defaultdict(list) for key, a, b in zip(map(tuple, arr), df['jim'], df['joe']): jim[key].append(a) joe[key].append(b) assert len(gr) == len(jim) mi = MultiIndex.from_tuples(jim.keys(), names=list('abcde')) def aggr(func): f = lambda a: np.fromiter(map(func, a), dtype='f8') arr = np.vstack((f(jim.values()), f(joe.values()))).T res = DataFrame(arr, columns=['jim', 'joe'], index=mi) return res.sort_index() assert_frame_equal(gr.mean(), aggr(np.mean)) assert_frame_equal(gr.median(), aggr(np.median)) def test_lexsort_indexer(self): keys = [[nan] * 5 + list(range(100)) + [nan] * 5] # orders=True, na_position='last' result = lexsort_indexer(keys, orders=True, na_position='last') exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110)) tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp)) # orders=True, na_position='first' result = lexsort_indexer(keys, orders=True, na_position='first') exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105)) tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp)) # orders=False, na_position='last' result = lexsort_indexer(keys, orders=False, na_position='last') exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)) tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp)) # orders=False, na_position='first' result = lexsort_indexer(keys, orders=False, na_position='first') exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)) tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp)) def test_nargsort(self): # np.argsort(items) places NaNs last items = [nan] * 5 + list(range(100)) + [nan] * 5 # np.argsort(items2) may not place NaNs first items2 = np.array(items, dtype='O') try: # GH 2785; due to a regression in NumPy1.6.2 np.argsort(np.array([[1, 2], [1, 3], [1, 2]], dtype='i')) np.argsort(items2, kind='mergesort') except TypeError: pytest.skip('requested sort not available for type') # mergesort is the most difficult to get right because we want it to be # stable.
# According to numpy/core/tests/test_multiarray, """The number of # sorted items must be greater than ~50 to check the actual algorithm # because quick and merge sort fall over to insertion sort for small # arrays.""" # mergesort, ascending=True, na_position='last' result = nargsort(items, kind='mergesort', ascending=True, na_position='last') exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110)) tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) # mergesort, ascending=True, na_position='first' result = nargsort(items, kind='mergesort', ascending=True, na_position='first') exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105)) tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) # mergesort, ascending=False, na_position='last' result = nargsort(items, kind='mergesort', ascending=False, na_position='last') exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)) tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) # mergesort, ascending=False, na_position='first' result = nargsort(items, kind='mergesort', ascending=False, na_position='first') exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)) tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) # mergesort, ascending=True, na_position='last' result = nargsort(items2, kind='mergesort', ascending=True, na_position='last') exp = list(range(5, 105)) + list(range(5)) + list(range(105, 110)) tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) # mergesort, ascending=True, na_position='first' result = nargsort(items2, kind='mergesort', ascending=True, na_position='first') exp = list(range(5)) + list(range(105, 110)) + list(range(5, 105)) tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) # mergesort, ascending=False, na_position='last' result = nargsort(items2, kind='mergesort', ascending=False, na_position='last') exp = list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)) tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) # mergesort, ascending=False, na_position='first' result = nargsort(items2, kind='mergesort', ascending=False, na_position='first') exp = list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)) tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) class TestMerge(object): @pytest.mark.slow def test_int64_overflow_issues(self): # #2690, combinatorial explosion df1 = DataFrame(np.random.randn(1000, 7), columns=list('ABCDEF') + ['G1']) df2 = DataFrame(np.random.randn(1000, 7), columns=list('ABCDEF') + ['G2']) # it works! 
result = merge(df1, df2, how='outer') assert len(result) == 2000 low, high, n = -1 << 10, 1 << 10, 1 << 20 left = DataFrame(np.random.randint(low, high, (n, 7)), columns=list('ABCDEFG')) left['left'] = left.sum(axis=1) # one-2-one match i = np.random.permutation(len(left)) right = left.iloc[i].copy() right.columns = right.columns[:-1].tolist() + ['right'] right.index = np.arange(len(right)) right['right'] *= -1 out = merge(left, right, how='outer') assert len(out) == len(left) assert_series_equal(out['left'], - out['right'], check_names=False) result = out.iloc[:, :-2].sum(axis=1) assert_series_equal(out['left'], result, check_names=False) assert result.name is None out.sort_values(out.columns.tolist(), inplace=True) out.index = np.arange(len(out)) for how in ['left', 'right', 'outer', 'inner']: assert_frame_equal(out, merge(left, right, how=how, sort=True)) # check that left merge w/ sort=False maintains left frame order out = merge(left, right, how='left', sort=False) assert_frame_equal(left, out[left.columns.tolist()]) out = merge(right, left, how='left', sort=False) assert_frame_equal(right, out[right.columns.tolist()]) # one-2-many/none match n = 1 << 11 left = DataFrame(np.random.randint(low, high, (n, 7)).astype('int64'), columns=list('ABCDEFG')) # confirm that this is checking what it is supposed to check shape = left.apply(Series.nunique).values assert is_int64_overflow_possible(shape) # add duplicates to left frame left = concat([left, left], ignore_index=True) right = DataFrame(np.random.randint(low, high, (n // 2, 7)) .astype('int64'), columns=list('ABCDEFG')) # add duplicates & overlap with left to the right frame i = np.random.choice(len(left), n) right = concat([right, right, left.iloc[i]], ignore_index=True) left['left'] = np.random.randn(len(left)) right['right'] = np.random.randn(len(right)) # shuffle left & right frames i = np.random.permutation(len(left)) left = left.iloc[i].copy() left.index = np.arange(len(left)) i = np.random.permutation(len(right)) right = right.iloc[i].copy() right.index = np.arange(len(right)) # manually compute outer merge ldict, rdict = defaultdict(list), defaultdict(list) for idx, row in left.set_index(list('ABCDEFG')).iterrows(): ldict[idx].append(row['left']) for idx, row in right.set_index(list('ABCDEFG')).iterrows(): rdict[idx].append(row['right']) vals = [] for k, lval in ldict.items(): rval = rdict.get(k, [np.nan]) for lv, rv in product(lval, rval): vals.append(k + tuple([lv, rv])) for k, rval in rdict.items(): if k not in ldict: for rv in rval: vals.append(k + tuple([np.nan, rv])) def align(df): df = df.sort_values(df.columns.tolist()) df.index = np.arange(len(df)) return df def verify_order(df): kcols = list('ABCDEFG') assert_frame_equal(df[kcols].copy(), df[kcols].sort_values(kcols, kind='mergesort')) out = DataFrame(vals, columns=list('ABCDEFG') + ['left', 'right']) out = align(out) jmask = {'left': out['left'].notna(), 'right': out['right'].notna(), 'inner': out['left'].notna() & out['right'].notna(), 'outer': np.ones(len(out), dtype='bool')} for how in 'left', 'right', 'outer', 'inner': mask = jmask[how] frame = align(out[mask].copy()) assert mask.all() ^ mask.any() or how == 'outer' for sort in [False, True]: res = merge(left, right, how=how, sort=sort) if sort: verify_order(res) # as in GH9092 dtypes break with outer/right join assert_frame_equal(frame, align(res), check_dtype=how not in ('right', 'outer')) def test_decons(): def testit(label_list, shape): group_index = get_group_index(label_list, shape, sort=True, xnull=True) 
label_list2 = decons_group_index(group_index, shape) for a, b in zip(label_list, label_list2): assert (np.array_equal(a, b)) shape = (4, 5, 6) label_list = [np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100), np.tile( [0, 2, 4, 3, 0, 1, 2, 3], 100), np.tile( [5, 1, 0, 2, 3, 0, 5, 4], 100)] testit(label_list, shape) shape = (10000, 10000) label_list = [np.tile(np.arange(10000), 5), np.tile(np.arange(10000), 5)] testit(label_list, shape) class TestSafeSort(object): def test_basic_sort(self): values = [3, 1, 2, 0, 4] result = safe_sort(values) expected = np.array([0, 1, 2, 3, 4]) tm.assert_numpy_array_equal(result, expected) values = list("baaacb") result = safe_sort(values) expected = np.array(list("aaabbc"), dtype='object') tm.assert_numpy_array_equal(result, expected) values = [] result = safe_sort(values) expected = np.array([]) tm.assert_numpy_array_equal(result, expected) def test_labels(self): values = [3, 1, 2, 0, 4] expected = np.array([0, 1, 2, 3, 4]) labels = [0, 1, 1, 2, 3, 0, -1, 4] result, result_labels = safe_sort(values, labels) expected_labels = np.array([3, 1, 1, 2, 0, 3, -1, 4], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) tm.assert_numpy_array_equal(result_labels, expected_labels) # na_sentinel labels = [0, 1, 1, 2, 3, 0, 99, 4] result, result_labels = safe_sort(values, labels, na_sentinel=99) expected_labels = np.array([3, 1, 1, 2, 0, 3, 99, 4], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) tm.assert_numpy_array_equal(result_labels, expected_labels) # out of bound indices labels = [0, 101, 102, 2, 3, 0, 99, 4] result, result_labels = safe_sort(values, labels) expected_labels = np.array([3, -1, -1, 2, 0, 3, -1, 4], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) tm.assert_numpy_array_equal(result_labels, expected_labels) labels = [] result, result_labels = safe_sort(values, labels) expected_labels = np.array([], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) tm.assert_numpy_array_equal(result_labels, expected_labels) def test_mixed_integer(self): values = np.array(['b', 1, 0, 'a', 0, 'b'], dtype=object) result = safe_sort(values) expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object) tm.assert_numpy_array_equal(result, expected) values = np.array(['b', 1, 0, 'a'], dtype=object) labels = [0, 1, 2, 3, 0, -1, 1] result, result_labels = safe_sort(values, labels) expected = np.array([0, 1, 'a', 'b'], dtype=object) expected_labels = np.array([3, 1, 0, 2, 3, -1, 1], dtype=np.intp) tm.assert_numpy_array_equal(result, expected) tm.assert_numpy_array_equal(result_labels, expected_labels) def test_mixed_integer_from_list(self): values = ['b', 1, 0, 'a', 0, 'b'] result = safe_sort(values) expected = np.array([0, 0, 1, 'a', 'b', 'b'], dtype=object) tm.assert_numpy_array_equal(result, expected) def test_unsortable(self): # GH 13714 arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object) if compat.PY2 and not pd._np_version_under1p10: # RuntimeWarning: tp_compare didn't return -1 or -2 for exception with warnings.catch_warnings(): pytest.raises(TypeError, safe_sort, arr) else: pytest.raises(TypeError, safe_sort, arr) def test_exceptions(self): with tm.assert_raises_regex(TypeError, "Only list-like objects are allowed"): safe_sort(values=1) with tm.assert_raises_regex(TypeError, "Only list-like objects or None"): safe_sort(values=[0, 1, 2], labels=1) with tm.assert_raises_regex(ValueError, "values should be unique"): safe_sort(values=[0, 1, 2, 1], labels=[0, 1])
apache-2.0
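The NaN-placement behaviour the nargsort tests pin down can be reproduced with plain numpy, which argsorts NaNs to the end; a sketch (not pandas' internal code) of deriving the na_position='first' ordering from a stable argsort:

import numpy as np

items = np.array([np.nan, 0.0, 2.0, 1.0, np.nan])
order = np.argsort(items, kind='mergesort')   # stable; NaNs come last
nan_last = np.isnan(items[order])
na_first = np.concatenate([order[nan_last], order[~nan_last]])
print(na_first)   # NaN positions first, then the ascending non-NaN order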
zstyblik/infernal-twin
build/pillow/Tests/test_file_webp_metadata.py
11
3033
from helper import unittest, PillowTestCase from PIL import Image class TestFileWebpMetadata(PillowTestCase): def setUp(self): try: from PIL import _webp except ImportError: self.skipTest('WebP support not installed') return if not _webp.HAVE_WEBPMUX: self.skipTest('WebPMux support not installed') def test_read_exif_metadata(self): file_path = "Tests/images/flower.webp" image = Image.open(file_path) self.assertEqual(image.format, "WEBP") exif_data = image.info.get("exif", None) self.assertTrue(exif_data) exif = image._getexif() # camera make self.assertEqual(exif[271], "Canon") jpeg_image = Image.open('Tests/images/flower.jpg') expected_exif = jpeg_image.info['exif'] self.assertEqual(exif_data, expected_exif) def test_write_exif_metadata(self): from io import BytesIO file_path = "Tests/images/flower.jpg" image = Image.open(file_path) expected_exif = image.info['exif'] test_buffer = BytesIO() image.save(test_buffer, "webp", exif=expected_exif) test_buffer.seek(0) webp_image = Image.open(test_buffer) webp_exif = webp_image.info.get('exif', None) self.assertTrue(webp_exif) if webp_exif: self.assertEqual( webp_exif, expected_exif, "WebP EXIF didn't match") def test_read_icc_profile(self): file_path = "Tests/images/flower2.webp" image = Image.open(file_path) self.assertEqual(image.format, "WEBP") self.assertTrue(image.info.get("icc_profile", None)) icc = image.info['icc_profile'] jpeg_image = Image.open('Tests/images/flower2.jpg') expected_icc = jpeg_image.info['icc_profile'] self.assertEqual(icc, expected_icc) def test_write_icc_metadata(self): from io import BytesIO file_path = "Tests/images/flower2.jpg" image = Image.open(file_path) expected_icc_profile = image.info['icc_profile'] test_buffer = BytesIO() image.save(test_buffer, "webp", icc_profile=expected_icc_profile) test_buffer.seek(0) webp_image = Image.open(test_buffer) webp_icc_profile = webp_image.info.get('icc_profile', None) self.assertTrue(webp_icc_profile) if webp_icc_profile: self.assertEqual( webp_icc_profile, expected_icc_profile, "Webp ICC didn't match") def test_read_no_exif(self): from io import BytesIO file_path = "Tests/images/flower.jpg" image = Image.open(file_path) self.assertTrue('exif' in image.info) test_buffer = BytesIO() image.save(test_buffer, "webp") test_buffer.seek(0) webp_image = Image.open(test_buffer) self.assertFalse(webp_image._getexif()) if __name__ == '__main__': unittest.main() # End of file
gpl-3.0
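The EXIF round-trip those tests verify amounts to carrying the raw exif blob across a save, as the save(..., exif=...) calls above show. A sketch with a hypothetical input file, assuming a Pillow build with WebPMux support:

from io import BytesIO
from PIL import Image

src = Image.open('photo.jpg')                  # hypothetical input file
buf = BytesIO()
src.save(buf, 'webp', exif=src.info['exif'])   # pass the EXIF blob through
buf.seek(0)
assert Image.open(buf).info.get('exif') == src.info['exif']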
KhronosGroup/COLLADA-CTS
StandardDataSets/1_5/collada/library_cameras/camera/_reference/_reference_optics_orthographic_zfar_znear/_reference_optics_orthographic_zfar_znear.py
8
3605
# Copyright (c) 2012 The Khronos Group Inc. # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and /or associated documentation files (the "Materials "), to deal in the Materials without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Materials, and to permit persons to whom the Materials are furnished to do so, subject to # the following conditions: # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Materials. # THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. # See Core.Logic.FJudgementContext for the information # of the 'context' parameter. # This sample judging object does the following: # # JudgeBaseline: just verifies that the standard steps did not crash. # JudgeSuperior: also verifies that the validation steps are not in error. # JudgeExemplary: same as intermediate badge. # We import an assistant script that includes the common verification # methods. The assistant buffers its checks, so that running them again # does not incur an unnecessary performance hit. from StandardDataSets.scripts import JudgeAssistant # Please feed your node list here: tagLst = [] attrName = '' attrVal = '' dataToCheck = '' class SimpleJudgingObject: def __init__(self, _tagLst, _attrName, _attrVal, _data): self.tagList = _tagLst self.attrName = _attrName self.attrVal = _attrVal self.dataToCheck = _data self.status_baseline = False self.status_superior = False self.status_exemplary = False self.__assistant = JudgeAssistant.JudgeAssistant() # Baseline def JudgeBaseline(self, context): # No step should crash self.__assistant.CheckCrashes(context) # Import/export/validate must exist and pass, while Render must only exist. self.__assistant.CheckSteps(context, ["Import", "Export", "Validate"], ["Render"]) self.status_baseline = self.__assistant.GetResults() return self.status_baseline # Superior def JudgeSuperior(self, context): # if baseline fails, no point in further checking if (self.status_baseline == False): self.status_superior = self.status_baseline return self.status_superior # Compare the rendered images self.__assistant.CompareRenderedImages(context) self.status_superior = self.__assistant.DeferJudgement(context) return self.status_superior # To pass advanced you need to pass intermediate, this object could also include additional # tests that were specific to the advanced badge def JudgeExemplary(self, context): self.status_exemplary = self.status_superior return self.status_exemplary # This is where all the work occurs: "judgingObject" is an absolutely necessary token. # The dynamic loader looks very specifically for a class instance named "judgingObject". # judgingObject = SimpleJudgingObject(tagLst, attrName, attrVal, dataToCheck);
mit
Adel-Magebinary/odoo
addons/mrp_operations/report/mrp_code_barcode.py
381
1511
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp.report import report_sxw class code_barcode(report_sxw.rml_parse): def __init__(self, cr, uid, name, context): super(code_barcode, self).__init__(cr, uid, name, context=context) self.localcontext.update({ 'time': time, }) report_sxw.report_sxw('report.mrp.code.barcode', 'mrp_operations.operation.code', 'addons/mrp_operations/report/mrp_code_barcode.rml',parser=code_barcode,header=False) # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
cl4rke/scikit-learn
sklearn/cluster/k_means_.py
128
54694
"""K-means clustering""" # Authors: Gael Varoquaux <[email protected]> # Thomas Rueckstiess <[email protected]> # James Bergstra <[email protected]> # Jan Schlueter <[email protected]> # Nelle Varoquaux # Peter Prettenhofer <[email protected]> # Olivier Grisel <[email protected]> # Mathieu Blondel <[email protected]> # Robert Layton <[email protected]> # License: BSD 3 clause import warnings import numpy as np import scipy.sparse as sp from ..base import BaseEstimator, ClusterMixin, TransformerMixin from ..metrics.pairwise import euclidean_distances from ..utils.extmath import row_norms, squared_norm from ..utils.sparsefuncs_fast import assign_rows_csr from ..utils.sparsefuncs import mean_variance_axis from ..utils.fixes import astype from ..utils import check_array from ..utils import check_random_state from ..utils import as_float_array from ..utils import gen_batches from ..utils.validation import check_is_fitted from ..utils.validation import FLOAT_DTYPES from ..utils.random import choice from ..externals.joblib import Parallel from ..externals.joblib import delayed from . import _k_means ############################################################################### # Initialization heuristic def _k_init(X, n_clusters, x_squared_norms, random_state, n_local_trials=None): """Init n_clusters seeds according to k-means++ Parameters ----------- X: array or sparse matrix, shape (n_samples, n_features) The data to pick seeds for. To avoid memory copy, the input data should be double precision (dtype=np.float64). n_clusters: integer The number of seeds to choose x_squared_norms: array, shape (n_samples,) Squared Euclidean norm of each data point. random_state: numpy.RandomState The generator used to initialize the centers. n_local_trials: integer, optional The number of seeding trials for each center (except the first), of which the one reducing inertia the most is greedily chosen. Set to None to make the number of trials depend logarithmically on the number of seeds (2+log(k)); this is the default. Notes ----- Selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. see: Arthur, D. and Vassilvitskii, S. "k-means++: the advantages of careful seeding". ACM-SIAM symposium on Discrete algorithms. 2007 Version ported from http://www.stanford.edu/~darthur/kMeansppTest.zip, which is the implementation used in the aforementioned paper. """ n_samples, n_features = X.shape centers = np.empty((n_clusters, n_features)) assert x_squared_norms is not None, 'x_squared_norms None in _k_init' # Set the number of local seeding trials if none is given if n_local_trials is None: # This is what Arthur/Vassilvitskii tried, but did not report # specific results for other than mentioning in the conclusion # that it helped. 
n_local_trials = 2 + int(np.log(n_clusters)) # Pick first center randomly center_id = random_state.randint(n_samples) if sp.issparse(X): centers[0] = X[center_id].toarray() else: centers[0] = X[center_id] # Initialize list of closest distances and calculate current potential closest_dist_sq = euclidean_distances( centers[0], X, Y_norm_squared=x_squared_norms, squared=True) current_pot = closest_dist_sq.sum() # Pick the remaining n_clusters-1 points for c in range(1, n_clusters): # Choose center candidates by sampling with probability proportional # to the squared distance to the closest existing center rand_vals = random_state.random_sample(n_local_trials) * current_pot candidate_ids = np.searchsorted(closest_dist_sq.cumsum(), rand_vals) # Compute distances to center candidates distance_to_candidates = euclidean_distances( X[candidate_ids], X, Y_norm_squared=x_squared_norms, squared=True) # Decide which candidate is the best best_candidate = None best_pot = None best_dist_sq = None for trial in range(n_local_trials): # Compute potential when including center candidate new_dist_sq = np.minimum(closest_dist_sq, distance_to_candidates[trial]) new_pot = new_dist_sq.sum() # Store result if it is the best local trial so far if (best_candidate is None) or (new_pot < best_pot): best_candidate = candidate_ids[trial] best_pot = new_pot best_dist_sq = new_dist_sq # Permanently add best center candidate found in local tries if sp.issparse(X): centers[c] = X[best_candidate].toarray() else: centers[c] = X[best_candidate] current_pot = best_pot closest_dist_sq = best_dist_sq return centers ############################################################################### # K-means batch estimation by EM (expectation maximization) def _tolerance(X, tol): """Return a tolerance which is independent of the dataset""" if sp.issparse(X): variances = mean_variance_axis(X, axis=0)[1] else: variances = np.var(X, axis=0) return np.mean(variances) * tol def k_means(X, n_clusters, init='k-means++', precompute_distances='auto', n_init=10, max_iter=300, verbose=False, tol=1e-4, random_state=None, copy_x=True, n_jobs=1, return_n_iter=False): """K-means clustering algorithm. Read more in the :ref:`User Guide <k_means>`. Parameters ---------- X : array-like or sparse matrix, shape (n_samples, n_features) The observations to cluster. n_clusters : int The number of clusters to form as well as the number of centroids to generate. max_iter : int, optional, default 300 Maximum number of iterations of the k-means algorithm to run. n_init : int, optional, default: 10 Number of time the k-means algorithm will be run with different centroid seeds. The final results will be the best output of n_init consecutive runs in terms of inertia. init : {'k-means++', 'random', or ndarray, or a callable}, optional Method for initialization, default to 'k-means++': 'k-means++' : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. See section Notes in k_init for more details. 'random': generate k centroids from a Gaussian with mean and variance estimated from the data. If an ndarray is passed, it should be of shape (n_clusters, n_features) and gives the initial centers. If a callable is passed, it should take arguments X, k and and a random state and return an initialization. precompute_distances : {'auto', True, False} Precompute distances (faster but takes more memory). 'auto' : do not precompute distances if n_samples * n_clusters > 12 million. 
This corresponds to about 100MB overhead per job using double precision. True : always precompute distances False : never precompute distances tol : float, optional The relative increment in the results before declaring convergence. verbose : boolean, optional Verbosity mode. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. copy_x : boolean, optional When pre-computing distances it is more numerically accurate to center the data first. If copy_x is True, then the original data is not modified. If False, the original data is modified, and put back before the function returns, but small numerical differences may be introduced by subtracting and then adding the data mean. n_jobs : int The number of jobs to use for the computation. This works by computing each of the n_init runs in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. return_n_iter : bool, optional Whether or not to return the number of iterations. Returns ------- centroid : float ndarray with shape (k, n_features) Centroids found at the last iteration of k-means. label : integer ndarray with shape (n_samples,) label[i] is the code or index of the centroid the i'th observation is closest to. inertia : float The final value of the inertia criterion (sum of squared distances to the closest centroid for all observations in the training set). best_n_iter: int Number of iterations corresponding to the best results. Returned only if `return_n_iter` is set to True. """ if n_init <= 0: raise ValueError("Invalid number of initializations." " n_init=%d must be bigger than zero." % n_init) random_state = check_random_state(random_state) best_inertia = np.infty X = as_float_array(X, copy=copy_x) tol = _tolerance(X, tol) # If the distances are precomputed every job will create a matrix of shape # (n_clusters, n_samples). To stop KMeans from eating up memory we only # activate this if the created matrix is guaranteed to be under 100MB. 12 # million entries consume a little under 100MB if they are of type double. if precompute_distances == 'auto': n_samples = X.shape[0] precompute_distances = (n_clusters * n_samples) < 12e6 elif isinstance(precompute_distances, bool): pass else: raise ValueError("precompute_distances should be 'auto' or True/False" ", but a value of %r was passed" % precompute_distances) # subtract of mean of x for more accurate distance computations if not sp.issparse(X) or hasattr(init, '__array__'): X_mean = X.mean(axis=0) if not sp.issparse(X): # The copy was already done above X -= X_mean if hasattr(init, '__array__'): init = np.asarray(init).copy() init -= X_mean if n_init != 1: warnings.warn( 'Explicit initial center position passed: ' 'performing only one init in k-means instead of n_init=%d' % n_init, RuntimeWarning, stacklevel=2) n_init = 1 # precompute squared norms of data points x_squared_norms = row_norms(X, squared=True) best_labels, best_inertia, best_centers = None, None, None if n_jobs == 1: # For a single thread, less memory is needed if we just store one set # of the best results (as opposed to one set per run per thread). 
for it in range(n_init): # run a k-means once labels, inertia, centers, n_iter_ = _kmeans_single( X, n_clusters, max_iter=max_iter, init=init, verbose=verbose, precompute_distances=precompute_distances, tol=tol, x_squared_norms=x_squared_norms, random_state=random_state) # determine if these results are the best so far if best_inertia is None or inertia < best_inertia: best_labels = labels.copy() best_centers = centers.copy() best_inertia = inertia best_n_iter = n_iter_ else: # parallelisation of k-means runs seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init) results = Parallel(n_jobs=n_jobs, verbose=0)( delayed(_kmeans_single)(X, n_clusters, max_iter=max_iter, init=init, verbose=verbose, tol=tol, precompute_distances=precompute_distances, x_squared_norms=x_squared_norms, # Change seed to ensure variety random_state=seed) for seed in seeds) # Get results with the lowest inertia labels, inertia, centers, n_iters = zip(*results) best = np.argmin(inertia) best_labels = labels[best] best_inertia = inertia[best] best_centers = centers[best] best_n_iter = n_iters[best] if not sp.issparse(X): if not copy_x: X += X_mean best_centers += X_mean if return_n_iter: return best_centers, best_labels, best_inertia, best_n_iter else: return best_centers, best_labels, best_inertia def _kmeans_single(X, n_clusters, x_squared_norms, max_iter=300, init='k-means++', verbose=False, random_state=None, tol=1e-4, precompute_distances=True): """A single run of k-means, assumes preparation completed prior. Parameters ---------- X: array-like of floats, shape (n_samples, n_features) The observations to cluster. n_clusters: int The number of clusters to form as well as the number of centroids to generate. max_iter: int, optional, default 300 Maximum number of iterations of the k-means algorithm to run. init: {'k-means++', 'random', or ndarray, or a callable}, optional Method for initialization, default to 'k-means++': 'k-means++' : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. See section Notes in k_init for more details. 'random': generate k centroids from a Gaussian with mean and variance estimated from the data. If an ndarray is passed, it should be of shape (k, p) and gives the initial centers. If a callable is passed, it should take arguments X, k and and a random state and return an initialization. tol: float, optional The relative increment in the results before declaring convergence. verbose: boolean, optional Verbosity mode x_squared_norms: array Precomputed x_squared_norms. precompute_distances : boolean, default: True Precompute distances (faster but takes more memory). random_state: integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. Returns ------- centroid: float ndarray with shape (k, n_features) Centroids found at the last iteration of k-means. label: integer ndarray with shape (n_samples,) label[i] is the code or index of the centroid the i'th observation is closest to. inertia: float The final value of the inertia criterion (sum of squared distances to the closest centroid for all observations in the training set). n_iter : int Number of iterations run. 
""" random_state = check_random_state(random_state) best_labels, best_inertia, best_centers = None, None, None # init centers = _init_centroids(X, n_clusters, init, random_state=random_state, x_squared_norms=x_squared_norms) if verbose: print("Initialization complete") # Allocate memory to store the distances for each sample to its # closer center for reallocation in case of ties distances = np.zeros(shape=(X.shape[0],), dtype=np.float64) # iterations for i in range(max_iter): centers_old = centers.copy() # labels assignment is also called the E-step of EM labels, inertia = \ _labels_inertia(X, x_squared_norms, centers, precompute_distances=precompute_distances, distances=distances) # computation of the means is also called the M-step of EM if sp.issparse(X): centers = _k_means._centers_sparse(X, labels, n_clusters, distances) else: centers = _k_means._centers_dense(X, labels, n_clusters, distances) if verbose: print("Iteration %2d, inertia %.3f" % (i, inertia)) if best_inertia is None or inertia < best_inertia: best_labels = labels.copy() best_centers = centers.copy() best_inertia = inertia if squared_norm(centers_old - centers) <= tol: if verbose: print("Converged at iteration %d" % i) break return best_labels, best_inertia, best_centers, i + 1 def _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances): """Compute labels and inertia using a full distance matrix. This will overwrite the 'distances' array in-place. Parameters ---------- X : numpy array, shape (n_sample, n_features) Input data. x_squared_norms : numpy array, shape (n_samples,) Precomputed squared norms of X. centers : numpy array, shape (n_clusters, n_features) Cluster centers which data is assigned to. distances : numpy array, shape (n_samples,) Pre-allocated array in which distances are stored. Returns ------- labels : numpy array, dtype=np.int, shape (n_samples,) Indices of clusters that samples are assigned to. inertia : float Sum of distances of samples to their closest cluster center. """ n_samples = X.shape[0] k = centers.shape[0] all_distances = euclidean_distances(centers, X, x_squared_norms, squared=True) labels = np.empty(n_samples, dtype=np.int32) labels.fill(-1) mindist = np.empty(n_samples) mindist.fill(np.infty) for center_id in range(k): dist = all_distances[center_id] labels[dist < mindist] = center_id mindist = np.minimum(dist, mindist) if n_samples == distances.shape[0]: # distances will be changed in-place distances[:] = mindist inertia = mindist.sum() return labels, inertia def _labels_inertia(X, x_squared_norms, centers, precompute_distances=True, distances=None): """E step of the K-means EM algorithm. Compute the labels and the inertia of the given samples and centers. This will compute the distances in-place. Parameters ---------- X: float64 array-like or CSR sparse matrix, shape (n_samples, n_features) The input samples to assign to the labels. x_squared_norms: array, shape (n_samples,) Precomputed squared euclidean norm of each data point, to speed up computations. centers: float64 array, shape (k, n_features) The cluster centers. precompute_distances : boolean, default: True Precompute distances (faster but takes more memory). distances: float64 array, shape (n_samples,) Pre-allocated array to be filled in with each sample's distance to the closest center. Returns ------- labels: int array of shape(n) The resulting assignment inertia : float Sum of distances of samples to their closest cluster center. 
""" n_samples = X.shape[0] # set the default value of centers to -1 to be able to detect any anomaly # easily labels = -np.ones(n_samples, np.int32) if distances is None: distances = np.zeros(shape=(0,), dtype=np.float64) # distances will be changed in-place if sp.issparse(X): inertia = _k_means._assign_labels_csr( X, x_squared_norms, centers, labels, distances=distances) else: if precompute_distances: return _labels_inertia_precompute_dense(X, x_squared_norms, centers, distances) inertia = _k_means._assign_labels_array( X, x_squared_norms, centers, labels, distances=distances) return labels, inertia def _init_centroids(X, k, init, random_state=None, x_squared_norms=None, init_size=None): """Compute the initial centroids Parameters ---------- X: array, shape (n_samples, n_features) k: int number of centroids init: {'k-means++', 'random' or ndarray or callable} optional Method for initialization random_state: integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. x_squared_norms: array, shape (n_samples,), optional Squared euclidean norm of each data point. Pass it if you have it at hands already to avoid it being recomputed here. Default: None init_size : int, optional Number of samples to randomly sample for speeding up the initialization (sometimes at the expense of accuracy): the only algorithm is initialized by running a batch KMeans on a random subset of the data. This needs to be larger than k. Returns ------- centers: array, shape(k, n_features) """ random_state = check_random_state(random_state) n_samples = X.shape[0] if init_size is not None and init_size < n_samples: if init_size < k: warnings.warn( "init_size=%d should be larger than k=%d. " "Setting it to 3*k" % (init_size, k), RuntimeWarning, stacklevel=2) init_size = 3 * k init_indices = random_state.random_integers( 0, n_samples - 1, init_size) X = X[init_indices] x_squared_norms = x_squared_norms[init_indices] n_samples = X.shape[0] elif n_samples < k: raise ValueError( "n_samples=%d should be larger than k=%d" % (n_samples, k)) if init == 'k-means++': centers = _k_init(X, k, random_state=random_state, x_squared_norms=x_squared_norms) elif init == 'random': seeds = random_state.permutation(n_samples)[:k] centers = X[seeds] elif hasattr(init, '__array__'): centers = init elif callable(init): centers = init(X, k, random_state=random_state) else: raise ValueError("the init parameter for the k-means should " "be 'k-means++' or 'random' or an ndarray, " "'%s' (type '%s') was passed." % (init, type(init))) if sp.issparse(centers): centers = centers.toarray() if len(centers) != k: raise ValueError('The shape of the initial centers (%s) ' 'does not match the number of clusters %i' % (centers.shape, k)) return centers class KMeans(BaseEstimator, ClusterMixin, TransformerMixin): """K-Means clustering Read more in the :ref:`User Guide <k_means>`. Parameters ---------- n_clusters : int, optional, default: 8 The number of clusters to form as well as the number of centroids to generate. max_iter : int, default: 300 Maximum number of iterations of the k-means algorithm for a single run. n_init : int, default: 10 Number of time the k-means algorithm will be run with different centroid seeds. The final results will be the best output of n_init consecutive runs in terms of inertia. 
init : {'k-means++', 'random' or an ndarray} Method for initialization, defaults to 'k-means++': 'k-means++' : selects initial cluster centers for k-mean clustering in a smart way to speed up convergence. See section Notes in k_init for more details. 'random': choose k observations (rows) at random from data for the initial centroids. If an ndarray is passed, it should be of shape (n_clusters, n_features) and gives the initial centers. precompute_distances : {'auto', True, False} Precompute distances (faster but takes more memory). 'auto' : do not precompute distances if n_samples * n_clusters > 12 million. This corresponds to about 100MB overhead per job using double precision. True : always precompute distances False : never precompute distances tol : float, default: 1e-4 Relative tolerance with regards to inertia to declare convergence n_jobs : int The number of jobs to use for the computation. This works by computing each of the n_init runs in parallel. If -1 all CPUs are used. If 1 is given, no parallel computing code is used at all, which is useful for debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one are used. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. verbose : int, default 0 Verbosity mode. copy_x : boolean, default True When pre-computing distances it is more numerically accurate to center the data first. If copy_x is True, then the original data is not modified. If False, the original data is modified, and put back before the function returns, but small numerical differences may be introduced by subtracting and then adding the data mean. Attributes ---------- cluster_centers_ : array, [n_clusters, n_features] Coordinates of cluster centers labels_ : Labels of each point inertia_ : float Sum of distances of samples to their closest cluster center. Notes ------ The k-means problem is solved using Lloyd's algorithm. The average complexity is given by O(k n T), where n is the number of samples and T is the number of iterations. The worst case complexity is given by O(n^(k+2/p)) with n = n_samples, p = n_features. (D. Arthur and S. Vassilvitskii, 'How slow is the k-means method?' SoCG2006) In practice, the k-means algorithm is very fast (one of the fastest clustering algorithms available), but it falls in local minima. That's why it can be useful to restart it several times. See also -------- MiniBatchKMeans: Alternative online implementation that does incremental updates of the centers positions using mini-batches. For large scale learning (say n_samples > 10k) MiniBatchKMeans is probably much faster than the default batch implementation.
""" def __init__(self, n_clusters=8, init='k-means++', n_init=10, max_iter=300, tol=1e-4, precompute_distances='auto', verbose=0, random_state=None, copy_x=True, n_jobs=1): if hasattr(init, '__array__'): n_clusters = init.shape[0] init = np.asarray(init, dtype=np.float64) self.n_clusters = n_clusters self.init = init self.max_iter = max_iter self.tol = tol self.precompute_distances = precompute_distances self.n_init = n_init self.verbose = verbose self.random_state = random_state self.copy_x = copy_x self.n_jobs = n_jobs def _check_fit_data(self, X): """Verify that the number of samples given is larger than k""" X = check_array(X, accept_sparse='csr', dtype=np.float64) if X.shape[0] < self.n_clusters: raise ValueError("n_samples=%d should be >= n_clusters=%d" % ( X.shape[0], self.n_clusters)) return X def _check_test_data(self, X): X = check_array(X, accept_sparse='csr', dtype=FLOAT_DTYPES, warn_on_dtype=True) n_samples, n_features = X.shape expected_n_features = self.cluster_centers_.shape[1] if not n_features == expected_n_features: raise ValueError("Incorrect number of features. " "Got %d features, expected %d" % ( n_features, expected_n_features)) return X def fit(self, X, y=None): """Compute k-means clustering. Parameters ---------- X : array-like or sparse matrix, shape=(n_samples, n_features) """ random_state = check_random_state(self.random_state) X = self._check_fit_data(X) self.cluster_centers_, self.labels_, self.inertia_, self.n_iter_ = \ k_means( X, n_clusters=self.n_clusters, init=self.init, n_init=self.n_init, max_iter=self.max_iter, verbose=self.verbose, return_n_iter=True, precompute_distances=self.precompute_distances, tol=self.tol, random_state=random_state, copy_x=self.copy_x, n_jobs=self.n_jobs) return self def fit_predict(self, X, y=None): """Compute cluster centers and predict cluster index for each sample. Convenience method; equivalent to calling fit(X) followed by predict(X). """ return self.fit(X).labels_ def fit_transform(self, X, y=None): """Compute clustering and transform X to cluster-distance space. Equivalent to fit(X).transform(X), but more efficiently implemented. """ # Currently, this just skips a copy of the data if it is not in # np.array or CSR format already. # XXX This skips _check_test_data, which may change the dtype; # we should refactor the input validation. X = self._check_fit_data(X) return self.fit(X)._transform(X) def transform(self, X, y=None): """Transform X to a cluster-distance space. In the new space, each dimension is the distance to the cluster centers. Note that even if X is sparse, the array returned by `transform` will typically be dense. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data to transform. Returns ------- X_new : array, shape [n_samples, k] X transformed in the new space. """ check_is_fitted(self, 'cluster_centers_') X = self._check_test_data(X) return self._transform(X) def _transform(self, X): """guts of transform method; no input validation""" return euclidean_distances(X, self.cluster_centers_) def predict(self, X): """Predict the closest cluster each sample in X belongs to. In the vector quantization literature, `cluster_centers_` is called the code book and each value returned by `predict` is the index of the closest code in the code book. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data to predict. Returns ------- labels : array, shape [n_samples,] Index of the cluster each sample belongs to. 
""" check_is_fitted(self, 'cluster_centers_') X = self._check_test_data(X) x_squared_norms = row_norms(X, squared=True) return _labels_inertia(X, x_squared_norms, self.cluster_centers_)[0] def score(self, X, y=None): """Opposite of the value of X on the K-means objective. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data. Returns ------- score : float Opposite of the value of X on the K-means objective. """ check_is_fitted(self, 'cluster_centers_') X = self._check_test_data(X) x_squared_norms = row_norms(X, squared=True) return -_labels_inertia(X, x_squared_norms, self.cluster_centers_)[1] def _mini_batch_step(X, x_squared_norms, centers, counts, old_center_buffer, compute_squared_diff, distances, random_reassign=False, random_state=None, reassignment_ratio=.01, verbose=False): """Incremental update of the centers for the Minibatch K-Means algorithm. Parameters ---------- X : array, shape (n_samples, n_features) The original data array. x_squared_norms : array, shape (n_samples,) Squared euclidean norm of each data point. centers : array, shape (k, n_features) The cluster centers. This array is MODIFIED IN PLACE counts : array, shape (k,) The vector in which we keep track of the numbers of elements in a cluster. This array is MODIFIED IN PLACE distances : array, dtype float64, shape (n_samples), optional If not None, should be a pre-allocated array that will be used to store the distances of each sample to its closest center. May not be None when random_reassign is True. random_state : integer or numpy.RandomState, optional The generator used to initialize the centers. If an integer is given, it fixes the seed. Defaults to the global numpy random number generator. random_reassign : boolean, optional If True, centers with very low counts are randomly reassigned to observations. reassignment_ratio : float, optional Control the fraction of the maximum number of counts for a center to be reassigned. A higher value means that low count centers are more likely to be reassigned, which means that the model will take longer to converge, but should converge in a better clustering. verbose : bool, optional, default False Controls the verbosity. compute_squared_diff : bool If set to False, the squared diff computation is skipped. old_center_buffer : int Copy of old centers for monitoring convergence. Returns ------- inertia : float Sum of distances of samples to their closest cluster center. squared_diff : numpy array, shape (n_clusters,) Squared distances between previous and updated cluster centers. """ # Perform label assignment to nearest centers nearest_center, inertia = _labels_inertia(X, x_squared_norms, centers, distances=distances) if random_reassign and reassignment_ratio > 0: random_state = check_random_state(random_state) # Reassign clusters that have very low counts to_reassign = counts < reassignment_ratio * counts.max() # pick at most .5 * batch_size samples as new centers if to_reassign.sum() > .5 * X.shape[0]: indices_dont_reassign = np.argsort(counts)[int(.5 * X.shape[0]):] to_reassign[indices_dont_reassign] = False n_reassigns = to_reassign.sum() if n_reassigns: # Pick new clusters amongst observations with uniform probability new_centers = choice(X.shape[0], replace=False, size=n_reassigns, random_state=random_state) if verbose: print("[MiniBatchKMeans] Reassigning %i cluster centers." 
                      % n_reassigns)

            if sp.issparse(X) and not sp.issparse(centers):
                assign_rows_csr(X,
                                astype(new_centers, np.intp),
                                astype(np.where(to_reassign)[0], np.intp),
                                centers)
            else:
                centers[to_reassign] = X[new_centers]
        # reset counts of reassigned centers, but don't reset them too small
        # to avoid instant reassignment. This is a pretty dirty hack as it
        # also modifies the learning rates.
        counts[to_reassign] = np.min(counts[~to_reassign])

    # implementation for the sparse CSR representation completely written in
    # cython
    if sp.issparse(X):
        return inertia, _k_means._mini_batch_update_csr(
            X, x_squared_norms, centers, counts, nearest_center,
            old_center_buffer, compute_squared_diff)

    # dense variant in mostly numpy (not as memory efficient though)
    k = centers.shape[0]
    squared_diff = 0.0
    for center_idx in range(k):
        # find points from minibatch that are assigned to this center
        center_mask = nearest_center == center_idx
        count = center_mask.sum()

        if count > 0:
            if compute_squared_diff:
                old_center_buffer[:] = centers[center_idx]

            # inplace remove previous count scaling
            centers[center_idx] *= counts[center_idx]

            # inplace sum with new points members of this cluster
            centers[center_idx] += np.sum(X[center_mask], axis=0)

            # update the count statistics for this center
            counts[center_idx] += count

            # inplace rescale to compute mean of all points (old and new)
            centers[center_idx] /= counts[center_idx]

            # update the squared diff if necessary
            if compute_squared_diff:
                diff = centers[center_idx].ravel() - old_center_buffer.ravel()
                squared_diff += np.dot(diff, diff)

    return inertia, squared_diff


def _mini_batch_convergence(model, iteration_idx, n_iter, tol,
                            n_samples, centers_squared_diff, batch_inertia,
                            context, verbose=0):
    """Helper function to encapsulate the early stopping logic"""
    # Normalize inertia to be able to compare values when
    # batch_size changes
    batch_inertia /= model.batch_size
    centers_squared_diff /= model.batch_size

    # Compute an Exponentially Weighted Average of the squared
    # diff to monitor the convergence while discarding
    # minibatch-local stochastic variability:
    # https://en.wikipedia.org/wiki/Moving_average
    ewa_diff = context.get('ewa_diff')
    ewa_inertia = context.get('ewa_inertia')
    if ewa_diff is None:
        ewa_diff = centers_squared_diff
        ewa_inertia = batch_inertia
    else:
        alpha = float(model.batch_size) * 2.0 / (n_samples + 1)
        alpha = 1.0 if alpha > 1.0 else alpha
        ewa_diff = ewa_diff * (1 - alpha) + centers_squared_diff * alpha
        ewa_inertia = ewa_inertia * (1 - alpha) + batch_inertia * alpha

    # Log progress to be able to monitor convergence
    if verbose:
        progress_msg = (
            'Minibatch iteration %d/%d:'
            ' mean batch inertia: %f, ewa inertia: %f ' % (
                iteration_idx + 1, n_iter, batch_inertia,
                ewa_inertia))
        print(progress_msg)

    # Early stopping based on absolute tolerance on squared change of
    # centers position (using EWA smoothing)
    if tol > 0.0 and ewa_diff <= tol:
        if verbose:
            print('Converged (small centers change) at iteration %d/%d'
                  % (iteration_idx + 1, n_iter))
        return True

    # Early stopping heuristic due to lack of improvement on smoothed inertia
    ewa_inertia_min = context.get('ewa_inertia_min')
    no_improvement = context.get('no_improvement', 0)
    if ewa_inertia_min is None or ewa_inertia < ewa_inertia_min:
        no_improvement = 0
        ewa_inertia_min = ewa_inertia
    else:
        no_improvement += 1

    if (model.max_no_improvement is not None
            and no_improvement >= model.max_no_improvement):
        if verbose:
            print('Converged (lack of improvement in inertia)'
                  ' at iteration %d/%d'
                  % (iteration_idx + 1, n_iter))
        return True

    # update the convergence context
    # to maintain state across successive calls:
    context['ewa_diff'] = ewa_diff
    context['ewa_inertia'] = ewa_inertia
    context['ewa_inertia_min'] = ewa_inertia_min
    context['no_improvement'] = no_improvement
    return False


class MiniBatchKMeans(KMeans):
    """Mini-Batch K-Means clustering

    Parameters
    ----------

    n_clusters : int, optional, default: 8
        The number of clusters to form as well as the number of
        centroids to generate.

    max_iter : int, optional
        Maximum number of iterations over the complete dataset before
        stopping independently of any early stopping criterion heuristics.

    max_no_improvement : int, default: 10
        Control early stopping based on the consecutive number of mini
        batches that do not yield an improvement on the smoothed inertia.

        To disable convergence detection based on inertia, set
        max_no_improvement to None.

    tol : float, default: 0.0
        Control early stopping based on the relative center changes as
        measured by a smoothed, variance-normalized estimate of the mean
        center squared position changes. This early stopping heuristic is
        closer to the one used for the batch variant of the algorithm
        but induces a slight computational and memory overhead over the
        inertia heuristic.

        To disable convergence detection based on normalized center
        change, set tol to 0.0 (default).

    batch_size : int, optional, default: 100
        Size of the mini batches.

    init_size : int, optional, default: 3 * batch_size
        Number of samples to randomly sample for speeding up the
        initialization (sometimes at the expense of accuracy): the
        algorithm is initialized by running a batch KMeans on a
        random subset of the data. This needs to be larger than n_clusters.

    init : {'k-means++', 'random' or an ndarray}, default: 'k-means++'
        Method for initialization, defaults to 'k-means++':

        'k-means++' : selects initial cluster centers for k-means
        clustering in a smart way to speed up convergence. See section
        Notes in k_init for more details.

        'random': choose k observations (rows) at random from data for
        the initial centroids.

        If an ndarray is passed, it should be of shape
        (n_clusters, n_features) and gives the initial centers.

    n_init : int, default=3
        Number of random initializations that are tried.
        In contrast to KMeans, the algorithm is only run once, using the
        best of the ``n_init`` initializations as measured by inertia.

    compute_labels : boolean, default=True
        Compute label assignment and inertia for the complete dataset
        once the minibatch optimization has converged in fit.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    reassignment_ratio : float, default: 0.01
        Control the fraction of the maximum number of counts for a
        center to be reassigned. A higher value means that low count
        centers are more easily reassigned, which means that the
        model will take longer to converge, but should converge in a
        better clustering.

    verbose : boolean, optional
        Verbosity mode.

    Attributes
    ----------

    cluster_centers_ : array, [n_clusters, n_features]
        Coordinates of cluster centers

    labels_ :
        Labels of each point (if compute_labels is set to True).

    inertia_ : float
        The value of the inertia criterion associated with the chosen
        partition (if compute_labels is set to True). The inertia is
        defined as the sum of square distances of samples to their
        nearest cluster center.

    Notes
    -----
    See http://www.eecs.tufts.edu/~dsculley/papers/fastkmeans.pdf
    """

    def __init__(self, n_clusters=8, init='k-means++', max_iter=100,
                 batch_size=100, verbose=0, compute_labels=True,
                 random_state=None, tol=0.0, max_no_improvement=10,
                 init_size=None, n_init=3, reassignment_ratio=0.01):

        super(MiniBatchKMeans, self).__init__(
            n_clusters=n_clusters, init=init, max_iter=max_iter,
            verbose=verbose, random_state=random_state, tol=tol,
            n_init=n_init)

        self.max_no_improvement = max_no_improvement
        self.batch_size = batch_size
        self.compute_labels = compute_labels
        self.init_size = init_size
        self.reassignment_ratio = reassignment_ratio

    def fit(self, X, y=None):
        """Compute the centroids on X by chunking it into mini-batches.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Coordinates of the data points to cluster
        """
        random_state = check_random_state(self.random_state)
        X = check_array(X, accept_sparse="csr", order='C', dtype=np.float64)
        n_samples, n_features = X.shape
        if n_samples < self.n_clusters:
            raise ValueError("Number of samples smaller than number "
                             "of clusters.")

        n_init = self.n_init
        if hasattr(self.init, '__array__'):
            self.init = np.ascontiguousarray(self.init, dtype=np.float64)
            if n_init != 1:
                warnings.warn(
                    'Explicit initial center position passed: '
                    'performing only one init in MiniBatchKMeans instead of '
                    'n_init=%d'
                    % self.n_init, RuntimeWarning, stacklevel=2)
                n_init = 1

        x_squared_norms = row_norms(X, squared=True)

        if self.tol > 0.0:
            tol = _tolerance(X, self.tol)

            # using tol-based early stopping needs the allocation of a
            # dedicated buffer, which can be expensive for high dim data:
            # hence we allocate it outside of the main loop
            old_center_buffer = np.zeros(n_features, np.double)
        else:
            tol = 0.0
            # no need for the center buffer if tol-based early stopping is
            # disabled
            old_center_buffer = np.zeros(0, np.double)

        distances = np.zeros(self.batch_size, dtype=np.float64)
        n_batches = int(np.ceil(float(n_samples) / self.batch_size))
        n_iter = int(self.max_iter * n_batches)

        init_size = self.init_size
        if init_size is None:
            init_size = 3 * self.batch_size
        if init_size > n_samples:
            init_size = n_samples
        self.init_size_ = init_size

        validation_indices = random_state.random_integers(
            0, n_samples - 1, init_size)
        X_valid = X[validation_indices]
        x_squared_norms_valid = x_squared_norms[validation_indices]

        # perform several inits with random sub-sets
        best_inertia = None
        for init_idx in range(n_init):
            if self.verbose:
                print("Init %d/%d with method: %s"
                      % (init_idx + 1, n_init, self.init))
            counts = np.zeros(self.n_clusters, dtype=np.int32)

            # TODO: once the `k_means` function works with sparse input we
            # should refactor the following init to use it instead.

            # Initialize the centers using only a fraction of the data as we
            # expect n_samples to be very large when using MiniBatchKMeans
            cluster_centers = _init_centroids(
                X, self.n_clusters, self.init,
                random_state=random_state,
                x_squared_norms=x_squared_norms,
                init_size=init_size)

            # Compute the label assignment on the init dataset
            batch_inertia, centers_squared_diff = _mini_batch_step(
                X_valid, x_squared_norms[validation_indices],
                cluster_centers, counts, old_center_buffer, False,
                distances=None, verbose=self.verbose)

            # Keep only the best cluster centers across independent inits on
            # the common validation set
            _, inertia = _labels_inertia(X_valid, x_squared_norms_valid,
                                         cluster_centers)
            if self.verbose:
                print("Inertia for init %d/%d: %f"
                      % (init_idx + 1, n_init, inertia))
            if best_inertia is None or inertia < best_inertia:
                self.cluster_centers_ = cluster_centers
                self.counts_ = counts
                best_inertia = inertia

        # Empty context to be used inplace by the convergence check routine
        convergence_context = {}

        # Perform the iterative optimization until the final convergence
        # criterion
        for iteration_idx in range(n_iter):
            # Sample a minibatch from the full dataset
            minibatch_indices = random_state.random_integers(
                0, n_samples - 1, self.batch_size)

            # Perform the actual update step on the minibatch data
            batch_inertia, centers_squared_diff = _mini_batch_step(
                X[minibatch_indices], x_squared_norms[minibatch_indices],
                self.cluster_centers_, self.counts_,
                old_center_buffer, tol > 0.0, distances=distances,
                # Here we randomly choose whether to perform
                # random reassignment: the choice is done as a function
                # of the iteration index, and the minimum number of
                # counts, in order to force this reassignment to happen
                # every once in a while
                random_reassign=((iteration_idx + 1)
                                 % (10 + self.counts_.min()) == 0),
                random_state=random_state,
                reassignment_ratio=self.reassignment_ratio,
                verbose=self.verbose)

            # Monitor convergence and do early stopping if necessary
            if _mini_batch_convergence(
                    self, iteration_idx, n_iter, tol, n_samples,
                    centers_squared_diff, batch_inertia, convergence_context,
                    verbose=self.verbose):
                break

        self.n_iter_ = iteration_idx + 1

        if self.compute_labels:
            self.labels_, self.inertia_ = self._labels_inertia_minibatch(X)

        return self

    def _labels_inertia_minibatch(self, X):
        """Compute labels and inertia using mini batches.

        This is slightly slower than doing everything at once but prevents
        memory errors / segfaults.

        Parameters
        ----------
        X : array-like, shape (n_samples, n_features)
            Input data.

        Returns
        -------
        labels : array, shape (n_samples,)
            Cluster labels for each point.

        inertia : float
            Sum of squared distances of points to nearest cluster.
        """
        if self.verbose:
            print('Computing label assignment and total inertia')
        x_squared_norms = row_norms(X, squared=True)
        slices = gen_batches(X.shape[0], self.batch_size)
        results = [_labels_inertia(X[s], x_squared_norms[s],
                                   self.cluster_centers_) for s in slices]
        labels, inertia = zip(*results)
        return np.hstack(labels), np.sum(inertia)

    def partial_fit(self, X, y=None):
        """Update k means estimate on a single mini-batch X.

        Parameters
        ----------
        X : array-like, shape = [n_samples, n_features]
            Coordinates of the data points to cluster.
""" X = check_array(X, accept_sparse="csr") n_samples, n_features = X.shape if hasattr(self.init, '__array__'): self.init = np.ascontiguousarray(self.init, dtype=np.float64) if n_samples == 0: return self x_squared_norms = row_norms(X, squared=True) self.random_state_ = getattr(self, "random_state_", check_random_state(self.random_state)) if (not hasattr(self, 'counts_') or not hasattr(self, 'cluster_centers_')): # this is the first call partial_fit on this object: # initialize the cluster centers self.cluster_centers_ = _init_centroids( X, self.n_clusters, self.init, random_state=self.random_state_, x_squared_norms=x_squared_norms, init_size=self.init_size) self.counts_ = np.zeros(self.n_clusters, dtype=np.int32) random_reassign = False distances = None else: # The lower the minimum count is, the more we do random # reassignment, however, we don't want to do random # reassignment too often, to allow for building up counts random_reassign = self.random_state_.randint( 10 * (1 + self.counts_.min())) == 0 distances = np.zeros(X.shape[0], dtype=np.float64) _mini_batch_step(X, x_squared_norms, self.cluster_centers_, self.counts_, np.zeros(0, np.double), 0, random_reassign=random_reassign, distances=distances, random_state=self.random_state_, reassignment_ratio=self.reassignment_ratio, verbose=self.verbose) if self.compute_labels: self.labels_, self.inertia_ = _labels_inertia( X, x_squared_norms, self.cluster_centers_) return self def predict(self, X): """Predict the closest cluster each sample in X belongs to. In the vector quantization literature, `cluster_centers_` is called the code book and each value returned by `predict` is the index of the closest code in the code book. Parameters ---------- X : {array-like, sparse matrix}, shape = [n_samples, n_features] New data to predict. Returns ------- labels : array, shape [n_samples,] Index of the cluster each sample belongs to. """ check_is_fitted(self, 'cluster_centers_') X = self._check_test_data(X) return self._labels_inertia_minibatch(X)[0]
bsd-3-clause
hidat/audio_pipeline
audio_pipeline/test/AudioFileTest.py
1
4157
import os
import unittest
import mutagen

from . import TestUtil
from .. import util

test_files = "audio_pipeline\\test\\test_files\\audio\\tag_test_files"


class TestAudioFileTags(TestUtil.TestUtilMixin):

    def test_artist_name(self):
        tag = self.format.album_artist(self.meta)
        self.check_af_tag(tag, self.af.album_artist)

    def test_mbid(self):
        tag = self.format.mbid(self.meta)
        self.check_af_tag(tag, self.af.mbid)

    def test_album(self):
        tag = self.format.album(self.meta)
        self.check_af_tag(tag, self.af.album)

    def test_release_date(self):
        tag = self.format.release_date(self.meta)
        self.check_af_tag(tag, self.af.release_date)

    def test_title(self):
        tag = self.format.title(self.meta)
        self.check_af_tag(tag, self.af.title)

    def test_artist(self):
        tag = self.format.artist(self.meta)
        self.check_af_tag(tag, self.af.artist)

    def test_disc_num(self):
        tag = self.format.disc_num(self.meta)
        self.check_af_tag(tag, self.af.disc_num)

    def test_track_num(self):
        tag = self.format.track_num(self.meta)
        self.check_af_tag(tag, self.af.track_num)

    def test_length(self):
        tag = self.format.length(self.meta)
        self.check_af_tag(tag, self.af.length)

    def test_custom_release(self):
        for tag_name in self.af.custom_release_tags.keys():
            tag = self.format.custom_tag(tag_name, self.meta)
            self.check_af_tag(tag, self.af.custom_release_tags[tag_name])

    def test_custom_track(self):
        for tag_name in self.af.custom_track_tags.keys():
            tag = self.format.custom_tag(tag_name, self.meta)
            self.check_af_tag(tag, self.af.custom_track_tags[tag_name])


class TestAudioFileVorbis_t1(TestAudioFileTags, unittest.TestCase):
    meta = mutagen.File(os.path.join(test_files, "t1.flac"))
    format = util.format.Vorbis.Format
    af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "t1.flac"))


class TestAudioFileVorbis_picard(TestAudioFileTags, unittest.TestCase):
    meta = mutagen.File(os.path.join(test_files, "picard.flac"))
    format = util.format.Vorbis.Format
    af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "picard.flac"))


class TestAudioFileVorbis_unknown(TestAudioFileTags, unittest.TestCase):
    meta = mutagen.File(os.path.join(test_files, "unknown.flac"))
    format = util.format.Vorbis.Format
    af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "unknown.flac"))


class TestAudioFileAAC_t1(TestAudioFileTags, unittest.TestCase):
    meta = mutagen.File(os.path.join(test_files, "t1.m4a"))
    format = util.format.AAC.Format
    af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "t1.m4a"))


class TestAudioFileAAC_picard(TestAudioFileTags, unittest.TestCase):
    meta = mutagen.File(os.path.join(test_files, "picard.m4a"))
    format = util.format.AAC.Format
    af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "picard.m4a"))


class TestAudioFileAAC_unknown(TestAudioFileTags, unittest.TestCase):
    meta = mutagen.File(os.path.join(test_files, "unknown.m4a"))
    format = util.format.AAC.Format
    af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "unknown.m4a"))


class TestAudioFileID3_t1(TestAudioFileTags, unittest.TestCase):
    meta = mutagen.File(os.path.join(test_files, "t1.mp3"))
    format = util.format.ID3.Format
    af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "t1.mp3"))


class TestAudioFileID3_picard(TestAudioFileTags, unittest.TestCase):
    meta = mutagen.File(os.path.join(test_files, "picard.mp3"))
    format = util.format.ID3.Format
    af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "picard.mp3"))


class TestAudioFileID3_unknown(TestAudioFileTags, unittest.TestCase):
    meta = mutagen.File(os.path.join(test_files, "unknown.mp3"))
    format = util.format.ID3.Format
    af = util.AudioFile.BaseAudioFile(os.path.join(test_files, "unknown.mp3"))
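
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original suite: the classes above all
# use the same parameterization pattern -- shared test methods live on a
# mixin that is not itself a TestCase, and each concrete subclass binds one
# (file, format) pair as class attributes. A stdlib-only version of the same
# pattern, with hypothetical names:
# ---------------------------------------------------------------------------
class _SquareTests(object):
    # mixin: defines the tests, but is not collected on its own

    def test_square(self):
        self.assertEqual(self.value * self.value, self.expected)


class SquareOfTwo(_SquareTests, unittest.TestCase):
    value = 2        # per-class parameters, like meta/format/af above
    expected = 4


class SquareOfThree(_SquareTests, unittest.TestCase):
    value = 3
    expected = 9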
mit
liikGit/MissionPlanner
Lib/site-packages/numpy/lib/tests/test_index_tricks.py
53
4333
from numpy.testing import *
import numpy as np
from numpy import (array, ones, r_, mgrid, unravel_index, zeros, where,
                   ndenumerate, fill_diagonal, diag_indices,
                   diag_indices_from, s_, index_exp)


class TestUnravelIndex(TestCase):
    def test_basic(self):
        assert unravel_index(2, (2, 2)) == (1, 0)
        assert unravel_index(254, (17, 94)) == (2, 66)
        assert_raises(ValueError, unravel_index, 4, (2, 2))


class TestGrid(TestCase):
    def test_basic(self):
        a = mgrid[-1:1:10j]
        b = mgrid[-1:1:0.1]
        assert(a.shape == (10,))
        assert(b.shape == (20,))
        assert(a[0] == -1)
        assert_almost_equal(a[-1], 1)
        assert(b[0] == -1)
        assert_almost_equal(b[1] - b[0], 0.1, 11)
        assert_almost_equal(b[-1], b[0] + 19 * 0.1, 11)
        assert_almost_equal(a[1] - a[0], 2.0 / 9.0, 11)

    def test_linspace_equivalence(self):
        y, st = np.linspace(2, 10, retstep=1)
        assert_almost_equal(st, 8 / 49.0)
        assert_array_almost_equal(y, mgrid[2:10:50j], 13)

    def test_nd(self):
        c = mgrid[-1:1:10j, -2:2:10j]
        d = mgrid[-1:1:0.1, -2:2:0.2]
        assert(c.shape == (2, 10, 10))
        assert(d.shape == (2, 20, 20))
        assert_array_equal(c[0][0, :], -ones(10, 'd'))
        assert_array_equal(c[1][:, 0], -2 * ones(10, 'd'))
        assert_array_almost_equal(c[0][-1, :], ones(10, 'd'), 11)
        assert_array_almost_equal(c[1][:, -1], 2 * ones(10, 'd'), 11)
        assert_array_almost_equal(d[0, 1, :] - d[0, 0, :],
                                  0.1 * ones(20, 'd'), 11)
        assert_array_almost_equal(d[1, :, 1] - d[1, :, 0],
                                  0.2 * ones(20, 'd'), 11)


class TestConcatenator(TestCase):
    def test_1d(self):
        assert_array_equal(r_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6]))
        b = ones(5)
        c = r_[b, 0, 0, b]
        assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])

    def test_mixed_type(self):
        g = r_[10.1, 1:10]
        assert(g.dtype == 'f8')

    def test_more_mixed_type(self):
        g = r_[-10.1, array([1]), array([2, 3, 4]), 10.0]
        assert(g.dtype == 'f8')

    def test_2d(self):
        b = rand(5, 5)
        c = rand(5, 5)
        d = r_['1', b, c]  # append columns
        assert(d.shape == (5, 10))
        assert_array_equal(d[:, :5], b)
        assert_array_equal(d[:, 5:], c)
        d = r_[b, c]
        assert(d.shape == (10, 5))
        assert_array_equal(d[:5, :], b)
        assert_array_equal(d[5:, :], c)


class TestNdenumerate(TestCase):
    def test_basic(self):
        a = array([[1, 2], [3, 4]])
        assert_equal(list(ndenumerate(a)),
                     [((0, 0), 1), ((0, 1), 2), ((1, 0), 3), ((1, 1), 4)])


class TestIndexExpression(TestCase):
    def test_regression_1(self):
        # ticket #1196
        a = np.arange(2)
        assert_equal(a[:-1], a[s_[:-1]])
        assert_equal(a[:-1], a[index_exp[:-1]])

    def test_simple_1(self):
        a = np.random.rand(4, 5, 6)
        assert_equal(a[:, :3, [1, 2]], a[index_exp[:, :3, [1, 2]]])
        assert_equal(a[:, :3, [1, 2]], a[s_[:, :3, [1, 2]]])


def test_fill_diagonal():
    a = zeros((3, 3), int)
    fill_diagonal(a, 5)
    yield (assert_array_equal, a,
           array([[5, 0, 0],
                  [0, 5, 0],
                  [0, 0, 5]]))

    # The same function can operate on a 4-d array:
    a = zeros((3, 3, 3, 3), int)
    fill_diagonal(a, 4)
    i = array([0, 1, 2])
    yield (assert_equal, where(a != 0), (i, i, i, i))


def test_diag_indices():
    di = diag_indices(4)
    a = array([[1, 2, 3, 4],
               [5, 6, 7, 8],
               [9, 10, 11, 12],
               [13, 14, 15, 16]])
    a[di] = 100
    yield (assert_array_equal, a,
           array([[100, 2, 3, 4],
                  [5, 100, 7, 8],
                  [9, 10, 100, 12],
                  [13, 14, 15, 100]]))

    # Now, we create indices to manipulate a 3-d array:
    d3 = diag_indices(2, 3)

    # And use it to set the diagonal of a zeros array to 1:
    a = zeros((2, 2, 2), int)
    a[d3] = 1
    yield (assert_array_equal, a,
           array([[[1, 0],
                   [0, 0]],
                  [[0, 0],
                   [0, 1]]]))


def test_diag_indices_from():
    x = np.random.random((4, 4))
    r, c = diag_indices_from(x)
    assert_array_equal(r, np.arange(4))
    assert_array_equal(c, np.arange(4))


if __name__ == "__main__":
    run_module_suite()
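
# ---------------------------------------------------------------------------
# Quick reference, not part of the original test file: doctest-style
# illustrations of the index tricks exercised above.
#
# >>> import numpy as np
# >>> np.mgrid[-1:1:5j]                 # complex step => number of points
# array([-1. , -0.5,  0. ,  0.5,  1. ])
# >>> np.r_['1', np.ones((2, 2)), np.zeros((2, 1))].shape   # axis-1 concat
# (2, 3)
# >>> np.unravel_index(6, (2, 4))       # flat index 6 in a C-ordered 2x4
# (1, 2)
# ---------------------------------------------------------------------------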
gpl-3.0
awkspace/ansible
lib/ansible/modules/source_control/gitlab_project.py
10
14539
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2019, Guillaume Martinez ([email protected])
# Copyright: (c) 2015, Werner Dijkerman ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = '''
---
module: gitlab_project
short_description: Creates/updates/deletes Gitlab Projects
description:
  - When the project does not exist in Gitlab, it will be created.
  - When the project does exist and state=absent, the project will be deleted.
  - When changes are made to the project, the project will be updated.
version_added: "2.1"
author:
  - Werner Dijkerman (@dj-wasabi)
  - Guillaume Martinez (@Lunik)
requirements:
  - python >= 2.7
  - python-gitlab python module
extends_documentation_fragment:
  - auth_basic
options:
  server_url:
    description:
      - The URL of the Gitlab server, with protocol (i.e. http or https).
    required: true
    type: str
  login_user:
    description:
      - Gitlab user name.
    type: str
  login_password:
    description:
      - Gitlab password for login_user
    type: str
  api_token:
    description:
      - Gitlab token for logging in.
    type: str
    aliases:
      - login_token
  group:
    description:
      - Id or the full path of the group of which this project belongs to.
    type: str
  name:
    description:
      - The name of the project
    required: true
    type: str
  path:
    description:
      - The path of the project you want to create, this will be server_url/<group>/path
      - If not supplied, name will be used.
    type: str
  description:
    description:
      - A description for the project.
    type: str
  issues_enabled:
    description:
      - Whether you want to create issues or not.
      - Possible values are true and false.
    type: bool
    default: yes
  merge_requests_enabled:
    description:
      - If merge requests can be made or not.
      - Possible values are true and false.
    type: bool
    default: yes
  wiki_enabled:
    description:
      - If a wiki for this project should be available or not.
      - Possible values are true and false.
    type: bool
    default: yes
  snippets_enabled:
    description:
      - If creating snippets should be available or not.
      - Possible values are true and false.
    type: bool
    default: yes
  visibility:
    description:
      - Private. Project access must be granted explicitly for each user.
      - Internal. The project can be cloned by any logged in user.
      - Public. The project can be cloned without any authentication.
    default: private
    type: str
    choices: ["private", "internal", "public"]
    aliases:
      - visibility_level
  import_url:
    description:
      - Git repository which will be imported into gitlab.
      - Gitlab server needs read access to this git repository.
    required: false
    type: str
  state:
    description:
      - create or delete project.
      - Possible values are present and absent.
    default: present
    type: str
    choices: ["present", "absent"]
'''

EXAMPLES = '''
- name: Delete Gitlab Project
  gitlab_project:
    api_url: https://gitlab.example.com/
    api_token: "{{ access_token }}"
    validate_certs: False
    name: my_first_project
    state: absent
  delegate_to: localhost

- name: Create Gitlab Project in group Ansible
  gitlab_project:
    api_url: https://gitlab.example.com/
    validate_certs: True
    api_username: dj-wasabi
    api_password: "MySecretPassword"
    name: my_first_project
    group: ansible
    issues_enabled: False
    wiki_enabled: True
    snippets_enabled: True
    import_url: http://git.example.com/example/lab.git
    state: present
  delegate_to: localhost
'''

RETURN = '''
msg:
  description: Success or failure message
  returned: always
  type: str
  sample: "Success"

result:
  description: json parsed response from the server
  returned: always
  type: dict

error:
  description: the error message returned by the Gitlab API
  returned: failed
  type: str
  sample: "400: path is already in use"

project:
  description: API object
  returned: always
  type: dict
'''

import os
import traceback

GITLAB_IMP_ERR = None
try:
    import gitlab
    HAS_GITLAB_PACKAGE = True
except Exception:
    GITLAB_IMP_ERR = traceback.format_exc()
    HAS_GITLAB_PACKAGE = False

from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils._text import to_native

from ansible.module_utils.gitlab import findGroup, findProject


class GitLabProject(object):
    def __init__(self, module, gitlab_instance):
        self._module = module
        self._gitlab = gitlab_instance
        self.projectObject = None

    '''
    @param project_name Name of the project
    @param namespace Namespace Object (User or Group)
    @param options Options of the project
    '''
    def createOrUpdateProject(self, project_name, namespace, options):
        changed = False

        # Because we have already called userExists in main()
        if self.projectObject is None:
            project = self.createProject(namespace, {
                'name': project_name,
                'path': options['path'],
                'description': options['description'],
                'issues_enabled': options['issues_enabled'],
                'merge_requests_enabled': options['merge_requests_enabled'],
                'wiki_enabled': options['wiki_enabled'],
                'snippets_enabled': options['snippets_enabled'],
                'visibility': options['visibility'],
                'import_url': options['import_url']})
            changed = True
        else:
            changed, project = self.updateProject(self.projectObject, {
                'name': project_name,
                'description': options['description'],
                'issues_enabled': options['issues_enabled'],
                'merge_requests_enabled': options['merge_requests_enabled'],
                'wiki_enabled': options['wiki_enabled'],
                'snippets_enabled': options['snippets_enabled'],
                'visibility': options['visibility']})

        self.projectObject = project
        if changed:
            if self._module.check_mode:
                self._module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name)

            try:
                project.save()
            except Exception as e:
                self._module.fail_json(msg="Failed to update project: %s " % e)
            return True
        else:
            return False

    '''
    @param namespace Namespace Object (User or Group)
    @param arguments Attributes of the project
    '''
    def createProject(self, namespace, arguments):
        if self._module.check_mode:
            return True

        arguments['namespace_id'] = namespace.id
        try:
            project = self._gitlab.projects.create(arguments)
        except (gitlab.exceptions.GitlabCreateError) as e:
            self._module.fail_json(msg="Failed to create project: %s " % to_native(e))

        return project

    '''
    @param project Project Object
    @param arguments Attributes of the project
    '''
    def updateProject(self, project, arguments):
        changed = False

        for arg_key, arg_value in arguments.items():
            if arguments[arg_key] is not None:
                if getattr(project, arg_key) != arguments[arg_key]:
                    setattr(project, arg_key, arguments[arg_key])
                    changed = True

        return (changed, project)

    def deleteProject(self):
        if self._module.check_mode:
            return True

        project = self.projectObject

        return project.delete()

    '''
    @param namespace User/Group object
    @param name Name of the project
    '''
    def existsProject(self, namespace, path):
        # When project exists, object will be stored in self.projectObject.
        project = findProject(self._gitlab, namespace.full_path + '/' + path)
        if project:
            self.projectObject = project
            return True
        return False


def deprecation_warning(module):
    deprecated_aliases = ['login_token']

    module.deprecate("Aliases \'{aliases}\' are deprecated".format(aliases='\', \''.join(deprecated_aliases)), 2.10)


def main():
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        server_url=dict(type='str', required=True, removed_in_version=2.10),
        login_user=dict(type='str', no_log=True, removed_in_version=2.10),
        login_password=dict(type='str', no_log=True, removed_in_version=2.10),
        api_token=dict(type='str', no_log=True, aliases=["login_token"]),
        group=dict(type='str'),
        name=dict(type='str', required=True),
        path=dict(type='str'),
        description=dict(type='str'),
        issues_enabled=dict(type='bool', default=True),
        merge_requests_enabled=dict(type='bool', default=True),
        wiki_enabled=dict(type='bool', default=True),
        snippets_enabled=dict(default=True, type='bool'),
        visibility=dict(type='str', default="private", choices=["internal", "private", "public"], aliases=["visibility_level"]),
        import_url=dict(type='str'),
        state=dict(type='str', default="present", choices=["absent", "present"]),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['api_url', 'server_url'],
            ['api_username', 'login_user'],
            ['api_password', 'login_password'],
            ['api_username', 'api_token'],
            ['api_password', 'api_token'],
            ['login_user', 'login_token'],
            ['login_password', 'login_token']
        ],
        required_together=[
            ['api_username', 'api_password'],
            ['login_user', 'login_password'],
        ],
        required_one_of=[
            ['api_username', 'api_token', 'login_user', 'login_token']
        ],
        supports_check_mode=True,
    )

    deprecation_warning(module)

    server_url = module.params['server_url']
    login_user = module.params['login_user']
    login_password = module.params['login_password']

    api_url = module.params['api_url']
    validate_certs = module.params['validate_certs']
    api_user = module.params['api_username']
    api_password = module.params['api_password']

    gitlab_url = server_url if api_url is None else api_url
    gitlab_user = login_user if api_user is None else api_user
    gitlab_password = login_password if api_password is None else api_password
    gitlab_token = module.params['api_token']

    group_identifier = module.params['group']
    project_name = module.params['name']
    project_path = module.params['path']
    project_description = module.params['description']
    issues_enabled = module.params['issues_enabled']
    merge_requests_enabled = module.params['merge_requests_enabled']
    wiki_enabled = module.params['wiki_enabled']
    snippets_enabled = module.params['snippets_enabled']
    visibility = module.params['visibility']
    import_url = module.params['import_url']
    state = module.params['state']

    if not HAS_GITLAB_PACKAGE:
        module.fail_json(msg=missing_required_lib("python-gitlab"), exception=GITLAB_IMP_ERR)

    try:
        gitlab_instance = gitlab.Gitlab(url=gitlab_url, ssl_verify=validate_certs, email=gitlab_user, password=gitlab_password,
                                        private_token=gitlab_token, api_version=4)
        gitlab_instance.auth()
    except (gitlab.exceptions.GitlabAuthenticationError, gitlab.exceptions.GitlabGetError) as e:
        module.fail_json(msg="Failed to connect to Gitlab server: %s" % to_native(e))
    except (gitlab.exceptions.GitlabHttpError) as e:
        module.fail_json(msg="Failed to connect to Gitlab server: %s. \
            Gitlab removed the Session API now that private tokens are removed from user API endpoints since version 10.2." % to_native(e))

    # Set project_path to project_name if it is empty.
    if project_path is None:
        project_path = project_name.replace(" ", "_")

    gitlab_project = GitLabProject(module, gitlab_instance)

    if group_identifier:
        group = findGroup(gitlab_instance, group_identifier)
        if group is None:
            module.fail_json(msg="Failed to create project: group %s doesn't exist" % group_identifier)

        namespace = gitlab_instance.namespaces.get(group.id)
        project_exists = gitlab_project.existsProject(namespace, project_path)
    else:
        user = gitlab_instance.users.list(username=gitlab_instance.user.username)[0]
        namespace = gitlab_instance.namespaces.get(user.id)
        project_exists = gitlab_project.existsProject(namespace, project_path)

    if state == 'absent':
        if project_exists:
            gitlab_project.deleteProject()
            module.exit_json(changed=True, msg="Successfully deleted project %s" % project_name)
        else:
            module.exit_json(changed=False, msg="Project deleted or does not exist")

    if state == 'present':
        if gitlab_project.createOrUpdateProject(project_name, namespace, {
                                                "path": project_path,
                                                "description": project_description,
                                                "issues_enabled": issues_enabled,
                                                "merge_requests_enabled": merge_requests_enabled,
                                                "wiki_enabled": wiki_enabled,
                                                "snippets_enabled": snippets_enabled,
                                                "visibility": visibility,
                                                "import_url": import_url}):
            module.exit_json(changed=True, msg="Successfully created or updated the project %s" % project_name, project=gitlab_project.projectObject._attrs)
        else:
            module.exit_json(changed=False, msg="No need to update the project %s" % project_name, project=gitlab_project.projectObject._attrs)


if __name__ == '__main__':
    main()
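
# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: updateProject above
# relies on python-gitlab's "mutate attributes, then save()" idiom. Used
# standalone (the URL, token and project path below are placeholders), the
# same idiom looks like this:
#
# import gitlab
#
# gl = gitlab.Gitlab('https://gitlab.example.com',
#                    private_token='TOKEN', api_version=4)
# gl.auth()
# project = gl.projects.get('ansible/my_first_project')
# if project.description != 'updated description':
#     project.description = 'updated description'  # plain setattr, as above
#     project.save()                               # pushes the changed attrs
# ---------------------------------------------------------------------------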
gpl-3.0
tomaslaz/KLMC_Analysis
thirdparty/JPype-0.5.4.2/src/python/jpype/_refdaemon.py
8
1084
#*****************************************************************************
#   Copyright 2004-2008 Steve Menard
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.
#
#*****************************************************************************
import thread

import _jpype


def startJava():
    _jpype.startReferenceQueue(1)


def startPython():
    def _run():
        _jpype.attachThreadToJVM()
        _jpype.startReferenceQueue(0)

    thread.start_new_thread(_run, tuple())


def stop():
    _jpype.stopReferenceQueue()
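
# ---------------------------------------------------------------------------
# Illustrative note, not part of the original module: these helpers are
# internal to JPype and are normally invoked by jpype itself while the JVM
# is being started, not by user code. The intended call pattern is roughly:
#
# import _refdaemon
#
# _refdaemon.startPython()   # daemon thread attaches itself to the JVM and
#                            # runs the native reference-queue loop
# ...                        # application code using jpype proxies
# _refdaemon.stop()          # stop the reference queue at shutdown
# ---------------------------------------------------------------------------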
gpl-3.0