repo_name (stringlengths 5–100) | path (stringlengths 4–375) | copies (stringclasses 991 values) | size (stringlengths 4–7) | content (stringlengths 666–1M) | license (stringclasses 15 values)
---|---|---|---|---|---|
manaris/jythonMusic | library/jython2.5.3/Lib/wsgiref/validate.py | 162 | 14737 | # (c) 2005 Ian Bicking and contributors; written for Paste (http://pythonpaste.org)
# Licensed under the MIT license: http://www.opensource.org/licenses/mit-license.php
# Also licensed under the Apache License, 2.0: http://opensource.org/licenses/apache2.0.php
# Licensed to PSF under a Contributor Agreement
"""
Middleware to check for obedience to the WSGI specification.
Some of the things this checks:
* Signature of the application and start_response (including that
keyword arguments are not used).
* Environment checks:
- Environment is a dictionary (and not a subclass).
- That all the required keys are in the environment: REQUEST_METHOD,
SERVER_NAME, SERVER_PORT, wsgi.version, wsgi.input, wsgi.errors,
wsgi.multithread, wsgi.multiprocess, wsgi.run_once
- That HTTP_CONTENT_TYPE and HTTP_CONTENT_LENGTH are not in the
environment (these headers should appear as CONTENT_LENGTH and
CONTENT_TYPE).
- Warns if QUERY_STRING is missing, as the cgi module acts
unpredictably in that case.
- That CGI-style variables (that don't contain a .) have
(non-unicode) string values
- That wsgi.version is a tuple
- That wsgi.url_scheme is 'http' or 'https' (@@: is this too
restrictive?)
- Warns if the REQUEST_METHOD is not known (@@: probably too
restrictive).
- That SCRIPT_NAME and PATH_INFO are empty or start with /
- That at least one of SCRIPT_NAME or PATH_INFO is set.
- That CONTENT_LENGTH is a positive integer.
- That SCRIPT_NAME is not '/' (it should be '', and PATH_INFO should
be '/').
- That wsgi.input has the methods read, readline, readlines, and
__iter__
- That wsgi.errors has the methods flush, write, writelines
* The status is a string, contains a space, starts with an integer,
and that integer is in range (> 100).
* That the headers is a list (not a subclass, not another kind of
sequence).
* That the items of the headers are tuples of strings.
* That there is no 'status' header (that is used in CGI, but not in
WSGI).
* That the header names don't contain newlines or colons and don't end
in _ or -, and that header values don't contain character codes below 037.
* That Content-Type is given if there is content (CGI often has a
default content type, but WSGI does not).
* That no Content-Type is given when there is no content (@@: is this
too restrictive?)
* That the exc_info argument to start_response is a tuple or None.
* That all calls to the writer are with strings, and no other methods
on the writer are accessed.
* That wsgi.input is used properly:
- .read() is called with zero or one argument
- That it returns a string
- That readline, readlines, and __iter__ return strings
- That .close() is not called
- No other methods are provided
* That wsgi.errors is used properly:
- .write() and .writelines() are called with strings
- That .close() is not called, and no other methods are provided.
* The response iterator:
- That it is not a string (it should be a list containing a single
string; a string will work, but will perform horribly).
- That .next() returns a string
- That the iterator is not iterated over until start_response has
been called (that can signal either a server or application
error).
- That .close() is called (doesn't raise exception, only prints to
sys.stderr, because we only know it isn't called when the object
is garbage collected).
"""
__all__ = ['validator']
import re
import sys
from types import DictType, StringType, TupleType, ListType
import warnings
header_re = re.compile(r'^[a-zA-Z][a-zA-Z0-9\-_]*$')
bad_header_value_re = re.compile(r'[\000-\037]')
class WSGIWarning(Warning):
"""
Raised in response to WSGI-spec-related warnings
"""
def assert_(cond, *args):
if not cond:
raise AssertionError(*args)
def validator(application):
"""
When applied between a WSGI server and a WSGI application, this
middleware will check for WSGI compliance on a number of levels.
This middleware does not modify the request or response in any
way, but will throw an AssertionError if anything seems off
(except for a failure to close the application iterator, which
will be printed to stderr -- there's no way to throw an exception
at that point).
"""
def lint_app(*args, **kw):
assert_(len(args) == 2, "Two arguments required")
assert_(not kw, "No keyword arguments allowed")
environ, start_response = args
check_environ(environ)
# We use this to check if the application returns without
# calling start_response:
start_response_started = []
def start_response_wrapper(*args, **kw):
assert_(len(args) == 2 or len(args) == 3, (
"Invalid number of arguments: %s" % (args,)))
assert_(not kw, "No keyword arguments allowed")
status = args[0]
headers = args[1]
if len(args) == 3:
exc_info = args[2]
else:
exc_info = None
check_status(status)
check_headers(headers)
check_content_type(status, headers)
check_exc_info(exc_info)
start_response_started.append(None)
return WriteWrapper(start_response(*args))
environ['wsgi.input'] = InputWrapper(environ['wsgi.input'])
environ['wsgi.errors'] = ErrorWrapper(environ['wsgi.errors'])
iterator = application(environ, start_response_wrapper)
assert_(iterator is not None and iterator != False,
"The application must return an iterator, if only an empty list")
check_iterator(iterator)
return IteratorWrapper(iterator, start_response_started)
return lint_app
class InputWrapper:
def __init__(self, wsgi_input):
self.input = wsgi_input
def read(self, *args):
assert_(len(args) <= 1)
v = self.input.read(*args)
assert_(type(v) is type(""))
return v
def readline(self):
v = self.input.readline()
assert_(type(v) is type(""))
return v
def readlines(self, *args):
assert_(len(args) <= 1)
lines = self.input.readlines(*args)
assert_(type(lines) is type([]))
for line in lines:
assert_(type(line) is type(""))
return lines
def __iter__(self):
while 1:
line = self.readline()
if not line:
return
yield line
def close(self):
assert_(0, "input.close() must not be called")
class ErrorWrapper:
def __init__(self, wsgi_errors):
self.errors = wsgi_errors
def write(self, s):
assert_(type(s) is type(""))
self.errors.write(s)
def flush(self):
self.errors.flush()
def writelines(self, seq):
for line in seq:
self.write(line)
def close(self):
assert_(0, "errors.close() must not be called")
class WriteWrapper:
def __init__(self, wsgi_writer):
self.writer = wsgi_writer
def __call__(self, s):
assert_(type(s) is type(""))
self.writer(s)
class PartialIteratorWrapper:
def __init__(self, wsgi_iterator):
self.iterator = wsgi_iterator
def __iter__(self):
# We want to make sure __iter__ is called
return IteratorWrapper(self.iterator, None)
class IteratorWrapper:
def __init__(self, wsgi_iterator, check_start_response):
self.original_iterator = wsgi_iterator
self.iterator = iter(wsgi_iterator)
self.closed = False
self.check_start_response = check_start_response
def __iter__(self):
return self
def next(self):
assert_(not self.closed,
"Iterator read after closed")
v = self.iterator.next()
if self.check_start_response is not None:
assert_(self.check_start_response,
"The application returns and we started iterating over its body, but start_response has not yet been called")
self.check_start_response = None
return v
def close(self):
self.closed = True
if hasattr(self.original_iterator, 'close'):
self.original_iterator.close()
def __del__(self):
if not self.closed:
sys.stderr.write(
"Iterator garbage collected without being closed")
assert_(self.closed,
"Iterator garbage collected without being closed")
def check_environ(environ):
assert_(type(environ) is DictType,
"Environment is not of the right type: %r (environment: %r)"
% (type(environ), environ))
for key in ['REQUEST_METHOD', 'SERVER_NAME', 'SERVER_PORT',
'wsgi.version', 'wsgi.input', 'wsgi.errors',
'wsgi.multithread', 'wsgi.multiprocess',
'wsgi.run_once']:
assert_(key in environ,
"Environment missing required key: %r" % (key,))
for key in ['HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH']:
assert_(key not in environ,
"Environment should not have the key: %s "
"(use %s instead)" % (key, key[5:]))
if 'QUERY_STRING' not in environ:
warnings.warn(
'QUERY_STRING is not in the WSGI environment; the cgi '
'module will use sys.argv when this variable is missing, '
'so application errors are more likely',
WSGIWarning)
for key in environ.keys():
if '.' in key:
# Extension, we don't care about its type
continue
assert_(type(environ[key]) is StringType,
"Environmental variable %s is not a string: %r (value: %r)"
% (key, type(environ[key]), environ[key]))
assert_(type(environ['wsgi.version']) is TupleType,
"wsgi.version should be a tuple (%r)" % (environ['wsgi.version'],))
assert_(environ['wsgi.url_scheme'] in ('http', 'https'),
"wsgi.url_scheme unknown: %r" % environ['wsgi.url_scheme'])
check_input(environ['wsgi.input'])
check_errors(environ['wsgi.errors'])
# @@: these need filling out:
if environ['REQUEST_METHOD'] not in (
'GET', 'HEAD', 'POST', 'OPTIONS','PUT','DELETE','TRACE'):
warnings.warn(
"Unknown REQUEST_METHOD: %r" % environ['REQUEST_METHOD'],
WSGIWarning)
assert_(not environ.get('SCRIPT_NAME')
or environ['SCRIPT_NAME'].startswith('/'),
"SCRIPT_NAME doesn't start with /: %r" % environ['SCRIPT_NAME'])
assert_(not environ.get('PATH_INFO')
or environ['PATH_INFO'].startswith('/'),
"PATH_INFO doesn't start with /: %r" % environ['PATH_INFO'])
if environ.get('CONTENT_LENGTH'):
assert_(int(environ['CONTENT_LENGTH']) >= 0,
"Invalid CONTENT_LENGTH: %r" % environ['CONTENT_LENGTH'])
if not environ.get('SCRIPT_NAME'):
assert_(environ.has_key('PATH_INFO'),
"One of SCRIPT_NAME or PATH_INFO are required (PATH_INFO "
"should at least be '/' if SCRIPT_NAME is empty)")
assert_(environ.get('SCRIPT_NAME') != '/',
"SCRIPT_NAME cannot be '/'; it should instead be '', and "
"PATH_INFO should be '/'")
def check_input(wsgi_input):
for attr in ['read', 'readline', 'readlines', '__iter__']:
assert_(hasattr(wsgi_input, attr),
"wsgi.input (%r) doesn't have the attribute %s"
% (wsgi_input, attr))
def check_errors(wsgi_errors):
for attr in ['flush', 'write', 'writelines']:
assert_(hasattr(wsgi_errors, attr),
"wsgi.errors (%r) doesn't have the attribute %s"
% (wsgi_errors, attr))
def check_status(status):
assert_(type(status) is StringType,
"Status must be a string (not %r)" % status)
# Implicitly check that we can turn it into an integer:
status_code = status.split(None, 1)[0]
assert_(len(status_code) == 3,
"Status codes must be three characters: %r" % status_code)
status_int = int(status_code)
assert_(status_int >= 100, "Status code is invalid: %r" % status_int)
if len(status) < 4 or status[3] != ' ':
warnings.warn(
"The status string (%r) should be a three-digit integer "
"followed by a single space and a status explanation"
% status, WSGIWarning)
def check_headers(headers):
assert_(type(headers) is ListType,
"Headers (%r) must be of type list: %r"
% (headers, type(headers)))
header_names = {}
for item in headers:
assert_(type(item) is TupleType,
"Individual headers (%r) must be of type tuple: %r"
% (item, type(item)))
assert_(len(item) == 2)
name, value = item
assert_(name.lower() != 'status',
"The Status header cannot be used; it conflicts with CGI "
"script, and HTTP status is not given through headers "
"(value: %r)." % value)
header_names[name.lower()] = None
assert_('\n' not in name and ':' not in name,
"Header names may not contain ':' or '\\n': %r" % name)
assert_(header_re.search(name), "Bad header name: %r" % name)
assert_(not name.endswith('-') and not name.endswith('_'),
"Names may not end in '-' or '_': %r" % name)
if bad_header_value_re.search(value):
assert_(0, "Bad header value: %r (bad char: %r)"
% (value, bad_header_value_re.search(value).group(0)))
def check_content_type(status, headers):
code = int(status.split(None, 1)[0])
# @@: need one more person to verify this interpretation of RFC 2616
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html
NO_MESSAGE_BODY = (204, 304)
for name, value in headers:
if name.lower() == 'content-type':
if code not in NO_MESSAGE_BODY:
return
assert_(0, ("Content-Type header found in a %s response, "
"which must not return content.") % code)
if code not in NO_MESSAGE_BODY:
assert_(0, "No Content-Type header found in headers (%s)" % headers)
def check_exc_info(exc_info):
assert_(exc_info is None or type(exc_info) is type(()),
"exc_info (%r) is not a tuple: %r" % (exc_info, type(exc_info)))
# More exc_info checks?
def check_iterator(iterator):
# Technically a string is legal, but it's a really bad idea, because
# it may cause the response to be returned character-by-character
assert_(not isinstance(iterator, str),
"You should not return a string as your application iterator, "
"instead return a single-item list containing that string.")
| gpl-3.0 |
JioCloud/horizon | openstack_dashboard/dashboards/project/data_processing/data_image_registry/forms.py | 3 | 4238 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard.api import glance
from openstack_dashboard.api import sahara as saharaclient
class ImageForm(forms.SelfHandlingForm):
image_id = forms.CharField(widget=forms.HiddenInput())
tags_list = forms.CharField(widget=forms.HiddenInput())
user_name = forms.CharField(max_length=80, label=_("User Name"))
description = forms.CharField(max_length=80,
label=_("Description"),
required=False,
widget=forms.Textarea(attrs={'cols': 80,
'rows': 20}))
def handle(self, request, data):
try:
image_id = data['image_id']
user_name = data['user_name']
desc = data['description']
saharaclient.image_update(request, image_id, user_name, desc)
image_tags = json.loads(data["tags_list"])
saharaclient.image_tags_update(request, image_id, image_tags)
messages.success(request,
_("Successfully updated image."))
return True
except Exception:
exceptions.handle(request,
_("Failed to update image."))
return False
class EditTagsForm(ImageForm):
image_id = forms.CharField(widget=forms.HiddenInput())
class RegisterImageForm(ImageForm):
image_id = forms.ChoiceField(label=_("Image"))
def __init__(self, request, *args, **kwargs):
super(RegisterImageForm, self).__init__(request, *args, **kwargs)
self._populate_image_id_choices()
def _populate_image_id_choices(self):
images = self._get_available_images(self.request)
choices = [(image.id, image.name)
for image in images
if image.properties.get("image_type", '') != "snapshot"]
if choices:
choices.insert(0, ("", _("Select Image")))
else:
choices.insert(0, ("", _("No images available.")))
self.fields['image_id'].choices = choices
def _get_images(self, request, filter):
try:
images, _more, _prev = (
glance.image_list_detailed(request, filters=filter))
except Exception:
images = []
exceptions.handle(request,
_("Unable to retrieve images with filter %s.") %
filter)
return images
def _get_public_images(self, request):
filter = {"is_public": True,
"status": "active"}
return self._get_images(request, filter)
def _get_tenant_images(self, request):
filter = {"owner": request.user.tenant_id,
"status": "active"}
return self._get_images(request, filter)
def _get_available_images(self, request):
images = self._get_tenant_images(request)
if request.user.is_superuser:
images += self._get_public_images(request)
final_images = []
try:
image_ids = set(img.id for img in saharaclient.image_list(request))
except Exception:
image_ids = set()
exceptions.handle(request,
_("Unable to fetch available images."))
for image in images:
if (image.id not in image_ids and
image.container_format not in ('aki', 'ari')):
final_images.append(image)
return final_images
| apache-2.0 |
wskplho/sl4a | python-build/python-libs/gdata/src/gdata/Crypto/Protocol/AllOrNothing.py | 226 | 10952 | """This file implements all-or-nothing package transformations.
An all-or-nothing package transformation is one in which some text is
transformed into message blocks, such that all blocks must be obtained before
the reverse transformation can be applied. Thus, if any blocks are corrupted
or lost, the original message cannot be reproduced.
An all-or-nothing package transformation is not encryption, although a block
cipher algorithm is used. The encryption key is randomly generated and is
extractable from the message blocks.
This class implements the All-Or-Nothing package transformation algorithm
described in:
Ronald L. Rivest. "All-Or-Nothing Encryption and The Package Transform"
http://theory.lcs.mit.edu/~rivest/fusion.pdf
"""
__revision__ = "$Id: AllOrNothing.py,v 1.8 2003/02/28 15:23:20 akuchling Exp $"
import operator
import string
from Crypto.Util.number import bytes_to_long, long_to_bytes
class AllOrNothing:
"""Class implementing the All-or-Nothing package transform.
Methods for subclassing:
_inventkey(key_size):
Returns a randomly generated key. Subclasses can use this to
implement better random key generating algorithms. The default
algorithm is probably not very cryptographically secure.
"""
def __init__(self, ciphermodule, mode=None, IV=None):
"""AllOrNothing(ciphermodule, mode=None, IV=None)
ciphermodule is a module implementing the cipher algorithm to
use. It must provide the PEP272 interface.
Note that the encryption key is randomly generated
automatically when needed. Optional arguments mode and IV are
passed directly through to the ciphermodule.new() method; they
are the feedback mode and initialization vector to use. All
three arguments must be the same for the object used to create
the digest, and to undigest'ify the message blocks.
"""
self.__ciphermodule = ciphermodule
self.__mode = mode
self.__IV = IV
self.__key_size = ciphermodule.key_size
if self.__key_size == 0:
self.__key_size = 16
__K0digit = chr(0x69)
def digest(self, text):
"""digest(text:string) : [string]
Perform the All-or-Nothing package transform on the given
string. Output is a list of message blocks describing the
transformed text, where each block is a string of bit length equal
to the ciphermodule's block_size.
"""
# generate a random session key and K0, the key used to encrypt the
# hash blocks. Rivest calls this a fixed, publicly known encryption
# key, but says nothing about the security implications of this key or
# how to choose it.
key = self._inventkey(self.__key_size)
K0 = self.__K0digit * self.__key_size
# we need two cipher objects here, one that is used to encrypt the
# message blocks and one that is used to encrypt the hashes. The
# former uses the randomly generated key, while the latter uses the
# well-known key.
mcipher = self.__newcipher(key)
hcipher = self.__newcipher(K0)
# Pad the text so that its length is a multiple of the cipher's
# block_size. Pad with trailing spaces, which will be eliminated in
# the undigest() step.
block_size = self.__ciphermodule.block_size
padbytes = block_size - (len(text) % block_size)
text = text + ' ' * padbytes
# Run through the algorithm:
# s: number of message blocks (size of text / block_size)
# input sequence: m1, m2, ... ms
# random key K' (`key' in the code)
# Compute output sequence: m'1, m'2, ... m's' for s' = s + 1
# Let m'i = mi ^ E(K', i) for i = 1, 2, 3, ..., s
# Let m's' = K' ^ h1 ^ h2 ^ ... hs
# where hi = E(K0, m'i ^ i) for i = 1, 2, ... s
#
# The one complication I add is that the last message block is hard
# coded to the number of padbytes added, so that these can be stripped
# during the undigest() step
s = len(text) / block_size
blocks = []
hashes = []
for i in range(1, s+1):
start = (i-1) * block_size
end = start + block_size
mi = text[start:end]
assert len(mi) == block_size
cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
mticki = bytes_to_long(mi) ^ bytes_to_long(cipherblock)
blocks.append(mticki)
# calculate the hash block for this block
hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size))
hashes.append(bytes_to_long(hi))
# Add the padbytes length as a message block
i = i + 1
cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
mticki = padbytes ^ bytes_to_long(cipherblock)
blocks.append(mticki)
# calculate this block's hash
hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size))
hashes.append(bytes_to_long(hi))
# Now calculate the last message block of the sequence 1..s'. This
# will contain the random session key XOR'd with all the hash blocks,
# so that for undigest(), once all the hash blocks are calculated, the
# session key can be trivially extracted. Calculating all the hash
# blocks requires that all the message blocks be received, thus the
# All-or-Nothing algorithm succeeds.
mtick_stick = bytes_to_long(key) ^ reduce(operator.xor, hashes)
blocks.append(mtick_stick)
# we convert the blocks to strings since in Python, byte sequences are
# always represented as strings. This is more consistent with the
# model that encryption and hash algorithms always operate on strings.
return map(long_to_bytes, blocks)
def undigest(self, blocks):
"""undigest(blocks : [string]) : string
Perform the reverse package transformation on a list of message
blocks. Note that the ciphermodule used for both transformations
must be the same. blocks is a list of strings of bit length
equal to the ciphermodule's block_size.
"""
# better have at least 2 blocks, for the padbytes package and the hash
# block accumulator
if len(blocks) < 2:
raise ValueError, "List must be at least length 2."
# blocks is a list of strings. We need to deal with them as long
# integers
blocks = map(bytes_to_long, blocks)
# Calculate the well-known key, to which the hash blocks are
# encrypted, and create the hash cipher.
K0 = self.__K0digit * self.__key_size
hcipher = self.__newcipher(K0)
# Since we have all the blocks (or this method would have been called
# prematurely), we can calculate all the hash blocks.
hashes = []
for i in range(1, len(blocks)):
mticki = blocks[i-1] ^ i
hi = hcipher.encrypt(long_to_bytes(mticki))
hashes.append(bytes_to_long(hi))
# now we can calculate K' (key). remember the last block contains
# m's' which we don't include here
key = blocks[-1] ^ reduce(operator.xor, hashes)
# and now we can create the cipher object
mcipher = self.__newcipher(long_to_bytes(key))
block_size = self.__ciphermodule.block_size
# And we can now decode the original message blocks
parts = []
for i in range(1, len(blocks)):
cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
mi = blocks[i-1] ^ bytes_to_long(cipherblock)
parts.append(mi)
# The last message block contains the number of pad bytes appended to
# the original text string, such that its length was an even multiple
# of the cipher's block_size. This number should be small enough that
# the conversion from long integer to integer should never overflow
padbytes = int(parts[-1])
text = string.join(map(long_to_bytes, parts[:-1]), '')
return text[:-padbytes]
def _inventkey(self, key_size):
# TBD: Not a very secure algorithm. Eventually, I'd like to use JHy's
# kernelrand module
import time
from Crypto.Util import randpool
# TBD: key_size * 2 to work around possible bug in RandomPool?
pool = randpool.RandomPool(key_size * 2)
while key_size > pool.entropy:
pool.add_event()
# we now have enough entropy in the pool to get a key_size'd key
return pool.get_bytes(key_size)
def __newcipher(self, key):
if self.__mode is None and self.__IV is None:
return self.__ciphermodule.new(key)
elif self.__IV is None:
return self.__ciphermodule.new(key, self.__mode)
else:
return self.__ciphermodule.new(key, self.__mode, self.__IV)
if __name__ == '__main__':
import sys
import getopt
import base64
usagemsg = '''\
Test module usage: %(program)s [-c cipher] [-l] [-h]
Where:
--cipher module
-c module
Cipher module to use. Default: %(ciphermodule)s
--aslong
-l
Print the encoded message blocks as long integers instead of base64
encoded strings
--help
-h
Print this help message
'''
ciphermodule = 'AES'
aslong = 0
def usage(code, msg=None):
if msg:
print msg
print usagemsg % {'program': sys.argv[0],
'ciphermodule': ciphermodule}
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:],
'c:l', ['cipher=', 'aslong'])
except getopt.error, msg:
usage(1, msg)
if args:
usage(1, 'Too many arguments')
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-c', '--cipher'):
ciphermodule = arg
elif opt in ('-l', '--aslong'):
aslong = 1
# ugly hack to force __import__ to give us the end-path module
module = __import__('Crypto.Cipher.'+ciphermodule, None, None, ['new'])
a = AllOrNothing(module)
print 'Original text:\n=========='
print __doc__
print '=========='
msgblocks = a.digest(__doc__)
print 'message blocks:'
for i, blk in map(None, range(len(msgblocks)), msgblocks):
# base64 adds a trailing newline
print ' %3d' % i,
if aslong:
print bytes_to_long(blk)
else:
print base64.encodestring(blk)[:-1]
#
# get a new undigest-only object so there's no leakage
b = AllOrNothing(module)
text = b.undigest(msgblocks)
if text == __doc__:
print 'They match!'
else:
print 'They differ!'
| apache-2.0 |
Samsung/skia | third_party/externals/gyp/test/win/gyptest-link-large-pdb.py | 218 | 2332 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure msvs_large_pdb works correctly.
"""
import TestGyp
import struct
import sys
CHDIR = 'large-pdb'
def CheckImageAndPdb(test, image_basename, expected_page_size,
pdb_basename=None):
if not pdb_basename:
pdb_basename = image_basename + '.pdb'
test.built_file_must_exist(image_basename, chdir=CHDIR)
test.built_file_must_exist(pdb_basename, chdir=CHDIR)
# We expect the PDB to have the given page size. For full details of the
# header look here: https://code.google.com/p/pdbparser/wiki/MSF_Format
# We read the little-endian 4-byte unsigned integer at position 32 of the
# file.
pdb_path = test.built_file_path(pdb_basename, chdir=CHDIR)
pdb_file = open(pdb_path, 'rb')
pdb_file.seek(32, 0)
page_size = struct.unpack('<I', pdb_file.read(4))[0]
if page_size != expected_page_size:
print "Expected page size of %d, got %d for PDB file `%s'." % (
expected_page_size, page_size, pdb_path)
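# For reference, the start of an MSF (PDB) file looks roughly like this
# (a sketch based on the format notes linked above, not a verified spec):
#
#   offset  0: magic string "Microsoft C/C++ MSF 7.00\r\n\x1aDS\0\0\0"
#   offset 32: page size as a little-endian uint32 (e.g. 1024 or 4096)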
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
test.run_gyp('large-pdb.gyp', chdir=CHDIR)
test.build('large-pdb.gyp', 'large_pdb_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_exe.exe', 4096)
test.build('large-pdb.gyp', 'small_pdb_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'small_pdb_exe.exe', 1024)
test.build('large-pdb.gyp', 'large_pdb_dll', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_dll.dll', 4096)
test.build('large-pdb.gyp', 'small_pdb_dll', chdir=CHDIR)
CheckImageAndPdb(test, 'small_pdb_dll.dll', 1024)
test.build('large-pdb.gyp', 'large_pdb_implicit_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_implicit_exe.exe', 4096)
# This target has a different PDB name because it uses an
# 'msvs_large_pdb_path' variable.
test.build('large-pdb.gyp', 'large_pdb_variable_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'large_pdb_variable_exe.exe', 4096,
pdb_basename='foo.pdb')
# This target has a different output name because it uses 'product_name'.
test.build('large-pdb.gyp', 'large_pdb_product_exe', chdir=CHDIR)
CheckImageAndPdb(test, 'bar.exe', 4096)
test.pass_test()
| bsd-3-clause |
epssy/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/gis/tests/geoapp/models.py | 109 | 1890 | from django.contrib.gis.db import models
from django.contrib.gis.tests.utils import mysql, spatialite
from django.utils.encoding import python_2_unicode_compatible
# MySQL spatial indices can't handle NULL geometries.
null_flag = not mysql
@python_2_unicode_compatible
class Country(models.Model):
name = models.CharField(max_length=30)
mpoly = models.MultiPolygonField() # SRID, by default, is 4326
objects = models.GeoManager()
def __str__(self): return self.name
@python_2_unicode_compatible
class City(models.Model):
name = models.CharField(max_length=30)
point = models.PointField()
objects = models.GeoManager()
def __str__(self): return self.name
# This is an inherited model from City
class PennsylvaniaCity(City):
county = models.CharField(max_length=30)
founded = models.DateTimeField(null=True)
objects = models.GeoManager() # TODO: This should be implicitly inherited.
@python_2_unicode_compatible
class State(models.Model):
name = models.CharField(max_length=30)
poly = models.PolygonField(null=null_flag) # Allowing NULL geometries here.
objects = models.GeoManager()
def __str__(self): return self.name
@python_2_unicode_compatible
class Track(models.Model):
name = models.CharField(max_length=30)
line = models.LineStringField()
objects = models.GeoManager()
def __str__(self): return self.name
class Truth(models.Model):
val = models.BooleanField(default=False)
objects = models.GeoManager()
if not spatialite:
@python_2_unicode_compatible
class Feature(models.Model):
name = models.CharField(max_length=20)
geom = models.GeometryField()
objects = models.GeoManager()
def __str__(self): return self.name
class MinusOneSRID(models.Model):
geom = models.PointField(srid=-1) # Minus one SRID.
objects = models.GeoManager()
| apache-2.0 |
craftytrickster/servo | tests/wpt/web-platform-tests/tools/pytest/testing/test_recwarn.py | 174 | 8300 | import warnings
import py
import pytest
from _pytest.recwarn import WarningsRecorder
def test_recwarn_functional(testdir):
reprec = testdir.inline_runsource("""
import warnings
oldwarn = warnings.showwarning
def test_method(recwarn):
assert warnings.showwarning != oldwarn
warnings.warn("hello")
warn = recwarn.pop()
assert isinstance(warn.message, UserWarning)
def test_finalized():
assert warnings.showwarning == oldwarn
""")
res = reprec.countoutcomes()
assert tuple(res) == (2, 0, 0), res
class TestWarningsRecorderChecker(object):
def test_recording(self, recwarn):
showwarning = py.std.warnings.showwarning
rec = WarningsRecorder()
with rec:
assert py.std.warnings.showwarning != showwarning
assert not rec.list
py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13)
assert len(rec.list) == 1
py.std.warnings.warn(DeprecationWarning("hello"))
assert len(rec.list) == 2
warn = rec.pop()
assert str(warn.message) == "hello"
l = rec.list
rec.clear()
assert len(rec.list) == 0
assert l is rec.list
pytest.raises(AssertionError, "rec.pop()")
assert showwarning == py.std.warnings.showwarning
def test_typechecking(self):
from _pytest.recwarn import WarningsChecker
with pytest.raises(TypeError):
WarningsChecker(5)
with pytest.raises(TypeError):
WarningsChecker(('hi', RuntimeWarning))
with pytest.raises(TypeError):
WarningsChecker([DeprecationWarning, RuntimeWarning])
def test_invalid_enter_exit(self):
# wrap this test in WarningsRecorder to ensure warning state gets reset
with WarningsRecorder():
with pytest.raises(RuntimeError):
rec = WarningsRecorder()
rec.__exit__(None, None, None) # can't exit before entering
with pytest.raises(RuntimeError):
rec = WarningsRecorder()
with rec:
with rec:
pass # can't enter twice
class TestDeprecatedCall(object):
"""test pytest.deprecated_call()"""
def dep(self, i, j=None):
if i == 0:
py.std.warnings.warn("is deprecated", DeprecationWarning,
stacklevel=1)
return 42
def dep_explicit(self, i):
if i == 0:
py.std.warnings.warn_explicit("dep_explicit", category=DeprecationWarning,
filename="hello", lineno=3)
def test_deprecated_call_raises(self):
with pytest.raises(AssertionError) as excinfo:
pytest.deprecated_call(self.dep, 3, 5)
assert str(excinfo).find("did not produce") != -1
def test_deprecated_call(self):
pytest.deprecated_call(self.dep, 0, 5)
def test_deprecated_call_ret(self):
ret = pytest.deprecated_call(self.dep, 0)
assert ret == 42
def test_deprecated_call_preserves(self):
onceregistry = py.std.warnings.onceregistry.copy()
filters = py.std.warnings.filters[:]
warn = py.std.warnings.warn
warn_explicit = py.std.warnings.warn_explicit
self.test_deprecated_call_raises()
self.test_deprecated_call()
assert onceregistry == py.std.warnings.onceregistry
assert filters == py.std.warnings.filters
assert warn is py.std.warnings.warn
assert warn_explicit is py.std.warnings.warn_explicit
def test_deprecated_explicit_call_raises(self):
with pytest.raises(AssertionError):
pytest.deprecated_call(self.dep_explicit, 3)
def test_deprecated_explicit_call(self):
pytest.deprecated_call(self.dep_explicit, 0)
pytest.deprecated_call(self.dep_explicit, 0)
def test_deprecated_call_as_context_manager_no_warning(self):
with pytest.raises(pytest.fail.Exception) as ex:
with pytest.deprecated_call():
self.dep(1)
assert str(ex.value) == "DID NOT WARN"
def test_deprecated_call_as_context_manager(self):
with pytest.deprecated_call():
self.dep(0)
def test_deprecated_call_pending(self):
def f():
py.std.warnings.warn(PendingDeprecationWarning("hi"))
pytest.deprecated_call(f)
def test_deprecated_call_specificity(self):
other_warnings = [Warning, UserWarning, SyntaxWarning, RuntimeWarning,
FutureWarning, ImportWarning, UnicodeWarning]
for warning in other_warnings:
def f():
py.std.warnings.warn(warning("hi"))
with pytest.raises(AssertionError):
pytest.deprecated_call(f)
def test_deprecated_function_already_called(self, testdir):
"""deprecated_call should be able to catch a call to a deprecated
function even if that function has already been called in the same
module. See #1190.
"""
testdir.makepyfile("""
import warnings
import pytest
def deprecated_function():
warnings.warn("deprecated", DeprecationWarning)
def test_one():
deprecated_function()
def test_two():
pytest.deprecated_call(deprecated_function)
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines('*=== 2 passed in *===')
class TestWarns(object):
def test_strings(self):
# different messages, b/c Python suppresses multiple identical warnings
source1 = "warnings.warn('w1', RuntimeWarning)"
source2 = "warnings.warn('w2', RuntimeWarning)"
source3 = "warnings.warn('w3', RuntimeWarning)"
pytest.warns(RuntimeWarning, source1)
pytest.raises(pytest.fail.Exception,
lambda: pytest.warns(UserWarning, source2))
pytest.warns(RuntimeWarning, source3)
def test_function(self):
pytest.warns(SyntaxWarning,
lambda msg: warnings.warn(msg, SyntaxWarning), "syntax")
def test_warning_tuple(self):
pytest.warns((RuntimeWarning, SyntaxWarning),
lambda: warnings.warn('w1', RuntimeWarning))
pytest.warns((RuntimeWarning, SyntaxWarning),
lambda: warnings.warn('w2', SyntaxWarning))
pytest.raises(pytest.fail.Exception,
lambda: pytest.warns(
(RuntimeWarning, SyntaxWarning),
lambda: warnings.warn('w3', UserWarning)))
def test_as_contextmanager(self):
with pytest.warns(RuntimeWarning):
warnings.warn("runtime", RuntimeWarning)
with pytest.raises(pytest.fail.Exception):
with pytest.warns(RuntimeWarning):
warnings.warn("user", UserWarning)
with pytest.raises(pytest.fail.Exception):
with pytest.warns(UserWarning):
warnings.warn("runtime", RuntimeWarning)
with pytest.warns(UserWarning):
warnings.warn("user", UserWarning)
def test_record(self):
with pytest.warns(UserWarning) as record:
warnings.warn("user", UserWarning)
assert len(record) == 1
assert str(record[0].message) == "user"
def test_record_only(self):
with pytest.warns(None) as record:
warnings.warn("user", UserWarning)
warnings.warn("runtime", RuntimeWarning)
assert len(record) == 2
assert str(record[0].message) == "user"
assert str(record[1].message) == "runtime"
def test_double_test(self, testdir):
"""If a test is run again, the warning should still be raised"""
testdir.makepyfile('''
import pytest
import warnings
@pytest.mark.parametrize('run', [1, 2])
def test(run):
with pytest.warns(RuntimeWarning):
warnings.warn("runtime", RuntimeWarning)
''')
result = testdir.runpytest()
result.stdout.fnmatch_lines(['*2 passed in*'])
| mpl-2.0 |
seankelly/buildbot | master/buildbot/steps/package/rpm/mock.py | 11 | 5749 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Portions Copyright Buildbot Team Members
# Portions Copyright Marius Rieder <[email protected]>
"""
Steps and objects related to mock building.
"""
from __future__ import absolute_import
from __future__ import print_function
import re
from buildbot import config
from buildbot.process import logobserver
from buildbot.process import remotecommand
from buildbot.steps.shell import ShellCommand
class MockStateObserver(logobserver.LogLineObserver):
_line_re = re.compile(r'^.*State Changed: (.*)$')
def outLineReceived(self, line):
m = self._line_re.search(line.strip())
if m:
state = m.group(1)
if state != 'end':
self.step.descriptionSuffix = ["[%s]" % m.group(1)]
else:
self.step.descriptionSuffix = None
self.step.step_status.setText(self.step.describe(False))
class Mock(ShellCommand):
"""Add the mock logfiles and clean them if they already exist. Add support
for the root and resultdir parameter of mock."""
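# A typical buildbot factory step (illustrative only; the mock root and
# file names below are hypothetical):
#
#     factory.addStep(MockBuildSRPM(root='fedora-rawhide-x86_64',
#                                   resultdir='results',
#                                   spec='package.spec',
#                                   sources='.'))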
name = "mock"
renderables = ["root", "resultdir"]
haltOnFailure = 1
flunkOnFailure = 1
mock_logfiles = ['build.log', 'root.log', 'state.log']
root = None
resultdir = None
def __init__(self,
root=None,
resultdir=None,
**kwargs):
"""
Creates the Mock object.
@type root: str
@param root: the name of the mock buildroot
@type resultdir: str
@param resultdir: the path of the result dir
@type kwargs: dict
@param kwargs: All further keyword arguments.
"""
ShellCommand.__init__(self, **kwargs)
if root:
self.root = root
if resultdir:
self.resultdir = resultdir
if not self.root:
config.error("You must specify a mock root")
self.command = ['mock', '--root', self.root]
if self.resultdir:
self.command += ['--resultdir', self.resultdir]
def start(self):
"""
Try to remove the old mock logs first.
"""
if self.resultdir:
for lname in self.mock_logfiles:
self.logfiles[lname] = self.build.path_module.join(self.resultdir,
lname)
else:
for lname in self.mock_logfiles:
self.logfiles[lname] = lname
self.addLogObserver('state.log', MockStateObserver())
cmd = remotecommand.RemoteCommand('rmdir', {'dir':
[self.build.path_module.join('build', self.logfiles[l])
for l in self.mock_logfiles]})
d = self.runCommand(cmd)
@d.addCallback
def removeDone(cmd):
ShellCommand.start(self)
d.addErrback(self.failed)
class MockBuildSRPM(Mock):
"""Build a srpm within a mock. Requires a spec file and a sources dir."""
name = "mockbuildsrpm"
description = ["mock buildsrpm"]
descriptionDone = ["mock buildsrpm"]
spec = None
sources = '.'
def __init__(self,
spec=None,
sources=None,
**kwargs):
"""
Creates the MockBuildSRPM object.
@type spec: str
@param spec: the path of the specfiles.
@type sources: str
@param sources: the path of the sources dir.
@type kwargs: dict
@param kwargs: All further keyword arguments.
"""
Mock.__init__(self, **kwargs)
if spec:
self.spec = spec
if sources:
self.sources = sources
if not self.spec:
config.error("You must specify a spec file")
if not self.sources:
config.error("You must specify a sources dir")
self.command += ['--buildsrpm', '--spec', self.spec,
'--sources', self.sources]
self.addLogObserver(
'stdio', logobserver.LineConsumerLogObserver(self.logConsumer))
def logConsumer(self):
r = re.compile(r"Wrote: .*/([^/]*.src.rpm)")
while True:
stream, line = yield
m = r.search(line)
if m:
self.setProperty("srpm", m.group(1), 'MockBuildSRPM')
class MockRebuild(Mock):
"""Rebuild a srpm within a mock. Requires a srpm file."""
name = "mock"
description = ["mock rebuilding srpm"]
descriptionDone = ["mock rebuild srpm"]
srpm = None
def __init__(self, srpm=None, **kwargs):
"""
Creates the MockRebuild object.
@type srpm: str
@param srpm: the path of the srpm file.
@type kwargs: dict
@param kwargs: All further keyword arguments.
"""
Mock.__init__(self, **kwargs)
if srpm:
self.srpm = srpm
if not self.srpm:
config.error("You must specify a srpm")
self.command += ['--rebuild', self.srpm]
| gpl-2.0 |
vipulroxx/sympy | sympy/series/tests/test_order.py | 6 | 13941 | from sympy import (Symbol, Rational, Order, exp, ln, log, nan, oo, O, pi, I,
S, Integral, sin, cos, sqrt, conjugate, expand, transpose, symbols,
Function)
from sympy.utilities.pytest import raises
from sympy.abc import w, x, y, z
def test_caching_bug():
# needs to be the first test, so that all caches are clean
# cache it
e = O(w)
# and test that this won't raise an exception
O(w**(-1/x/log(3)*log(5)), w)
def test_free_symbols():
assert Order(1).free_symbols == set()
assert Order(x).free_symbols == set([x])
assert Order(1, x).free_symbols == set([x])
assert Order(x*y).free_symbols == set([x, y])
assert Order(x, x, y).free_symbols == set([x, y])
def test_simple_1():
o = Rational(0)
assert Order(2*x) == Order(x)
assert Order(x)*3 == Order(x)
assert -28*Order(x) == Order(x)
assert Order(Order(x)) == Order(x)
assert Order(Order(x), y) == Order(Order(x), x, y)
assert Order(-23) == Order(1)
assert Order(exp(x)) == Order(1, x)
assert Order(exp(1/x)).expr == exp(1/x)
assert Order(x*exp(1/x)).expr == x*exp(1/x)
assert Order(x**(o/3)).expr == x**(o/3)
assert Order(x**(5*o/3)).expr == x**(5*o/3)
assert Order(x**2 + x + y, x) == O(1, x)
assert Order(x**2 + x + y, y) == O(1, y)
raises(ValueError, lambda: Order(exp(x), x, x))
raises(TypeError, lambda: Order(x, 2 - x))
def test_simple_2():
assert Order(2*x)*x == Order(x**2)
assert Order(2*x)/x == Order(1, x)
assert Order(2*x)*x*exp(1/x) == Order(x**2*exp(1/x))
assert (Order(2*x)*x*exp(1/x)/ln(x)**3).expr == x**2*exp(1/x)*ln(x)**-3
def test_simple_3():
assert Order(x) + x == Order(x)
assert Order(x) + 2 == 2 + Order(x)
assert Order(x) + x**2 == Order(x)
assert Order(x) + 1/x == 1/x + Order(x)
assert Order(1/x) + 1/x**2 == 1/x**2 + Order(1/x)
assert Order(x) + exp(1/x) == Order(x) + exp(1/x)
def test_simple_4():
assert Order(x)**2 == Order(x**2)
def test_simple_5():
assert Order(x) + Order(x**2) == Order(x)
assert Order(x) + Order(x**-2) == Order(x**-2)
assert Order(x) + Order(1/x) == Order(1/x)
def test_simple_6():
assert Order(x) - Order(x) == Order(x)
assert Order(x) + Order(1) == Order(1)
assert Order(x) + Order(x**2) == Order(x)
assert Order(1/x) + Order(1) == Order(1/x)
assert Order(x) + Order(exp(1/x)) == Order(exp(1/x))
assert Order(x**3) + Order(exp(2/x)) == Order(exp(2/x))
assert Order(x**-3) + Order(exp(2/x)) == Order(exp(2/x))
def test_simple_7():
assert 1 + O(1) == O(1)
assert 2 + O(1) == O(1)
assert x + O(1) == O(1)
assert 1/x + O(1) == 1/x + O(1)
def test_simple_8():
assert O(sqrt(-x)) == O(sqrt(x))
assert O(x**2*sqrt(x)) == O(x**(S(5)/2))
assert O(x**3*sqrt(-(-x)**3)) == O(x**(S(9)/2))
assert O(x**(S(3)/2)*sqrt((-x)**3)) == O(x**3)
assert O(x*(-2*x)**(I/2)) == O(x*(-x)**(I/2))
def test_as_expr_variables():
assert Order(x).as_expr_variables(None) == (x, ((x, 0),))
assert Order(x).as_expr_variables((((x, 0),))) == (x, ((x, 0),))
assert Order(y).as_expr_variables(((x, 0),)) == (y, ((x, 0), (y, 0)))
assert Order(y).as_expr_variables(((x, 0), (y, 0))) == (y, ((x, 0), (y, 0)))
def test_contains_0():
assert Order(1, x).contains(Order(1, x))
assert Order(1, x).contains(Order(1))
assert Order(1).contains(Order(1, x)) is False
def test_contains_1():
assert Order(x).contains(Order(x))
assert Order(x).contains(Order(x**2))
assert not Order(x**2).contains(Order(x))
assert not Order(x).contains(Order(1/x))
assert not Order(1/x).contains(Order(exp(1/x)))
assert not Order(x).contains(Order(exp(1/x)))
assert Order(1/x).contains(Order(x))
assert Order(exp(1/x)).contains(Order(x))
assert Order(exp(1/x)).contains(Order(1/x))
assert Order(exp(1/x)).contains(Order(exp(1/x)))
assert Order(exp(2/x)).contains(Order(exp(1/x)))
assert not Order(exp(1/x)).contains(Order(exp(2/x)))
def test_contains_2():
assert Order(x).contains(Order(y)) is None
assert Order(x).contains(Order(y*x))
assert Order(y*x).contains(Order(x))
assert Order(y).contains(Order(x*y))
assert Order(x).contains(Order(y**2*x))
def test_contains_3():
assert Order(x*y**2).contains(Order(x**2*y)) is None
assert Order(x**2*y).contains(Order(x*y**2)) is None
def test_contains_4():
assert Order(sin(1/x**2)).contains(Order(cos(1/x**2))) is None
assert Order(cos(1/x**2)).contains(Order(sin(1/x**2))) is None
def test_contains():
assert Order(1, x) not in Order(1)
assert Order(1) in Order(1, x)
raises(TypeError, lambda: Order(x*y**2) in Order(x**2*y))
def test_add_1():
assert Order(x + x) == Order(x)
assert Order(3*x - 2*x**2) == Order(x)
assert Order(1 + x) == Order(1, x)
assert Order(1 + 1/x) == Order(1/x)
assert Order(ln(x) + 1/ln(x)) == Order(ln(x))
assert Order(exp(1/x) + x) == Order(exp(1/x))
assert Order(exp(1/x) + 1/x**20) == Order(exp(1/x))
def test_ln_args():
assert O(log(x)) + O(log(2*x)) == O(log(x))
assert O(log(x)) + O(log(x**3)) == O(log(x))
assert O(log(x*y)) + O(log(x) + log(y)) == O(log(x*y))
def test_multivar_0():
assert Order(x*y).expr == x*y
assert Order(x*y**2).expr == x*y**2
assert Order(x*y, x).expr == x
assert Order(x*y**2, y).expr == y**2
assert Order(x*y*z).expr == x*y*z
assert Order(x/y).expr == x/y
assert Order(x*exp(1/y)).expr == x*exp(1/y)
assert Order(exp(x)*exp(1/y)).expr == exp(1/y)
def test_multivar_0a():
assert Order(exp(1/x)*exp(1/y)).expr == exp(1/x + 1/y)
def test_multivar_1():
assert Order(x + y).expr == x + y
assert Order(x + 2*y).expr == x + y
assert (Order(x + y) + x).expr == (x + y)
assert (Order(x + y) + x**2) == Order(x + y)
assert (Order(x + y) + 1/x) == 1/x + Order(x + y)
assert Order(x**2 + y*x).expr == x**2 + y*x
def test_multivar_2():
assert Order(x**2*y + y**2*x, x, y).expr == x**2*y + y**2*x
def test_multivar_mul_1():
assert Order(x + y)*x == Order(x**2 + y*x, x, y)
def test_multivar_3():
assert (Order(x) + Order(y)).args in [
(Order(x), Order(y)),
(Order(y), Order(x))]
assert Order(x) + Order(y) + Order(x + y) == Order(x + y)
assert (Order(x**2*y) + Order(y**2*x)).args in [
(Order(x*y**2), Order(y*x**2)),
(Order(y*x**2), Order(x*y**2))]
assert (Order(x**2*y) + Order(y*x)) == Order(x*y)
def test_issue_3468():
y = Symbol('y', negative=True)
z = Symbol('z', complex=True)
# check that Order does not modify assumptions about symbols
Order(x)
Order(y)
Order(z)
assert x.is_positive is None
assert y.is_positive is False
assert z.is_positive is None
def test_leading_order():
assert (x + 1 + 1/x**5).extract_leading_order(x) == ((1/x**5, O(1/x**5)),)
assert (1 + 1/x).extract_leading_order(x) == ((1/x, O(1/x)),)
assert (1 + x).extract_leading_order(x) == ((1, O(1, x)),)
assert (1 + x**2).extract_leading_order(x) == ((1, O(1, x)),)
assert (2 + x**2).extract_leading_order(x) == ((2, O(1, x)),)
assert (x + x**2).extract_leading_order(x) == ((x, O(x)),)
def test_leading_order2():
assert set((2 + pi + x**2).extract_leading_order(x)) == set(((pi, O(1, x)),
(S(2), O(1, x))))
assert set((2*x + pi*x + x**2).extract_leading_order(x)) == set(((2*x, O(x)),
(x*pi, O(x))))
def test_order_leadterm():
assert O(x**2)._eval_as_leading_term(x) == O(x**2)
def test_order_symbols():
e = x*y*sin(x)*Integral(x, (x, 1, 2))
assert O(e) == O(x**2*y, x, y)
assert O(e, x) == O(x**2)
def test_nan():
assert O(nan) == nan
assert not O(x).contains(nan)
def test_O1():
assert O(1, x) * x == O(x)
assert O(1, y) * x == O(1, y)
def test_getn():
# other lines are tested incidentally by the suite
assert O(x).getn() == 1
assert O(x/log(x)).getn() == 1
assert O(x**2/log(x)**2).getn() == 2
assert O(x*log(x)).getn() == 1
raises(NotImplementedError, lambda: (O(x) + O(y)).getn())
def test_diff():
assert O(x**2).diff(x) == O(x)
def test_getO():
assert (x).getO() is None
assert (x).removeO() == x
assert (O(x)).getO() == O(x)
assert (O(x)).removeO() == 0
assert (z + O(x) + O(y)).getO() == O(x) + O(y)
assert (z + O(x) + O(y)).removeO() == z
raises(NotImplementedError, lambda: (O(x) + O(y)).getn())
def test_leading_term():
from sympy import digamma
assert O(1/digamma(1/x)) == O(1/log(x))
def test_eval():
assert Order(x).subs(Order(x), 1) == 1
assert Order(x).subs(x, y) == Order(y)
assert Order(x).subs(y, x) == Order(x)
assert Order(x).subs(x, x + y) == Order(x + y, (x, -y))
assert (O(1)**x).is_Pow
def test_issue_4279():
a, b = symbols('a b')
assert O(a, a, b) + O(1, a, b) == O(1, a, b)
assert O(b, a, b) + O(1, a, b) == O(1, a, b)
assert O(a + b, a, b) + O(1, a, b) == O(1, a, b)
assert O(1, a, b) + O(a, a, b) == O(1, a, b)
assert O(1, a, b) + O(b, a, b) == O(1, a, b)
assert O(1, a, b) + O(a + b, a, b) == O(1, a, b)
def test_issue_4855():
assert 1/O(1) != O(1)
assert 1/O(x) != O(1/x)
assert 1/O(x, (x, oo)) != O(1/x, (x, oo))
f = Function('f')
assert 1/O(f(x)) != O(1/x)
def test_order_conjugate_transpose():
x = Symbol('x', real=True)
y = Symbol('y', imaginary=True)
assert conjugate(Order(x)) == Order(conjugate(x))
assert conjugate(Order(y)) == Order(conjugate(y))
assert conjugate(Order(x**2)) == Order(conjugate(x)**2)
assert conjugate(Order(y**2)) == Order(conjugate(y)**2)
assert transpose(Order(x)) == Order(transpose(x))
assert transpose(Order(y)) == Order(transpose(y))
assert transpose(Order(x**2)) == Order(transpose(x)**2)
assert transpose(Order(y**2)) == Order(transpose(y)**2)
def test_order_noncommutative():
A = Symbol('A', commutative=False)
assert Order(A + A*x, x) == Order(1, x)
assert (A + A*x)*Order(x) == Order(x)
assert (A*x)*Order(x) == Order(x**2, x)
assert expand((1 + Order(x))*A*A*x) == A*A*x + Order(x**2, x)
assert expand((A*A + Order(x))*x) == A*A*x + Order(x**2, x)
assert expand((A + Order(x))*A*x) == A*A*x + Order(x**2, x)
def test_issue_6753():
assert (1 + x**2)**10000*O(x) == O(x)
def test_order_at_infinity():
assert Order(1 + x, (x, oo)) == Order(x, (x, oo))
assert Order(3*x, (x, oo)) == Order(x, (x, oo))
assert Order(x, (x, oo))*3 == Order(x, (x, oo))
assert -28*Order(x, (x, oo)) == Order(x, (x, oo))
assert Order(Order(x, (x, oo)), (x, oo)) == Order(x, (x, oo))
assert Order(Order(x, (x, oo)), (y, oo)) == Order(x, (x, oo), (y, oo))
assert Order(3, (x, oo)) == Order(1, (x, oo))
assert Order(x**2 + x + y, (x, oo)) == O(x**2, (x, oo))
assert Order(x**2 + x + y, (y, oo)) == O(y, (y, oo))
assert Order(2*x, (x, oo))*x == Order(x**2, (x, oo))
assert Order(2*x, (x, oo))/x == Order(1, (x, oo))
assert Order(2*x, (x, oo))*x*exp(1/x) == Order(x**2*exp(1/x), (x, oo))
assert Order(2*x, (x, oo))*x*exp(1/x)/ln(x)**3 == Order(x**2*exp(1/x)*ln(x)**-3, (x, oo))
assert Order(x, (x, oo)) + 1/x == 1/x + Order(x, (x, oo)) == Order(x, (x, oo))
assert Order(x, (x, oo)) + 1 == 1 + Order(x, (x, oo)) == Order(x, (x, oo))
assert Order(x, (x, oo)) + x == x + Order(x, (x, oo)) == Order(x, (x, oo))
assert Order(x, (x, oo)) + x**2 == x**2 + Order(x, (x, oo))
assert Order(1/x, (x, oo)) + 1/x**2 == 1/x**2 + Order(1/x, (x, oo)) == Order(1/x, (x, oo))
assert Order(x, (x, oo)) + exp(1/x) == exp(1/x) + Order(x, (x, oo))
assert Order(x, (x, oo))**2 == Order(x**2, (x, oo))
assert Order(x, (x, oo)) + Order(x**2, (x, oo)) == Order(x**2, (x, oo))
assert Order(x, (x, oo)) + Order(x**-2, (x, oo)) == Order(x, (x, oo))
assert Order(x, (x, oo)) + Order(1/x, (x, oo)) == Order(x, (x, oo))
assert Order(x, (x, oo)) - Order(x, (x, oo)) == Order(x, (x, oo))
assert Order(x, (x, oo)) + Order(1, (x, oo)) == Order(x, (x, oo))
assert Order(x, (x, oo)) + Order(x**2, (x, oo)) == Order(x**2, (x, oo))
assert Order(1/x, (x, oo)) + Order(1, (x, oo)) == Order(1, (x, oo))
assert Order(x, (x, oo)) + Order(exp(1/x), (x, oo)) == Order(x, (x, oo))
assert Order(x**3, (x, oo)) + Order(exp(2/x), (x, oo)) == Order(x**3, (x, oo))
assert Order(x**-3, (x, oo)) + Order(exp(2/x), (x, oo)) == Order(exp(2/x), (x, oo))
# issue 7207
assert Order(exp(x), (x, oo)).expr == Order(2*exp(x), (x, oo)).expr == exp(x)
assert Order(y**x, (x, oo)).expr == Order(2*y**x, (x, oo)).expr == exp(log(y)*x)
def test_mixing_order_at_zero_and_infinity():
assert (Order(x, (x, 0)) + Order(x, (x, oo))).is_Add
assert Order(x, (x, 0)) + Order(x, (x, oo)) == Order(x, (x, oo)) + Order(x, (x, 0))
assert Order(Order(x, (x, oo))) == Order(x, (x, oo))
# not supported (yet)
raises(NotImplementedError, lambda: Order(x, (x, 0))*Order(x, (x, oo)))
raises(NotImplementedError, lambda: Order(x, (x, oo))*Order(x, (x, 0)))
raises(NotImplementedError, lambda: Order(Order(x, (x, oo)), y))
raises(NotImplementedError, lambda: Order(Order(x), (x, oo)))
def test_order_at_some_point():
assert Order(x, (x, 1)) == Order(1, (x, 1))
assert Order(2*x - 2, (x, 1)) == Order(x - 1, (x, 1))
assert Order(-x + 1, (x, 1)) == Order(x - 1, (x, 1))
assert Order(x - 1, (x, 1))**2 == Order((x - 1)**2, (x, 1))
assert Order(x - 2, (x, 2)) - O(x - 2, (x, 2)) == Order(x - 2, (x, 2))
def test_order_subs_limits():
# issue 3333
assert (1 + Order(x)).subs(x, 1/x) == 1 + Order(1/x, (x, oo))
assert (1 + Order(x)).limit(x, 0) == 1
# issue 5769
assert ((x + Order(x**2))/x).limit(x, 0) == 1
assert Order(x**2).subs(x, y - 1) == Order((y - 1)**2, (y, 1))
assert Order(10*x**2, (x, 2)).subs(x, y - 1) == Order(1, (y, 3))
| bsd-3-clause |
jrbl/invenio | modules/websearch/lib/search_engine_tests.py | 2 | 12267 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Unit tests for the search engine."""
__revision__ = \
"$Id$"
import unittest
from invenio import search_engine
from invenio.testutils import make_test_suite, run_test_suite
class TestMiscUtilityFunctions(unittest.TestCase):
"""Test whatever non-data-specific utility functions are essential."""
def test_ziplist2x2(self):
"""search engine - ziplist 2 x 2"""
self.assertEqual(search_engine.ziplist([1, 2], [3, 4]), [[1, 3], [2, 4]])
def test_ziplist3x3(self):
"""search engine - ziplist 3 x 3"""
self.assertEqual(search_engine.ziplist([1, 2, 3], ['a', 'b', 'c'], [9, 8, 7]),
[[1, 'a', 9], [2, 'b', 8], [3, 'c', 7]])
class TestWashQueryParameters(unittest.TestCase):
"""Test for washing of search query parameters."""
def test_wash_pattern(self):
"""search engine - washing of query patterns"""
self.assertEqual("Ellis, J", search_engine.wash_pattern('Ellis, J'))
#self.assertEqual("ell", search_engine.wash_pattern('ell*'))
def test_wash_dates_from_tuples(self):
"""search engine - washing of date arguments from (year,month,day) tuples"""
self.assertEqual(search_engine.wash_dates(d1y=1980, d1m=1, d1d=28, d2y=2003, d2m=2, d2d=3),
('1980-01-28 00:00:00', '2003-02-03 00:00:00'))
self.assertEqual(search_engine.wash_dates(d1y=1980, d1m=0, d1d=28, d2y=2003, d2m=2, d2d=0),
('1980-01-28 00:00:00', '2003-02-31 00:00:00'))
def test_wash_dates_from_datetexts(self):
"""search engine - washing of date arguments from datetext strings"""
self.assertEqual(search_engine.wash_dates(d1="1980-01-28 01:02:03", d2="1980-01-29 12:34:56"),
('1980-01-28 01:02:03', '1980-01-29 12:34:56'))
self.assertEqual(search_engine.wash_dates(d1="1980-01-28 01:02:03"),
('1980-01-28 01:02:03', '9999-12-31 00:00:00'))
self.assertEqual(search_engine.wash_dates(d2="1980-01-29 12:34:56"),
('0000-01-01 00:00:00', '1980-01-29 12:34:56'))
def test_wash_dates_from_both(self):
"""search engine - washing of date arguments from both datetext strings and (year,month,day) tuples"""
# datetext mode takes precedence, d1* should be ignored
self.assertEqual(search_engine.wash_dates(d1="1980-01-28 01:02:03", d1y=1980, d1m=1, d1d=28),
('1980-01-28 01:02:03', '9999-12-31 00:00:00'))
# datetext mode takes precedence, d2 missing, d2* should be ignored
self.assertEqual(search_engine.wash_dates(d1="1980-01-28 01:02:03", d2y=2003, d2m=2, d2d=3),
('1980-01-28 01:02:03', '2003-02-03 00:00:00'))
class TestQueryParser(unittest.TestCase):
"""Test of search pattern (or query) parser."""
def _check(self, p, f, m, result_wanted):
"Internal checking function calling create_basic_search_units."
result_obtained = search_engine.create_basic_search_units(None, p, f, m)
assert result_obtained == result_wanted, \
'obtained %s instead of %s' % (repr(result_obtained),
repr(result_wanted))
return
def test_parsing_single_word_query(self):
"search engine - parsing single word queries"
self._check('word', '', None, [['+', 'word', '', 'w']])
def test_parsing_single_word_with_boolean_operators(self):
"search engine - parsing single word queries"
self._check('+word', '', None, [['+', 'word', '', 'w']])
self._check('-word', '', None, [['-', 'word', '', 'w']])
self._check('|word', '', None, [['|', 'word', '', 'w']])
def test_parsing_single_word_in_field(self):
"search engine - parsing single word queries in a logical field"
self._check('word', 'title', None, [['+', 'word', 'title', 'w']])
def test_parsing_single_word_in_tag(self):
"search engine - parsing single word queries in a physical tag"
self._check('word', '500', None, [['+', 'word', '500', 'a']])
def test_parsing_query_with_commas(self):
"search engine - parsing queries with commas"
self._check('word,word', 'title', None,
[['+', 'word,word', 'title', 'a']])
def test_parsing_exact_phrase_query(self):
"search engine - parsing exact phrase"
self._check('"the word"', 'title', None,
[['+', 'the word', 'title', 'a']])
def test_parsing_exact_phrase_query_unbalanced(self):
"search engine - parsing unbalanced exact phrase"
self._check('"the word', 'title', None,
[['+', '"the', 'title', 'w'],
['+', 'word', 'title', 'w']])
def test_parsing_exact_phrase_query_in_any_field(self):
"search engine - parsing exact phrase in any field"
self._check('"the word"', '', None,
[['+', 'the word', '', 'a']])
def test_parsing_partial_phrase_query(self):
"search engine - parsing partial phrase"
self._check("'the word'", 'title', None,
[['+', '%the word%', 'title', 'a']])
def test_parsing_partial_phrase_query_unbalanced(self):
"search engine - parsing unbalanced partial phrase"
self._check("'the word", 'title', None,
[['+', "'the", 'title', 'w'],
['+', "word", 'title', 'w']])
def test_parsing_partial_phrase_query_in_any_field(self):
"search engine - parsing partial phrase in any field"
self._check("'the word'", '', None,
[['+', '%the word%', '', 'a']])
def test_parsing_regexp_query(self):
"search engine - parsing regex matches"
self._check("/the word/", 'title', None,
[['+', 'the word', 'title', 'r']])
def test_parsing_regexp_query_unbalanced(self):
"search engine - parsing unbalanced regexp"
self._check("/the word", 'title', None,
[['+', '/the', 'title', 'w'],
['+', 'word', 'title', 'w']])
def test_parsing_regexp_query_in_any_field(self):
"search engine - parsing regexp searches in any field"
self._check("/the word/", '', None,
[['+', 'the word', '', 'r']])
def test_parsing_boolean_query(self):
"search engine - parsing boolean query with several words"
self._check("muon kaon ellis cern", '', None,
[['+', 'muon', '', 'w'],
['+', 'kaon', '', 'w'],
['+', 'ellis', '', 'w'],
['+', 'cern', '', 'w']])
def test_parsing_boolean_query_with_word_operators(self):
"search engine - parsing boolean query with word operators"
self._check("muon and kaon or ellis not cern", '', None,
[['+', 'muon', '', 'w'],
['+', 'kaon', '', 'w'],
['|', 'ellis', '', 'w'],
['-', 'cern', '', 'w']])
def test_parsing_boolean_query_with_symbol_operators(self):
"search engine - parsing boolean query with symbol operators"
self._check("muon +kaon |ellis -cern", '', None,
[['+', 'muon', '', 'w'],
['+', 'kaon', '', 'w'],
['|', 'ellis', '', 'w'],
['-', 'cern', '', 'w']])
def test_parsing_boolean_query_with_symbol_operators_and_spaces(self):
"search engine - parsing boolean query with operators and spaces"
self._check("muon + kaon | ellis - cern", '', None,
[['+', 'muon', '', 'w'],
['+', 'kaon', '', 'w'],
['|', 'ellis', '', 'w'],
['-', 'cern', '', 'w']])
def test_parsing_boolean_query_with_symbol_operators_and_no_spaces(self):
"search engine - parsing boolean query with operators and no spaces"
self._check("muon+kaon|ellis-cern", '', None,
[['+', 'muon+kaon|ellis-cern', '', 'w']])
def test_parsing_structured_query_existing(self):
"search engine - parsing structured query, existing index"
self._check("title:muon", '', None,
[['+', 'muon', 'title', 'w']])
def test_parsing_structured_query_existing_field(self):
"search engine - parsing structured query, existing field, but no word index"
self._check("division:IT", '', None,
[['+', 'IT', 'division', 'a']])
def test_parsing_structured_query_nonexisting(self):
"search engine - parsing structured query, non-existing index"
self._check("foo:muon", '', None,
[['+', 'foo:muon', '', 'w']])
def test_parsing_structured_query_marc(self):
"search engine - parsing structured query, MARC-tag defined index"
self._check("245:muon", '', None,
[['+', 'muon', '245', 'a']])
def test_parsing_combined_structured_query(self):
"search engine - parsing combined structured query"
self._check("title:muon author:ellis", '', None,
[['+', 'muon', 'title', 'w'],
['+', 'ellis', 'author', 'w']])
def test_parsing_structured_regexp_query(self):
"search engine - parsing structured regexp query"
self._check("title:/(one|two)/", '', None,
[['+', '(one|two)', 'title', 'r']])
def test_parsing_structured_regexp_marc_query(self):
"search engine - parsing structured regexp MARC query"
self._check("245__a:/(one|two)/", '', None,
[['+', '(one|two)', '245__a', 'r']])
def test_parsing_structured_regexp_refersto_query(self):
"search engine - parsing structured regexp refersto query"
self._check("refersto:/(one|two)/", '', None,
[['+', '(one|two)', 'refersto', 'r']])
def test_parsing_combined_structured_query_in_a_field(self):
"search engine - parsing structured query in a field"
self._check("title:muon author:ellis", 'abstract', None,
[['+', 'muon', 'title', 'w'],
['+', 'ellis', 'author', 'w']])
    def test_parsing_colons_and_spaces_well_structured(self):
"search engine - parsing query with colons and spaces, well structured"
self._check("title: muon author:ellis keyword: kaon", 'abstract', None,
[['+', 'muon', 'title', 'w'],
['+', 'ellis', 'author', 'w'],
['+', 'kaon', 'keyword', 'w']])
    def test_parsing_colons_and_spaces_badly_structured(self):
"search engine - parsing query with colons and spaces, badly structured"
self._check("foo: bar", 'abstract', None,
[['+', 'bar', 'abstract', 'w'],
['+', 'foo:', 'abstract', 'w']])
def test_parsing_colons_and_spaces_for_phrase_query(self):
"search engine - parsing query with colons and spaces, phrase query"
self._check('author: "Ellis, J"', None, None,
[['+', 'Ellis, J', 'author', 'a']])
TEST_SUITE = make_test_suite(TestWashQueryParameters,
TestQueryParser,
TestMiscUtilityFunctions)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| gpl-2.0 |
kawamon/hue | desktop/core/ext-py/retry-decorator-1.0.0/retry_decorator/retry_decorator.py | 36 | 1190 | #!/usr/bin/env python
from __future__ import print_function
import traceback
import logging
import time
import random
import sys
def retry(ExceptionToCheck, tries=10, timeout_secs=1.0, logger=None):
    """
    Retry calling the decorated function using an exponential backoff.

    ExceptionToCheck: exception class (or tuple of classes) that triggers a retry.
    tries: total number of attempts; the final attempt is made without a guard.
    timeout_secs: initial delay between attempts; it doubles after each failure.
    logger: optional logger for retry messages; defaults to the logging module.
    """
def deco_retry(f):
def f_retry(*args, **kwargs):
mtries, mdelay = tries, timeout_secs
while mtries > 1:
try:
return f(*args, **kwargs)
except ExceptionToCheck as e:
#traceback.print_exc()
                    half_interval = mdelay * 0.10  # half-width of the +/-10% jitter window
actual_delay = random.uniform(mdelay - half_interval, mdelay + half_interval)
msg = "Retrying in %.2f seconds ..." % actual_delay
if logger is None:
logging.exception(msg)
else:
logger.exception(msg)
time.sleep(actual_delay)
mtries -= 1
mdelay *= 2
            return f(*args, **kwargs)  # final attempt; any exception propagates
return f_retry # true decorator
return deco_retry
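

# Minimal usage sketch (illustrative, not part of the original module): the
# decorator wraps a flaky callable, sleeping roughly timeout_secs with +/-10%
# jitter and doubling the delay after each failure before the final,
# unguarded call. The names below are hypothetical.
if __name__ == '__main__':
    @retry(IOError, tries=3, timeout_secs=0.1)
    def flaky_read():
        raise IOError("transient failure")

    try:
        flaky_read()
    except IOError:
        print("gave up after 3 attempts")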
| apache-2.0 |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.6.0/Lib/encodings/cp855.py | 272 | 33850 | """ Python Character Mapping Codec generated from 'VENDORS/MICSFT/PC/CP855.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp855',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
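

# Illustrative round-trip sketch (assumption: the codec is registered under
# the standard 'cp855' name by the encodings package):
#
#   'Привет'.encode('cp855')                      ->  b'\xdd\xe1\xb7\xeb\xa8\xe5'
#   b'\xdd\xe1\xb7\xeb\xa8\xe5'.decode('cp855')   ->  'Привет'
#
# The byte values follow from the decoding_table and encoding_map below.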
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0452, # CYRILLIC SMALL LETTER DJE
0x0081: 0x0402, # CYRILLIC CAPITAL LETTER DJE
0x0082: 0x0453, # CYRILLIC SMALL LETTER GJE
0x0083: 0x0403, # CYRILLIC CAPITAL LETTER GJE
0x0084: 0x0451, # CYRILLIC SMALL LETTER IO
0x0085: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x0086: 0x0454, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0087: 0x0404, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0088: 0x0455, # CYRILLIC SMALL LETTER DZE
0x0089: 0x0405, # CYRILLIC CAPITAL LETTER DZE
0x008a: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x008b: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x008c: 0x0457, # CYRILLIC SMALL LETTER YI
0x008d: 0x0407, # CYRILLIC CAPITAL LETTER YI
0x008e: 0x0458, # CYRILLIC SMALL LETTER JE
0x008f: 0x0408, # CYRILLIC CAPITAL LETTER JE
0x0090: 0x0459, # CYRILLIC SMALL LETTER LJE
0x0091: 0x0409, # CYRILLIC CAPITAL LETTER LJE
0x0092: 0x045a, # CYRILLIC SMALL LETTER NJE
0x0093: 0x040a, # CYRILLIC CAPITAL LETTER NJE
0x0094: 0x045b, # CYRILLIC SMALL LETTER TSHE
0x0095: 0x040b, # CYRILLIC CAPITAL LETTER TSHE
0x0096: 0x045c, # CYRILLIC SMALL LETTER KJE
0x0097: 0x040c, # CYRILLIC CAPITAL LETTER KJE
0x0098: 0x045e, # CYRILLIC SMALL LETTER SHORT U
0x0099: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U
0x009a: 0x045f, # CYRILLIC SMALL LETTER DZHE
0x009b: 0x040f, # CYRILLIC CAPITAL LETTER DZHE
0x009c: 0x044e, # CYRILLIC SMALL LETTER YU
0x009d: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x009e: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x009f: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x00a0: 0x0430, # CYRILLIC SMALL LETTER A
0x00a1: 0x0410, # CYRILLIC CAPITAL LETTER A
0x00a2: 0x0431, # CYRILLIC SMALL LETTER BE
0x00a3: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x00a4: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00a5: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x00a6: 0x0434, # CYRILLIC SMALL LETTER DE
0x00a7: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x00a8: 0x0435, # CYRILLIC SMALL LETTER IE
0x00a9: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x00aa: 0x0444, # CYRILLIC SMALL LETTER EF
0x00ab: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x00ac: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00ad: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00b0: 0x2591, # LIGHT SHADE
0x00b1: 0x2592, # MEDIUM SHADE
0x00b2: 0x2593, # DARK SHADE
0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x00b5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00b6: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x00b7: 0x0438, # CYRILLIC SMALL LETTER I
0x00b8: 0x0418, # CYRILLIC CAPITAL LETTER I
0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
0x00bd: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00be: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x00c6: 0x043a, # CYRILLIC SMALL LETTER KA
0x00c7: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x00cf: 0x00a4, # CURRENCY SIGN
0x00d0: 0x043b, # CYRILLIC SMALL LETTER EL
0x00d1: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x00d2: 0x043c, # CYRILLIC SMALL LETTER EM
0x00d3: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x00d4: 0x043d, # CYRILLIC SMALL LETTER EN
0x00d5: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x00d6: 0x043e, # CYRILLIC SMALL LETTER O
0x00d7: 0x041e, # CYRILLIC CAPITAL LETTER O
0x00d8: 0x043f, # CYRILLIC SMALL LETTER PE
0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x00db: 0x2588, # FULL BLOCK
0x00dc: 0x2584, # LOWER HALF BLOCK
0x00dd: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x00de: 0x044f, # CYRILLIC SMALL LETTER YA
0x00df: 0x2580, # UPPER HALF BLOCK
0x00e0: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00e1: 0x0440, # CYRILLIC SMALL LETTER ER
0x00e2: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x00e3: 0x0441, # CYRILLIC SMALL LETTER ES
0x00e4: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x00e5: 0x0442, # CYRILLIC SMALL LETTER TE
0x00e6: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x00e7: 0x0443, # CYRILLIC SMALL LETTER U
0x00e8: 0x0423, # CYRILLIC CAPITAL LETTER U
0x00e9: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00ea: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x00eb: 0x0432, # CYRILLIC SMALL LETTER VE
0x00ec: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x00ed: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00ee: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x00ef: 0x2116, # NUMERO SIGN
0x00f0: 0x00ad, # SOFT HYPHEN
0x00f1: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00f2: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x00f3: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00f4: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x00f5: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00f6: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x00f7: 0x044d, # CYRILLIC SMALL LETTER E
0x00f8: 0x042d, # CYRILLIC CAPITAL LETTER E
0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00fa: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x00fb: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00fc: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x00fd: 0x00a7, # SECTION SIGN
0x00fe: 0x25a0, # BLACK SQUARE
0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\u0452' # 0x0080 -> CYRILLIC SMALL LETTER DJE
'\u0402' # 0x0081 -> CYRILLIC CAPITAL LETTER DJE
'\u0453' # 0x0082 -> CYRILLIC SMALL LETTER GJE
'\u0403' # 0x0083 -> CYRILLIC CAPITAL LETTER GJE
'\u0451' # 0x0084 -> CYRILLIC SMALL LETTER IO
'\u0401' # 0x0085 -> CYRILLIC CAPITAL LETTER IO
'\u0454' # 0x0086 -> CYRILLIC SMALL LETTER UKRAINIAN IE
'\u0404' # 0x0087 -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
'\u0455' # 0x0088 -> CYRILLIC SMALL LETTER DZE
'\u0405' # 0x0089 -> CYRILLIC CAPITAL LETTER DZE
'\u0456' # 0x008a -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0406' # 0x008b -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
'\u0457' # 0x008c -> CYRILLIC SMALL LETTER YI
'\u0407' # 0x008d -> CYRILLIC CAPITAL LETTER YI
'\u0458' # 0x008e -> CYRILLIC SMALL LETTER JE
'\u0408' # 0x008f -> CYRILLIC CAPITAL LETTER JE
'\u0459' # 0x0090 -> CYRILLIC SMALL LETTER LJE
'\u0409' # 0x0091 -> CYRILLIC CAPITAL LETTER LJE
'\u045a' # 0x0092 -> CYRILLIC SMALL LETTER NJE
'\u040a' # 0x0093 -> CYRILLIC CAPITAL LETTER NJE
'\u045b' # 0x0094 -> CYRILLIC SMALL LETTER TSHE
'\u040b' # 0x0095 -> CYRILLIC CAPITAL LETTER TSHE
'\u045c' # 0x0096 -> CYRILLIC SMALL LETTER KJE
'\u040c' # 0x0097 -> CYRILLIC CAPITAL LETTER KJE
'\u045e' # 0x0098 -> CYRILLIC SMALL LETTER SHORT U
'\u040e' # 0x0099 -> CYRILLIC CAPITAL LETTER SHORT U
'\u045f' # 0x009a -> CYRILLIC SMALL LETTER DZHE
'\u040f' # 0x009b -> CYRILLIC CAPITAL LETTER DZHE
'\u044e' # 0x009c -> CYRILLIC SMALL LETTER YU
'\u042e' # 0x009d -> CYRILLIC CAPITAL LETTER YU
'\u044a' # 0x009e -> CYRILLIC SMALL LETTER HARD SIGN
'\u042a' # 0x009f -> CYRILLIC CAPITAL LETTER HARD SIGN
'\u0430' # 0x00a0 -> CYRILLIC SMALL LETTER A
'\u0410' # 0x00a1 -> CYRILLIC CAPITAL LETTER A
'\u0431' # 0x00a2 -> CYRILLIC SMALL LETTER BE
'\u0411' # 0x00a3 -> CYRILLIC CAPITAL LETTER BE
'\u0446' # 0x00a4 -> CYRILLIC SMALL LETTER TSE
'\u0426' # 0x00a5 -> CYRILLIC CAPITAL LETTER TSE
'\u0434' # 0x00a6 -> CYRILLIC SMALL LETTER DE
'\u0414' # 0x00a7 -> CYRILLIC CAPITAL LETTER DE
'\u0435' # 0x00a8 -> CYRILLIC SMALL LETTER IE
'\u0415' # 0x00a9 -> CYRILLIC CAPITAL LETTER IE
'\u0444' # 0x00aa -> CYRILLIC SMALL LETTER EF
'\u0424' # 0x00ab -> CYRILLIC CAPITAL LETTER EF
'\u0433' # 0x00ac -> CYRILLIC SMALL LETTER GHE
'\u0413' # 0x00ad -> CYRILLIC CAPITAL LETTER GHE
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u0445' # 0x00b5 -> CYRILLIC SMALL LETTER HA
'\u0425' # 0x00b6 -> CYRILLIC CAPITAL LETTER HA
'\u0438' # 0x00b7 -> CYRILLIC SMALL LETTER I
'\u0418' # 0x00b8 -> CYRILLIC CAPITAL LETTER I
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u0439' # 0x00bd -> CYRILLIC SMALL LETTER SHORT I
'\u0419' # 0x00be -> CYRILLIC CAPITAL LETTER SHORT I
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u043a' # 0x00c6 -> CYRILLIC SMALL LETTER KA
'\u041a' # 0x00c7 -> CYRILLIC CAPITAL LETTER KA
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\xa4' # 0x00cf -> CURRENCY SIGN
'\u043b' # 0x00d0 -> CYRILLIC SMALL LETTER EL
'\u041b' # 0x00d1 -> CYRILLIC CAPITAL LETTER EL
'\u043c' # 0x00d2 -> CYRILLIC SMALL LETTER EM
'\u041c' # 0x00d3 -> CYRILLIC CAPITAL LETTER EM
'\u043d' # 0x00d4 -> CYRILLIC SMALL LETTER EN
'\u041d' # 0x00d5 -> CYRILLIC CAPITAL LETTER EN
'\u043e' # 0x00d6 -> CYRILLIC SMALL LETTER O
'\u041e' # 0x00d7 -> CYRILLIC CAPITAL LETTER O
'\u043f' # 0x00d8 -> CYRILLIC SMALL LETTER PE
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u041f' # 0x00dd -> CYRILLIC CAPITAL LETTER PE
'\u044f' # 0x00de -> CYRILLIC SMALL LETTER YA
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\u042f' # 0x00e0 -> CYRILLIC CAPITAL LETTER YA
'\u0440' # 0x00e1 -> CYRILLIC SMALL LETTER ER
'\u0420' # 0x00e2 -> CYRILLIC CAPITAL LETTER ER
'\u0441' # 0x00e3 -> CYRILLIC SMALL LETTER ES
'\u0421' # 0x00e4 -> CYRILLIC CAPITAL LETTER ES
'\u0442' # 0x00e5 -> CYRILLIC SMALL LETTER TE
'\u0422' # 0x00e6 -> CYRILLIC CAPITAL LETTER TE
'\u0443' # 0x00e7 -> CYRILLIC SMALL LETTER U
'\u0423' # 0x00e8 -> CYRILLIC CAPITAL LETTER U
'\u0436' # 0x00e9 -> CYRILLIC SMALL LETTER ZHE
'\u0416' # 0x00ea -> CYRILLIC CAPITAL LETTER ZHE
'\u0432' # 0x00eb -> CYRILLIC SMALL LETTER VE
'\u0412' # 0x00ec -> CYRILLIC CAPITAL LETTER VE
'\u044c' # 0x00ed -> CYRILLIC SMALL LETTER SOFT SIGN
'\u042c' # 0x00ee -> CYRILLIC CAPITAL LETTER SOFT SIGN
'\u2116' # 0x00ef -> NUMERO SIGN
'\xad' # 0x00f0 -> SOFT HYPHEN
'\u044b' # 0x00f1 -> CYRILLIC SMALL LETTER YERU
'\u042b' # 0x00f2 -> CYRILLIC CAPITAL LETTER YERU
'\u0437' # 0x00f3 -> CYRILLIC SMALL LETTER ZE
'\u0417' # 0x00f4 -> CYRILLIC CAPITAL LETTER ZE
'\u0448' # 0x00f5 -> CYRILLIC SMALL LETTER SHA
'\u0428' # 0x00f6 -> CYRILLIC CAPITAL LETTER SHA
'\u044d' # 0x00f7 -> CYRILLIC SMALL LETTER E
'\u042d' # 0x00f8 -> CYRILLIC CAPITAL LETTER E
'\u0449' # 0x00f9 -> CYRILLIC SMALL LETTER SHCHA
'\u0429' # 0x00fa -> CYRILLIC CAPITAL LETTER SHCHA
'\u0447' # 0x00fb -> CYRILLIC SMALL LETTER CHE
'\u0427' # 0x00fc -> CYRILLIC CAPITAL LETTER CHE
'\xa7' # 0x00fd -> SECTION SIGN
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
encoding_map = {
0x0000: 0x0000, # NULL
0x0001: 0x0001, # START OF HEADING
0x0002: 0x0002, # START OF TEXT
0x0003: 0x0003, # END OF TEXT
0x0004: 0x0004, # END OF TRANSMISSION
0x0005: 0x0005, # ENQUIRY
0x0006: 0x0006, # ACKNOWLEDGE
0x0007: 0x0007, # BELL
0x0008: 0x0008, # BACKSPACE
0x0009: 0x0009, # HORIZONTAL TABULATION
0x000a: 0x000a, # LINE FEED
0x000b: 0x000b, # VERTICAL TABULATION
0x000c: 0x000c, # FORM FEED
0x000d: 0x000d, # CARRIAGE RETURN
0x000e: 0x000e, # SHIFT OUT
0x000f: 0x000f, # SHIFT IN
0x0010: 0x0010, # DATA LINK ESCAPE
0x0011: 0x0011, # DEVICE CONTROL ONE
0x0012: 0x0012, # DEVICE CONTROL TWO
0x0013: 0x0013, # DEVICE CONTROL THREE
0x0014: 0x0014, # DEVICE CONTROL FOUR
0x0015: 0x0015, # NEGATIVE ACKNOWLEDGE
0x0016: 0x0016, # SYNCHRONOUS IDLE
0x0017: 0x0017, # END OF TRANSMISSION BLOCK
0x0018: 0x0018, # CANCEL
0x0019: 0x0019, # END OF MEDIUM
0x001a: 0x001a, # SUBSTITUTE
0x001b: 0x001b, # ESCAPE
0x001c: 0x001c, # FILE SEPARATOR
0x001d: 0x001d, # GROUP SEPARATOR
0x001e: 0x001e, # RECORD SEPARATOR
0x001f: 0x001f, # UNIT SEPARATOR
0x0020: 0x0020, # SPACE
0x0021: 0x0021, # EXCLAMATION MARK
0x0022: 0x0022, # QUOTATION MARK
0x0023: 0x0023, # NUMBER SIGN
0x0024: 0x0024, # DOLLAR SIGN
0x0025: 0x0025, # PERCENT SIGN
0x0026: 0x0026, # AMPERSAND
0x0027: 0x0027, # APOSTROPHE
0x0028: 0x0028, # LEFT PARENTHESIS
0x0029: 0x0029, # RIGHT PARENTHESIS
0x002a: 0x002a, # ASTERISK
0x002b: 0x002b, # PLUS SIGN
0x002c: 0x002c, # COMMA
0x002d: 0x002d, # HYPHEN-MINUS
0x002e: 0x002e, # FULL STOP
0x002f: 0x002f, # SOLIDUS
0x0030: 0x0030, # DIGIT ZERO
0x0031: 0x0031, # DIGIT ONE
0x0032: 0x0032, # DIGIT TWO
0x0033: 0x0033, # DIGIT THREE
0x0034: 0x0034, # DIGIT FOUR
0x0035: 0x0035, # DIGIT FIVE
0x0036: 0x0036, # DIGIT SIX
0x0037: 0x0037, # DIGIT SEVEN
0x0038: 0x0038, # DIGIT EIGHT
0x0039: 0x0039, # DIGIT NINE
0x003a: 0x003a, # COLON
0x003b: 0x003b, # SEMICOLON
0x003c: 0x003c, # LESS-THAN SIGN
0x003d: 0x003d, # EQUALS SIGN
0x003e: 0x003e, # GREATER-THAN SIGN
0x003f: 0x003f, # QUESTION MARK
0x0040: 0x0040, # COMMERCIAL AT
0x0041: 0x0041, # LATIN CAPITAL LETTER A
0x0042: 0x0042, # LATIN CAPITAL LETTER B
0x0043: 0x0043, # LATIN CAPITAL LETTER C
0x0044: 0x0044, # LATIN CAPITAL LETTER D
0x0045: 0x0045, # LATIN CAPITAL LETTER E
0x0046: 0x0046, # LATIN CAPITAL LETTER F
0x0047: 0x0047, # LATIN CAPITAL LETTER G
0x0048: 0x0048, # LATIN CAPITAL LETTER H
0x0049: 0x0049, # LATIN CAPITAL LETTER I
0x004a: 0x004a, # LATIN CAPITAL LETTER J
0x004b: 0x004b, # LATIN CAPITAL LETTER K
0x004c: 0x004c, # LATIN CAPITAL LETTER L
0x004d: 0x004d, # LATIN CAPITAL LETTER M
0x004e: 0x004e, # LATIN CAPITAL LETTER N
0x004f: 0x004f, # LATIN CAPITAL LETTER O
0x0050: 0x0050, # LATIN CAPITAL LETTER P
0x0051: 0x0051, # LATIN CAPITAL LETTER Q
0x0052: 0x0052, # LATIN CAPITAL LETTER R
0x0053: 0x0053, # LATIN CAPITAL LETTER S
0x0054: 0x0054, # LATIN CAPITAL LETTER T
0x0055: 0x0055, # LATIN CAPITAL LETTER U
0x0056: 0x0056, # LATIN CAPITAL LETTER V
0x0057: 0x0057, # LATIN CAPITAL LETTER W
0x0058: 0x0058, # LATIN CAPITAL LETTER X
0x0059: 0x0059, # LATIN CAPITAL LETTER Y
0x005a: 0x005a, # LATIN CAPITAL LETTER Z
0x005b: 0x005b, # LEFT SQUARE BRACKET
0x005c: 0x005c, # REVERSE SOLIDUS
0x005d: 0x005d, # RIGHT SQUARE BRACKET
0x005e: 0x005e, # CIRCUMFLEX ACCENT
0x005f: 0x005f, # LOW LINE
0x0060: 0x0060, # GRAVE ACCENT
0x0061: 0x0061, # LATIN SMALL LETTER A
0x0062: 0x0062, # LATIN SMALL LETTER B
0x0063: 0x0063, # LATIN SMALL LETTER C
0x0064: 0x0064, # LATIN SMALL LETTER D
0x0065: 0x0065, # LATIN SMALL LETTER E
0x0066: 0x0066, # LATIN SMALL LETTER F
0x0067: 0x0067, # LATIN SMALL LETTER G
0x0068: 0x0068, # LATIN SMALL LETTER H
0x0069: 0x0069, # LATIN SMALL LETTER I
0x006a: 0x006a, # LATIN SMALL LETTER J
0x006b: 0x006b, # LATIN SMALL LETTER K
0x006c: 0x006c, # LATIN SMALL LETTER L
0x006d: 0x006d, # LATIN SMALL LETTER M
0x006e: 0x006e, # LATIN SMALL LETTER N
0x006f: 0x006f, # LATIN SMALL LETTER O
0x0070: 0x0070, # LATIN SMALL LETTER P
0x0071: 0x0071, # LATIN SMALL LETTER Q
0x0072: 0x0072, # LATIN SMALL LETTER R
0x0073: 0x0073, # LATIN SMALL LETTER S
0x0074: 0x0074, # LATIN SMALL LETTER T
0x0075: 0x0075, # LATIN SMALL LETTER U
0x0076: 0x0076, # LATIN SMALL LETTER V
0x0077: 0x0077, # LATIN SMALL LETTER W
0x0078: 0x0078, # LATIN SMALL LETTER X
0x0079: 0x0079, # LATIN SMALL LETTER Y
0x007a: 0x007a, # LATIN SMALL LETTER Z
0x007b: 0x007b, # LEFT CURLY BRACKET
0x007c: 0x007c, # VERTICAL LINE
0x007d: 0x007d, # RIGHT CURLY BRACKET
0x007e: 0x007e, # TILDE
0x007f: 0x007f, # DELETE
0x00a0: 0x00ff, # NO-BREAK SPACE
0x00a4: 0x00cf, # CURRENCY SIGN
0x00a7: 0x00fd, # SECTION SIGN
0x00ab: 0x00ae, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
0x00ad: 0x00f0, # SOFT HYPHEN
0x00bb: 0x00af, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
0x0401: 0x0085, # CYRILLIC CAPITAL LETTER IO
0x0402: 0x0081, # CYRILLIC CAPITAL LETTER DJE
0x0403: 0x0083, # CYRILLIC CAPITAL LETTER GJE
0x0404: 0x0087, # CYRILLIC CAPITAL LETTER UKRAINIAN IE
0x0405: 0x0089, # CYRILLIC CAPITAL LETTER DZE
0x0406: 0x008b, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x0407: 0x008d, # CYRILLIC CAPITAL LETTER YI
0x0408: 0x008f, # CYRILLIC CAPITAL LETTER JE
0x0409: 0x0091, # CYRILLIC CAPITAL LETTER LJE
0x040a: 0x0093, # CYRILLIC CAPITAL LETTER NJE
0x040b: 0x0095, # CYRILLIC CAPITAL LETTER TSHE
0x040c: 0x0097, # CYRILLIC CAPITAL LETTER KJE
0x040e: 0x0099, # CYRILLIC CAPITAL LETTER SHORT U
0x040f: 0x009b, # CYRILLIC CAPITAL LETTER DZHE
0x0410: 0x00a1, # CYRILLIC CAPITAL LETTER A
0x0411: 0x00a3, # CYRILLIC CAPITAL LETTER BE
0x0412: 0x00ec, # CYRILLIC CAPITAL LETTER VE
0x0413: 0x00ad, # CYRILLIC CAPITAL LETTER GHE
0x0414: 0x00a7, # CYRILLIC CAPITAL LETTER DE
0x0415: 0x00a9, # CYRILLIC CAPITAL LETTER IE
0x0416: 0x00ea, # CYRILLIC CAPITAL LETTER ZHE
0x0417: 0x00f4, # CYRILLIC CAPITAL LETTER ZE
0x0418: 0x00b8, # CYRILLIC CAPITAL LETTER I
0x0419: 0x00be, # CYRILLIC CAPITAL LETTER SHORT I
0x041a: 0x00c7, # CYRILLIC CAPITAL LETTER KA
0x041b: 0x00d1, # CYRILLIC CAPITAL LETTER EL
0x041c: 0x00d3, # CYRILLIC CAPITAL LETTER EM
0x041d: 0x00d5, # CYRILLIC CAPITAL LETTER EN
0x041e: 0x00d7, # CYRILLIC CAPITAL LETTER O
0x041f: 0x00dd, # CYRILLIC CAPITAL LETTER PE
0x0420: 0x00e2, # CYRILLIC CAPITAL LETTER ER
0x0421: 0x00e4, # CYRILLIC CAPITAL LETTER ES
0x0422: 0x00e6, # CYRILLIC CAPITAL LETTER TE
0x0423: 0x00e8, # CYRILLIC CAPITAL LETTER U
0x0424: 0x00ab, # CYRILLIC CAPITAL LETTER EF
0x0425: 0x00b6, # CYRILLIC CAPITAL LETTER HA
0x0426: 0x00a5, # CYRILLIC CAPITAL LETTER TSE
0x0427: 0x00fc, # CYRILLIC CAPITAL LETTER CHE
0x0428: 0x00f6, # CYRILLIC CAPITAL LETTER SHA
0x0429: 0x00fa, # CYRILLIC CAPITAL LETTER SHCHA
0x042a: 0x009f, # CYRILLIC CAPITAL LETTER HARD SIGN
0x042b: 0x00f2, # CYRILLIC CAPITAL LETTER YERU
0x042c: 0x00ee, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x042d: 0x00f8, # CYRILLIC CAPITAL LETTER E
0x042e: 0x009d, # CYRILLIC CAPITAL LETTER YU
0x042f: 0x00e0, # CYRILLIC CAPITAL LETTER YA
0x0430: 0x00a0, # CYRILLIC SMALL LETTER A
0x0431: 0x00a2, # CYRILLIC SMALL LETTER BE
0x0432: 0x00eb, # CYRILLIC SMALL LETTER VE
0x0433: 0x00ac, # CYRILLIC SMALL LETTER GHE
0x0434: 0x00a6, # CYRILLIC SMALL LETTER DE
0x0435: 0x00a8, # CYRILLIC SMALL LETTER IE
0x0436: 0x00e9, # CYRILLIC SMALL LETTER ZHE
0x0437: 0x00f3, # CYRILLIC SMALL LETTER ZE
0x0438: 0x00b7, # CYRILLIC SMALL LETTER I
0x0439: 0x00bd, # CYRILLIC SMALL LETTER SHORT I
0x043a: 0x00c6, # CYRILLIC SMALL LETTER KA
0x043b: 0x00d0, # CYRILLIC SMALL LETTER EL
0x043c: 0x00d2, # CYRILLIC SMALL LETTER EM
0x043d: 0x00d4, # CYRILLIC SMALL LETTER EN
0x043e: 0x00d6, # CYRILLIC SMALL LETTER O
0x043f: 0x00d8, # CYRILLIC SMALL LETTER PE
0x0440: 0x00e1, # CYRILLIC SMALL LETTER ER
0x0441: 0x00e3, # CYRILLIC SMALL LETTER ES
0x0442: 0x00e5, # CYRILLIC SMALL LETTER TE
0x0443: 0x00e7, # CYRILLIC SMALL LETTER U
0x0444: 0x00aa, # CYRILLIC SMALL LETTER EF
0x0445: 0x00b5, # CYRILLIC SMALL LETTER HA
0x0446: 0x00a4, # CYRILLIC SMALL LETTER TSE
0x0447: 0x00fb, # CYRILLIC SMALL LETTER CHE
0x0448: 0x00f5, # CYRILLIC SMALL LETTER SHA
0x0449: 0x00f9, # CYRILLIC SMALL LETTER SHCHA
0x044a: 0x009e, # CYRILLIC SMALL LETTER HARD SIGN
0x044b: 0x00f1, # CYRILLIC SMALL LETTER YERU
0x044c: 0x00ed, # CYRILLIC SMALL LETTER SOFT SIGN
0x044d: 0x00f7, # CYRILLIC SMALL LETTER E
0x044e: 0x009c, # CYRILLIC SMALL LETTER YU
0x044f: 0x00de, # CYRILLIC SMALL LETTER YA
0x0451: 0x0084, # CYRILLIC SMALL LETTER IO
0x0452: 0x0080, # CYRILLIC SMALL LETTER DJE
0x0453: 0x0082, # CYRILLIC SMALL LETTER GJE
0x0454: 0x0086, # CYRILLIC SMALL LETTER UKRAINIAN IE
0x0455: 0x0088, # CYRILLIC SMALL LETTER DZE
0x0456: 0x008a, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x0457: 0x008c, # CYRILLIC SMALL LETTER YI
0x0458: 0x008e, # CYRILLIC SMALL LETTER JE
0x0459: 0x0090, # CYRILLIC SMALL LETTER LJE
0x045a: 0x0092, # CYRILLIC SMALL LETTER NJE
0x045b: 0x0094, # CYRILLIC SMALL LETTER TSHE
0x045c: 0x0096, # CYRILLIC SMALL LETTER KJE
0x045e: 0x0098, # CYRILLIC SMALL LETTER SHORT U
0x045f: 0x009a, # CYRILLIC SMALL LETTER DZHE
0x2116: 0x00ef, # NUMERO SIGN
0x2500: 0x00c4, # BOX DRAWINGS LIGHT HORIZONTAL
0x2502: 0x00b3, # BOX DRAWINGS LIGHT VERTICAL
0x250c: 0x00da, # BOX DRAWINGS LIGHT DOWN AND RIGHT
0x2510: 0x00bf, # BOX DRAWINGS LIGHT DOWN AND LEFT
0x2514: 0x00c0, # BOX DRAWINGS LIGHT UP AND RIGHT
0x2518: 0x00d9, # BOX DRAWINGS LIGHT UP AND LEFT
0x251c: 0x00c3, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
0x2524: 0x00b4, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
0x252c: 0x00c2, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
0x2534: 0x00c1, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
0x253c: 0x00c5, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
0x2550: 0x00cd, # BOX DRAWINGS DOUBLE HORIZONTAL
0x2551: 0x00ba, # BOX DRAWINGS DOUBLE VERTICAL
0x2554: 0x00c9, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
0x2557: 0x00bb, # BOX DRAWINGS DOUBLE DOWN AND LEFT
0x255a: 0x00c8, # BOX DRAWINGS DOUBLE UP AND RIGHT
0x255d: 0x00bc, # BOX DRAWINGS DOUBLE UP AND LEFT
0x2560: 0x00cc, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
0x2563: 0x00b9, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
0x2566: 0x00cb, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
0x2569: 0x00ca, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
0x256c: 0x00ce, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
0x2580: 0x00df, # UPPER HALF BLOCK
0x2584: 0x00dc, # LOWER HALF BLOCK
0x2588: 0x00db, # FULL BLOCK
0x2591: 0x00b0, # LIGHT SHADE
0x2592: 0x00b1, # MEDIUM SHADE
0x2593: 0x00b2, # DARK SHADE
0x25a0: 0x00fe, # BLACK SQUARE
}
| mit |
fishky/django-rest-framework | rest_framework/routers.py | 52 | 11664 | """
Routers provide a convenient and consistent way of automatically
determining the URL conf for your API.
They are used by simply instantiating a Router class, and then registering
all the required ViewSets with that router.
For example, you might have a `urls.py` that looks something like this:
router = routers.DefaultRouter()
router.register('users', UserViewSet, 'user')
router.register('accounts', AccountViewSet, 'account')
urlpatterns = router.urls
"""
from __future__ import unicode_literals
import itertools
from collections import namedtuple
from django.conf.urls import url
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import NoReverseMatch
from rest_framework import views
from rest_framework.compat import OrderedDict
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.urlpatterns import format_suffix_patterns
Route = namedtuple('Route', ['url', 'mapping', 'name', 'initkwargs'])
DynamicDetailRoute = namedtuple('DynamicDetailRoute', ['url', 'name', 'initkwargs'])
DynamicListRoute = namedtuple('DynamicListRoute', ['url', 'name', 'initkwargs'])
def replace_methodname(format_string, methodname):
"""
Partially format a format_string, swapping out any
'{methodname}' or '{methodnamehyphen}' components.
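
    Illustrative (assumption, not from the original docstring):
    replace_methodname('^{prefix}/{methodnamehyphen}$', 'set_password')
    returns '^{prefix}/set-password$', leaving '{prefix}' untouched for a
    later .format() call.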
"""
methodnamehyphen = methodname.replace('_', '-')
ret = format_string
ret = ret.replace('{methodname}', methodname)
ret = ret.replace('{methodnamehyphen}', methodnamehyphen)
return ret
def flatten(list_of_lists):
"""
Takes an iterable of iterables, returns a single iterable containing all items
"""
return itertools.chain(*list_of_lists)
class BaseRouter(object):
def __init__(self):
self.registry = []
def register(self, prefix, viewset, base_name=None):
if base_name is None:
base_name = self.get_default_base_name(viewset)
self.registry.append((prefix, viewset, base_name))
def get_default_base_name(self, viewset):
"""
If `base_name` is not specified, attempt to automatically determine
it from the viewset.
"""
raise NotImplementedError('get_default_base_name must be overridden')
def get_urls(self):
"""
Return a list of URL patterns, given the registered viewsets.
"""
raise NotImplementedError('get_urls must be overridden')
@property
def urls(self):
if not hasattr(self, '_urls'):
self._urls = self.get_urls()
return self._urls
class SimpleRouter(BaseRouter):
routes = [
# List route.
Route(
url=r'^{prefix}{trailing_slash}$',
mapping={
'get': 'list',
'post': 'create'
},
name='{basename}-list',
initkwargs={'suffix': 'List'}
),
# Dynamically generated list routes.
# Generated using @list_route decorator
# on methods of the viewset.
DynamicListRoute(
url=r'^{prefix}/{methodname}{trailing_slash}$',
name='{basename}-{methodnamehyphen}',
initkwargs={}
),
# Detail route.
Route(
url=r'^{prefix}/{lookup}{trailing_slash}$',
mapping={
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
},
name='{basename}-detail',
initkwargs={'suffix': 'Instance'}
),
# Dynamically generated detail routes.
# Generated using @detail_route decorator on methods of the viewset.
DynamicDetailRoute(
url=r'^{prefix}/{lookup}/{methodname}{trailing_slash}$',
name='{basename}-{methodnamehyphen}',
initkwargs={}
),
]
def __init__(self, trailing_slash=True):
self.trailing_slash = trailing_slash and '/' or ''
super(SimpleRouter, self).__init__()
def get_default_base_name(self, viewset):
"""
If `base_name` is not specified, attempt to automatically determine
it from the viewset.
"""
queryset = getattr(viewset, 'queryset', None)
assert queryset is not None, '`base_name` argument not specified, and could ' \
'not automatically determine the name from the viewset, as ' \
'it does not have a `.queryset` attribute.'
return queryset.model._meta.object_name.lower()
def get_routes(self, viewset):
"""
Augment `self.routes` with any dynamically generated routes.
Returns a list of the Route namedtuple.
"""
known_actions = flatten([route.mapping.values() for route in self.routes if isinstance(route, Route)])
# Determine any `@detail_route` or `@list_route` decorated methods on the viewset
detail_routes = []
list_routes = []
for methodname in dir(viewset):
attr = getattr(viewset, methodname)
httpmethods = getattr(attr, 'bind_to_methods', None)
detail = getattr(attr, 'detail', True)
if httpmethods:
if methodname in known_actions:
raise ImproperlyConfigured('Cannot use @detail_route or @list_route '
'decorators on method "%s" '
'as it is an existing route' % methodname)
httpmethods = [method.lower() for method in httpmethods]
if detail:
detail_routes.append((httpmethods, methodname))
else:
list_routes.append((httpmethods, methodname))
def _get_dynamic_routes(route, dynamic_routes):
ret = []
for httpmethods, methodname in dynamic_routes:
method_kwargs = getattr(viewset, methodname).kwargs
initkwargs = route.initkwargs.copy()
initkwargs.update(method_kwargs)
url_path = initkwargs.pop("url_path", None) or methodname
ret.append(Route(
url=replace_methodname(route.url, url_path),
mapping=dict((httpmethod, methodname) for httpmethod in httpmethods),
name=replace_methodname(route.name, url_path),
initkwargs=initkwargs,
))
return ret
ret = []
for route in self.routes:
if isinstance(route, DynamicDetailRoute):
# Dynamic detail routes (@detail_route decorator)
ret += _get_dynamic_routes(route, detail_routes)
elif isinstance(route, DynamicListRoute):
# Dynamic list routes (@list_route decorator)
ret += _get_dynamic_routes(route, list_routes)
else:
# Standard route
ret.append(route)
return ret
def get_method_map(self, viewset, method_map):
"""
Given a viewset, and a mapping of http methods to actions,
return a new mapping which only includes any mappings that
are actually implemented by the viewset.
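
        Illustrative (assumption): for a read-only viewset implementing only
        'list' and 'retrieve', the list-route mapping
        {'get': 'list', 'post': 'create'} is trimmed to {'get': 'list'}.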
"""
bound_methods = {}
for method, action in method_map.items():
if hasattr(viewset, action):
bound_methods[method] = action
return bound_methods
def get_lookup_regex(self, viewset, lookup_prefix=''):
"""
Given a viewset, return the portion of URL regex that is used
to match against a single instance.
        Note that lookup_prefix is not used directly inside REST framework
itself, but is required in order to nicely support nested router
implementations, such as drf-nested-routers.
https://github.com/alanjds/drf-nested-routers
"""
base_regex = '(?P<{lookup_prefix}{lookup_url_kwarg}>{lookup_value})'
        # Use `pk` as the default field unless set. The default regex should not
        # consume `.json` style suffixes and should break at '/' boundaries.
lookup_field = getattr(viewset, 'lookup_field', 'pk')
lookup_url_kwarg = getattr(viewset, 'lookup_url_kwarg', None) or lookup_field
lookup_value = getattr(viewset, 'lookup_value_regex', '[^/.]+')
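        # Illustrative (assumption): with the defaults above this formats to
        # '(?P<pk>[^/.]+)'; a nested router passing lookup_prefix='parent_'
        # would instead produce '(?P<parent_pk>[^/.]+)'.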
return base_regex.format(
lookup_prefix=lookup_prefix,
lookup_url_kwarg=lookup_url_kwarg,
lookup_value=lookup_value
)
def get_urls(self):
"""
Use the registered viewsets to generate a list of URL patterns.
"""
ret = []
for prefix, viewset, basename in self.registry:
lookup = self.get_lookup_regex(viewset)
routes = self.get_routes(viewset)
for route in routes:
# Only actions which actually exist on the viewset will be bound
mapping = self.get_method_map(viewset, route.mapping)
if not mapping:
continue
# Build the url pattern
regex = route.url.format(
prefix=prefix,
lookup=lookup,
trailing_slash=self.trailing_slash
)
view = viewset.as_view(mapping, **route.initkwargs)
name = route.name.format(basename=basename)
ret.append(url(regex, view, name=name))
return ret
class DefaultRouter(SimpleRouter):
"""
The default router extends the SimpleRouter, but also adds in a default
API root view, and adds format suffix patterns to the URLs.
"""
include_root_view = True
include_format_suffixes = True
root_view_name = 'api-root'
def get_api_root_view(self):
"""
Return a view to use as the API root.
"""
api_root_dict = OrderedDict()
list_name = self.routes[0].name
for prefix, viewset, basename in self.registry:
api_root_dict[prefix] = list_name.format(basename=basename)
class APIRoot(views.APIView):
_ignore_model_permissions = True
def get(self, request, *args, **kwargs):
ret = OrderedDict()
namespace = request.resolver_match.namespace
for key, url_name in api_root_dict.items():
if namespace:
url_name = namespace + ':' + url_name
try:
ret[key] = reverse(
url_name,
args=args,
kwargs=kwargs,
request=request,
format=kwargs.get('format', None)
)
except NoReverseMatch:
                        # Don't bail out if e.g. no list routes exist, only detail routes.
continue
return Response(ret)
return APIRoot.as_view()
def get_urls(self):
"""
Generate the list of URL patterns, including a default root view
for the API, and appending `.json` style format suffixes.
"""
urls = []
if self.include_root_view:
root_url = url(r'^$', self.get_api_root_view(), name=self.root_view_name)
urls.append(root_url)
default_urls = super(DefaultRouter, self).get_urls()
urls.extend(default_urls)
if self.include_format_suffixes:
urls = format_suffix_patterns(urls)
return urls
| bsd-2-clause |
hplustree/trove | trove/tests/scenario/runners/instance_error_create_runners.py | 3 | 4773 | # Copyright 2016 Tesora Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import SkipTest
from trove.tests.scenario.runners.test_runners import CheckInstance
from trove.tests.scenario.runners.test_runners import TestRunner
class InstanceErrorCreateRunner(TestRunner):
def __init__(self):
super(InstanceErrorCreateRunner, self).__init__(sleep_time=1)
self.error_inst_id = None
self.error2_inst_id = None
def run_create_error_instance(self, expected_http_code=200):
if self.is_using_existing_instance:
raise SkipTest("Using an existing instance.")
name = self.instance_info.name + '_error'
flavor = self.get_instance_flavor(fault_num=1)
client = self.auth_client
inst = client.instances.create(
name,
self.get_flavor_href(flavor),
self.instance_info.volume,
nics=self.instance_info.nics,
datastore=self.instance_info.dbaas_datastore,
datastore_version=self.instance_info.dbaas_datastore_version)
self.assert_client_code(client, expected_http_code)
self.error_inst_id = inst.id
def run_create_error2_instance(self, expected_http_code=200):
if self.is_using_existing_instance:
raise SkipTest("Using an existing instance.")
name = self.instance_info.name + '_error2'
flavor = self.get_instance_flavor(fault_num=2)
client = self.auth_client
inst = client.instances.create(
name,
self.get_flavor_href(flavor),
self.instance_info.volume,
nics=self.instance_info.nics,
datastore=self.instance_info.dbaas_datastore,
datastore_version=self.instance_info.dbaas_datastore_version)
self.assert_client_code(client, expected_http_code)
self.error2_inst_id = inst.id
def run_wait_for_error_instances(self, expected_states=['ERROR']):
error_ids = []
if self.error_inst_id:
error_ids.append(self.error_inst_id)
if self.error2_inst_id:
error_ids.append(self.error2_inst_id)
if error_ids:
self.assert_all_instance_states(
error_ids, expected_states, fast_fail_status=[])
def run_validate_error_instance(self):
if not self.error_inst_id:
raise SkipTest("No error instance created.")
instance = self.get_instance(
self.error_inst_id, self.auth_client)
with CheckInstance(instance._info) as check:
check.fault()
err_msg = "disk is too small for requested image"
self.assert_true(err_msg in instance.fault['message'],
"Message '%s' does not contain '%s'" %
(instance.fault['message'], err_msg))
def run_validate_error2_instance(self):
if not self.error2_inst_id:
raise SkipTest("No error2 instance created.")
instance = self.get_instance(
self.error2_inst_id, client=self.admin_client)
with CheckInstance(instance._info) as check:
check.fault(is_admin=True)
err_msg = "Quota exceeded for ram"
self.assert_true(err_msg in instance.fault['message'],
"Message '%s' does not contain '%s'" %
(instance.fault['message'], err_msg))
def run_delete_error_instances(self, expected_http_code=202):
client = self.auth_client
if self.error_inst_id:
client.instances.delete(self.error_inst_id)
self.assert_client_code(client, expected_http_code)
if self.error2_inst_id:
client.instances.delete(self.error2_inst_id)
self.assert_client_code(client, expected_http_code)
def run_wait_for_error_delete(self, expected_states=['SHUTDOWN']):
delete_ids = []
if self.error_inst_id:
delete_ids.append(self.error_inst_id)
if self.error2_inst_id:
delete_ids.append(self.error2_inst_id)
if delete_ids:
self.assert_all_gone(delete_ids, expected_states[-1])
else:
raise SkipTest("Cleanup is not required.")
| apache-2.0 |
xujb/odoo | addons/purchase/purchase.py | 10 | 89960 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import pytz
from openerp import SUPERUSER_ID, workflow
from datetime import datetime
from dateutil.relativedelta import relativedelta
from operator import attrgetter
from openerp.tools.safe_eval import safe_eval as eval
from openerp.osv import fields, osv
from openerp.tools.translate import _
import openerp.addons.decimal_precision as dp
from openerp.osv.orm import browse_record_list, browse_record, browse_null
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT, DATETIME_FORMATS_MAP
from openerp.tools.float_utils import float_compare
class purchase_order(osv.osv):
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
for c in self.pool.get('account.tax').compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, order.partner_id)['taxes']:
val += c.get('amount', 0.0)
res[order.id]['amount_tax']=cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed']=cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total']=res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _set_minimum_planned_date(self, cr, uid, ids, name, value, arg, context=None):
if not value: return False
        if not isinstance(ids, list):
ids=[ids]
pol_obj = self.pool.get('purchase.order.line')
for po in self.browse(cr, uid, ids, context=context):
if po.order_line:
pol_ids = pol_obj.search(cr, uid, [
('order_id', '=', po.id), '|', ('date_planned', '=', po.minimum_planned_date), ('date_planned', '<', value)
], context=context)
pol_obj.write(cr, uid, pol_ids, {'date_planned': value}, context=context)
self.invalidate_cache(cr, uid, context=context)
return True
def _minimum_planned_date(self, cr, uid, ids, field_name, arg, context=None):
res={}
purchase_obj=self.browse(cr, uid, ids, context=context)
for purchase in purchase_obj:
res[purchase.id] = False
if purchase.order_line:
min_date=purchase.order_line[0].date_planned
for line in purchase.order_line:
if line.state == 'cancel':
continue
if line.date_planned < min_date:
min_date=line.date_planned
res[purchase.id]=min_date
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
tot = 0.0
for invoice in purchase.invoice_ids:
if invoice.state not in ('draft','cancel'):
tot += invoice.amount_untaxed
if purchase.amount_untaxed:
res[purchase.id] = tot * 100.0 / purchase.amount_untaxed
else:
res[purchase.id] = 0.0
return res
def _shipped_rate(self, cr, uid, ids, name, arg, context=None):
if not ids: return {}
res = {}
for id in ids:
res[id] = [0.0,0.0]
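        # res[id] accumulates [received_qty, total_qty]; cancelled moves are
        # skipped below and the final value is the received percentage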
cr.execute('''SELECT
p.order_id, sum(m.product_qty), m.state
FROM
stock_move m
LEFT JOIN
purchase_order_line p on (p.id=m.purchase_line_id)
WHERE
p.order_id IN %s GROUP BY m.state, p.order_id''',(tuple(ids),))
for oid,nbr,state in cr.fetchall():
if state=='cancel':
continue
if state=='done':
res[oid][0] += nbr or 0.0
res[oid][1] += nbr or 0.0
else:
res[oid][1] += nbr or 0.0
for r in res:
if not res[r][1]:
res[r] = 0.0
else:
res[r] = 100.0 * res[r][0] / res[r][1]
return res
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('purchase.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _get_purchase_order(self, cr, uid, ids, context=None):
result = {}
for order in self.browse(cr, uid, ids, context=context):
result[order.id] = True
return result.keys()
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for purchase in self.browse(cursor, user, ids, context=context):
res[purchase.id] = all(line.invoiced for line in purchase.order_line if line.state != 'cancel')
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
journal_obj = self.pool.get('account.journal')
res = journal_obj.search(cr, uid, [('type', '=', 'purchase'),
('company_id', '=', company_id)],
limit=1)
return res and res[0] or False
def _get_picking_in(self, cr, uid, context=None):
obj_data = self.pool.get('ir.model.data')
type_obj = self.pool.get('stock.picking.type')
user_obj = self.pool.get('res.users')
company_id = user_obj.browse(cr, uid, uid, context=context).company_id.id
types = type_obj.search(cr, uid, [('code', '=', 'incoming'), ('warehouse_id.company_id', '=', company_id)], context=context)
if not types:
types = type_obj.search(cr, uid, [('code', '=', 'incoming'), ('warehouse_id', '=', False)], context=context)
if not types:
raise osv.except_osv(_('Error!'), _("Make sure you have at least an incoming picking type defined"))
return types[0]
def _get_picking_ids(self, cr, uid, ids, field_names, args, context=None):
res = {}
for po_id in ids:
res[po_id] = []
query = """
SELECT picking_id, po.id FROM stock_picking p, stock_move m, purchase_order_line pol, purchase_order po
WHERE po.id in %s and po.id = pol.order_id and pol.id = m.purchase_line_id and m.picking_id = p.id
GROUP BY picking_id, po.id
"""
cr.execute(query, (tuple(ids), ))
picks = cr.fetchall()
for pick_id, po_id in picks:
res[po_id].append(pick_id)
return res
def _count_all(self, cr, uid, ids, field_name, arg, context=None):
return {
purchase.id: {
'shipment_count': len(purchase.picking_ids),
'invoice_count': len(purchase.invoice_ids),
}
for purchase in self.browse(cr, uid, ids, context=context)
}
STATE_SELECTION = [
('draft', 'Draft PO'),
('sent', 'RFQ'),
('bid', 'Bid Received'),
('confirmed', 'Waiting Approval'),
('approved', 'Purchase Confirmed'),
('except_picking', 'Shipping Exception'),
('except_invoice', 'Invoice Exception'),
('done', 'Done'),
('cancel', 'Cancelled')
]
READONLY_STATES = {
'confirmed': [('readonly', True)],
'approved': [('readonly', True)],
'done': [('readonly', True)]
}
_track = {
'state': {
'purchase.mt_rfq_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state == 'confirmed',
'purchase.mt_rfq_approved': lambda self, cr, uid, obj, ctx=None: obj.state == 'approved',
'purchase.mt_rfq_done': lambda self, cr, uid, obj, ctx=None: obj.state == 'done',
},
}
_columns = {
'name': fields.char('Order Reference', required=True, select=True, copy=False,
help="Unique number of the purchase order, "
"computed automatically when the purchase order is created."),
'origin': fields.char('Source Document', copy=False,
help="Reference of the document that generated this purchase order "
"request; a sales order or an internal procurement request."),
'partner_ref': fields.char('Supplier Reference', states={'confirmed':[('readonly',True)],
'approved':[('readonly',True)],
'done':[('readonly',True)]},
copy=False,
help="Reference of the sales order or bid sent by your supplier. "
"It's mainly used to do the matching when you receive the "
"products as this reference is usually written on the "
"delivery order sent by your supplier."),
'date_order':fields.datetime('Order Date', required=True, states={'confirmed':[('readonly',True)],
'approved':[('readonly',True)]},
select=True, help="Depicts the date where the Quotation should be validated and converted into a Purchase Order, by default it's the creation date.",
copy=False),
'date_approve':fields.date('Date Approved', readonly=1, select=True, copy=False,
help="Date on which purchase order has been approved"),
'partner_id':fields.many2one('res.partner', 'Supplier', required=True, states=READONLY_STATES,
change_default=True, track_visibility='always'),
'dest_address_id':fields.many2one('res.partner', 'Customer Address (Direct Delivery)',
states=READONLY_STATES,
help="Put an address if you want to deliver directly from the supplier to the customer. " \
"Otherwise, keep empty to deliver to your own company."
),
'location_id': fields.many2one('stock.location', 'Destination', required=True, domain=[('usage','<>','view')], states=READONLY_STATES),
'pricelist_id':fields.many2one('product.pricelist', 'Pricelist', required=True, states=READONLY_STATES, help="The pricelist sets the currency used for this purchase order. It also computes the supplier price for the selected products/quantities."),
'currency_id': fields.many2one('res.currency','Currency', required=True, states=READONLY_STATES),
'state': fields.selection(STATE_SELECTION, 'Status', readonly=True,
help="The status of the purchase order or the quotation request. "
"A request for quotation is a purchase order in a 'Draft' status. "
"Then the order has to be confirmed by the user, the status switch "
"to 'Confirmed'. Then the supplier must confirm the order to change "
"the status to 'Approved'. When the purchase order is paid and "
"received, the status becomes 'Done'. If a cancel action occurs in "
"the invoice or in the receipt of goods, the status becomes "
"in exception.",
select=True, copy=False),
'order_line': fields.one2many('purchase.order.line', 'order_id', 'Order Lines',
states={'approved':[('readonly',True)],
'done':[('readonly',True)]},
copy=True),
'validator' : fields.many2one('res.users', 'Validated by', readonly=True, copy=False),
'notes': fields.text('Terms and Conditions'),
'invoice_ids': fields.many2many('account.invoice', 'purchase_invoice_rel', 'purchase_id',
'invoice_id', 'Invoices', copy=False,
help="Invoices generated for a purchase order"),
'picking_ids': fields.function(_get_picking_ids, method=True, type='one2many', relation='stock.picking', string='Picking List', help="This is the list of receipts that have been generated for this purchase order."),
'shipped':fields.boolean('Received', readonly=True, select=True, copy=False,
help="It indicates that a picking has been done"),
'shipped_rate': fields.function(_shipped_rate, string='Received Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Invoice Received', type='boolean', copy=False,
help="It indicates that an invoice has been validated"),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced', type='float'),
'invoice_method': fields.selection([('manual','Based on Purchase Order lines'),('order','Based on generated draft invoice'),('picking','Based on incoming shipments')], 'Invoicing Control', required=True,
readonly=True, states={'draft':[('readonly',False)], 'sent':[('readonly',False)]},
help="Based on Purchase Order lines: place individual lines in 'Invoice Control / On Purchase Order lines' from where you can selectively create an invoice.\n" \
"Based on generated invoice: create a draft invoice you can validate later.\n" \
"Based on incoming shipments: let you create an invoice when receipts are validated."
),
'minimum_planned_date':fields.function(_minimum_planned_date, fnct_inv=_set_minimum_planned_date, string='Expected Date', type='date', select=True, help="This is computed as the minimum scheduled date of all purchase order lines' products.",
store = {
'purchase.order.line': (_get_order, ['date_planned'], 10),
'purchase.order': (_get_purchase_order, ['order_line'], 10),
}
),
'amount_untaxed': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Untaxed Amount',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The amount without tax", track_visibility='always'),
'amount_tax': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Taxes',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The tax amount"),
'amount_total': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Total',
store={
'purchase.order.line': (_get_order, None, 10),
}, multi="sums", help="The total amount"),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
'payment_term_id': fields.many2one('account.payment.term', 'Payment Term'),
'incoterm_id': fields.many2one('stock.incoterms', 'Incoterm', help="International Commercial Terms are a series of predefined commercial terms used in international transactions."),
'product_id': fields.related('order_line', 'product_id', type='many2one', relation='product.product', string='Product'),
'create_uid': fields.many2one('res.users', 'Responsible'),
'company_id': fields.many2one('res.company', 'Company', required=True, select=1, states={'confirmed': [('readonly', True)], 'approved': [('readonly', True)]}),
'journal_id': fields.many2one('account.journal', 'Journal'),
'bid_date': fields.date('Bid Received On', readonly=True, help="Date on which the bid was received"),
'bid_validity': fields.date('Bid Valid Until', help="Date on which the bid expired"),
'picking_type_id': fields.many2one('stock.picking.type', 'Deliver To', help="This will determine the picking type of the incoming shipment", required=True,
states={'confirmed': [('readonly', True)], 'approved': [('readonly', True)], 'done': [('readonly', True)]}),
'related_location_id': fields.related('picking_type_id', 'default_location_dest_id', type='many2one', relation='stock.location', string="Related location", store=True),
'related_usage': fields.related('location_id', 'usage', type='char'),
'shipment_count': fields.function(_count_all, type='integer', string='Incoming Shipments', multi=True),
'invoice_count': fields.function(_count_all, type='integer', string='Invoices', multi=True)
}
_defaults = {
'date_order': fields.datetime.now,
'state': 'draft',
'name': lambda obj, cr, uid, context: '/',
'shipped': 0,
'invoice_method': 'order',
'invoiced': 0,
'pricelist_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').browse(cr, uid, context['partner_id']).property_product_pricelist_purchase.id,
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'purchase.order', context=c),
'journal_id': _get_journal,
'currency_id': lambda self, cr, uid, context: self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.currency_id.id,
'picking_type_id': _get_picking_in,
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_name = "purchase.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Purchase Order"
_order = 'date_order desc, id desc'
def create(self, cr, uid, vals, context=None):
if vals.get('name','/')=='/':
vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'purchase.order') or '/'
context = dict(context or {}, mail_create_nolog=True)
order = super(purchase_order, self).create(cr, uid, vals, context=context)
self.message_post(cr, uid, [order], body=_("RFQ created"), context=context)
return order
def unlink(self, cr, uid, ids, context=None):
purchase_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in purchase_orders:
if s['state'] in ['draft','cancel']:
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'), _('In order to delete a purchase order, you must cancel it first.'))
# automatically sending subflow.delete upon deletion
self.signal_workflow(cr, uid, unlink_ids, 'purchase_cancel')
return super(purchase_order, self).unlink(cr, uid, unlink_ids, context=context)
def set_order_line_status(self, cr, uid, ids, status, context=None):
line = self.pool.get('purchase.order.line')
order_line_ids = []
proc_obj = self.pool.get('procurement.order')
for order in self.browse(cr, uid, ids, context=context):
if status in ('draft', 'cancel'):
order_line_ids += [po_line.id for po_line in order.order_line]
else: # Do not change the status of already cancelled lines
order_line_ids += [po_line.id for po_line in order.order_line if po_line.state != 'cancel']
if order_line_ids:
line.write(cr, uid, order_line_ids, {'state': status}, context=context)
if order_line_ids and status == 'cancel':
procs = proc_obj.search(cr, uid, [('purchase_line_id', 'in', order_line_ids)], context=context)
if procs:
proc_obj.write(cr, uid, procs, {'state': 'exception'}, context=context)
return True
def button_dummy(self, cr, uid, ids, context=None):
return True
def onchange_pricelist(self, cr, uid, ids, pricelist_id, context=None):
if not pricelist_id:
return {}
return {'value': {'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id}}
#Destination address is used when dropshipping
def onchange_dest_address_id(self, cr, uid, ids, address_id, context=None):
if not address_id:
return {}
address = self.pool.get('res.partner')
values = {}
supplier = address.browse(cr, uid, address_id, context=context)
if supplier:
location_id = supplier.property_stock_customer.id
values.update({'location_id': location_id})
return {'value':values}
def onchange_picking_type_id(self, cr, uid, ids, picking_type_id, context=None):
value = {}
if picking_type_id:
picktype = self.pool.get("stock.picking.type").browse(cr, uid, picking_type_id, context=context)
if picktype.default_location_dest_id:
value.update({'location_id': picktype.default_location_dest_id.id, 'related_usage': picktype.default_location_dest_id.usage})
value.update({'related_location_id': picktype.default_location_dest_id.id})
return {'value': value}
def onchange_partner_id(self, cr, uid, ids, partner_id, context=None):
partner = self.pool.get('res.partner')
if not partner_id:
return {'value': {
'fiscal_position': False,
'payment_term_id': False,
}}
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
fp = self.pool['account.fiscal.position'].get_fiscal_position(cr, uid, company_id, partner_id, context=context)
supplier_address = partner.address_get(cr, uid, [partner_id], ['default'], context=context)
supplier = partner.browse(cr, uid, partner_id, context=context)
return {'value': {
'pricelist_id': supplier.property_product_pricelist_purchase.id,
'fiscal_position': fp or supplier.property_account_position and supplier.property_account_position.id,
'payment_term_id': supplier.property_supplier_payment_term.id or False,
}}
def invoice_open(self, cr, uid, ids, context=None):
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree2')
action_id = result and result[1] or False
result = act_obj.read(cr, uid, [action_id], context=context)[0]
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
if not inv_ids:
raise osv.except_osv(_('Error!'), _('Please create Invoices.'))
#choose the view_mode accordingly
if len(inv_ids)>1:
result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = inv_ids and inv_ids[0] or False
return result
def view_invoice(self, cr, uid, ids, context=None):
'''
This function returns an action that displays the existing invoices of the given purchase order ids. It can either be in a list view or in a form view, if there is only one invoice to show.
'''
context = dict(context or {})
mod_obj = self.pool.get('ir.model.data')
wizard_obj = self.pool.get('purchase.order.line_invoice')
#compute the number of invoices to display
inv_ids = []
for po in self.browse(cr, uid, ids, context=context):
if po.invoice_method == 'manual':
if not po.invoice_ids:
context.update({'active_ids' : [line.id for line in po.order_line if line.state != 'cancel']})
wizard_obj.makeInvoices(cr, uid, [], context=context)
for po in self.browse(cr, uid, ids, context=context):
inv_ids+= [invoice.id for invoice in po.invoice_ids]
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_supplier_form')
res_id = res and res[1] or False
return {
'name': _('Supplier Invoices'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'in_invoice', 'journal_type': 'purchase'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': inv_ids and inv_ids[0] or False,
}
def view_picking(self, cr, uid, ids, context=None):
'''
This function returns an action that displays the existing pickings of the given purchase order ids.
'''
if context is None:
context = {}
mod_obj = self.pool.get('ir.model.data')
dummy, action_id = tuple(mod_obj.get_object_reference(cr, uid, 'stock', 'action_picking_tree'))
action = self.pool.get('ir.actions.act_window').read(cr, uid, action_id, context=context)
pick_ids = []
for po in self.browse(cr, uid, ids, context=context):
pick_ids += [picking.id for picking in po.picking_ids]
#override the context to get rid of the default filtering on picking type
action['context'] = {}
#choose the view_mode accordingly
if len(pick_ids) > 1:
action['domain'] = "[('id','in',[" + ','.join(map(str, pick_ids)) + "])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'stock', 'view_picking_form')
action['views'] = [(res and res[1] or False, 'form')]
action['res_id'] = pick_ids and pick_ids[0] or False
return action
def wkf_approve_order(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved', 'date_approve': fields.date.context_today(self,cr,uid,context=context)})
return True
def wkf_bid_received(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state':'bid', 'bid_date': fields.date.context_today(self,cr,uid,context=context)})
def wkf_send_rfq(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi purchase template message loaded by default
'''
if not context:
context= {}
ir_model_data = self.pool.get('ir.model.data')
try:
if context.get('send_rfq', False):
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase')[1]
else:
template_id = ir_model_data.get_object_reference(cr, uid, 'purchase', 'email_template_edi_purchase_done')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'purchase.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
})
return {
'name': _('Compose Email'),
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def print_quotation(self, cr, uid, ids, context=None):
'''
This function prints the request for quotation and marks it as sent, so that the next step of the workflow is easier to see.
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
self.signal_workflow(cr, uid, ids, 'send_rfq')
return self.pool['report'].get_action(cr, uid, ids, 'purchase.report_purchasequotation', context=context)
def wkf_confirm_order(self, cr, uid, ids, context=None):
todo = []
for po in self.browse(cr, uid, ids, context=context):
if not any(line.state != 'cancel' for line in po.order_line):
raise osv.except_osv(_('Error!'),_('You cannot confirm a purchase order without any purchase order line.'))
if po.invoice_method == 'picking' and not any([l.product_id and l.product_id.type in ('product', 'consu') and l.state != 'cancel' for l in po.order_line]):
raise osv.except_osv(
_('Error!'),
_("You cannot confirm a purchase order with Invoice Control Method 'Based on incoming shipments' that doesn't contain any stockable item."))
for line in po.order_line:
if line.state=='draft':
todo.append(line.id)
self.pool.get('purchase.order.line').action_confirm(cr, uid, todo, context)
for id in ids:
self.write(cr, uid, [id], {'state' : 'confirmed', 'validator' : uid})
return True
def _choose_account_from_po_line(self, cr, uid, po_line, context=None):
fiscal_obj = self.pool.get('account.fiscal.position')
property_obj = self.pool.get('ir.property')
if po_line.product_id:
acc_id = po_line.product_id.property_account_expense.id
if not acc_id:
acc_id = po_line.product_id.categ_id.property_account_expense_categ.id
if not acc_id:
raise osv.except_osv(_('Error!'), _('Define an expense account for this product: "%s" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))
else:
acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category', context=context).id
fpos = po_line.order_id.fiscal_position or False
return fiscal_obj.map_account(cr, uid, fpos, acc_id)
def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):
"""Collects require data from purchase order line that is used to create invoice line
for that purchase order line
:param account_id: Expense account of the product of PO line if any.
:param browse_record order_line: Purchase order line browse record
:return: Value for fields of invoice lines.
:rtype: dict
"""
return {
'name': order_line.name,
'account_id': account_id,
'price_unit': order_line.price_unit or 0.0,
'quantity': order_line.product_qty,
'product_id': order_line.product_id.id or False,
'uos_id': order_line.product_uom.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in order_line.taxes_id])],
'account_analytic_id': order_line.account_analytic_id.id or False,
'purchase_line_id': order_line.id,
}
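# Reminder on the relational command tuples used here and below: (6, 0, ids)
# replaces the whole set with `ids`, while (4, id) links a single record (see
# the invoice_lines/invoice_ids writes in action_invoice_create).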
def _prepare_invoice(self, cr, uid, order, line_ids, context=None):
"""Prepare the dict of values to create the new invoice for a
purchase order. This method may be overridden to implement custom
invoice generation (making sure to call super() to establish
a clean extension chain).
:param browse_record order: purchase.order record to invoice
:param list(int) line_ids: list of invoice line IDs that must be
attached to the invoice
:return: dict of values to create() the invoice
"""
journal_ids = self.pool['account.journal'].search(
cr, uid, [('type', '=', 'purchase'),
('company_id', '=', order.company_id.id)],
limit=1)
if not journal_ids:
raise osv.except_osv(
_('Error!'),
_('Define purchase journal for this company: "%s" (id:%d).') % \
(order.company_id.name, order.company_id.id))
return {
'name': order.partner_ref or order.name,
'reference': order.partner_ref or order.name,
'account_id': order.partner_id.property_account_payable.id,
'type': 'in_invoice',
'partner_id': order.partner_id.id,
'currency_id': order.currency_id.id,
'journal_id': len(journal_ids) and journal_ids[0] or False,
'invoice_line': [(6, 0, line_ids)],
'origin': order.name,
'fiscal_position': order.fiscal_position.id or False,
'payment_term': order.payment_term_id.id or False,
'company_id': order.company_id.id,
}
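# A minimal override sketch (hypothetical module code, not part of this file):
# since the docstring above presents _prepare_invoice() as an extension point,
# a custom module could add values while keeping the extension chain clean.
# The 'comment' mapping below is an assumption for illustration only.
#
#     class my_purchase_order(osv.osv):
#         _inherit = 'purchase.order'
#
#         def _prepare_invoice(self, cr, uid, order, line_ids, context=None):
#             vals = super(my_purchase_order, self)._prepare_invoice(
#                 cr, uid, order, line_ids, context=context)
#             vals['comment'] = order.notes or ''  # assumed extra value
#             return vals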
def action_cancel_draft(self, cr, uid, ids, context=None):
if not len(ids):
return False
self.write(cr, uid, ids, {'state':'draft','shipped':0})
self.set_order_line_status(cr, uid, ids, 'draft', context=context)
for p_id in ids:
# Deleting the existing instance of workflow for PO
self.delete_workflow(cr, uid, [p_id]) # TODO is it necessary to interleave the calls?
self.create_workflow(cr, uid, [p_id])
return True
def wkf_po_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'done'}, context=context)
self.set_order_line_status(cr, uid, ids, 'done', context=context)
def action_invoice_create(self, cr, uid, ids, context=None):
"""Generates invoice for given ids of purchase orders and links that invoice ID to purchase order.
:param ids: list of ids of purchase orders.
:return: ID of created invoice.
:rtype: int
"""
context = dict(context or {})
inv_obj = self.pool.get('account.invoice')
inv_line_obj = self.pool.get('account.invoice.line')
res = False
uid_company_id = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.id
for order in self.browse(cr, uid, ids, context=context):
context.pop('force_company', None)
if order.company_id.id != uid_company_id:
#if the company of the document is different than the current user company, force the company in the context
#then re-do a browse to read the property fields for the good company.
context['force_company'] = order.company_id.id
order = self.browse(cr, uid, order.id, context=context)
# generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line
inv_lines = []
for po_line in order.order_line:
if po_line.state == 'cancel':
continue
acc_id = self._choose_account_from_po_line(cr, uid, po_line, context=context)
inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)
inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)
inv_lines.append(inv_line_id)
po_line.write({'invoice_lines': [(4, inv_line_id)]})
# get invoice data and create invoice
inv_data = self._prepare_invoice(cr, uid, order, inv_lines, context=context)
inv_id = inv_obj.create(cr, uid, inv_data, context=context)
# compute the invoice
inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)
# Link this new invoice to related purchase order
order.write({'invoice_ids': [(4, inv_id)]})
res = inv_id
return res
def invoice_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'approved'}, context=context)
return True
def has_stockable_product(self, cr, uid, ids, *args):
for order in self.browse(cr, uid, ids):
for order_line in order.order_line:
if order_line.state == 'cancel':
continue
if order_line.product_id and order_line.product_id.type in ('product', 'consu'):
return True
return False
def wkf_action_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
self.set_order_line_status(cr, uid, ids, 'cancel', context=context)
def action_cancel(self, cr, uid, ids, context=None):
for purchase in self.browse(cr, uid, ids, context=context):
for pick in purchase.picking_ids:
for move in pick.move_lines:
if pick.state == 'done':
raise osv.except_osv(
_('Unable to cancel the purchase order %s.') % (purchase.name),
_('You have already received some goods for it.'))
self.pool.get('stock.picking').action_cancel(cr, uid, [x.id for x in purchase.picking_ids if x.state != 'cancel'], context=context)
for inv in purchase.invoice_ids:
if inv and inv.state not in ('cancel', 'draft'):
raise osv.except_osv(
_('Unable to cancel this purchase order.'),
_('You must first cancel all invoices related to this purchase order.'))
self.pool.get('account.invoice') \
.signal_workflow(cr, uid, map(attrgetter('id'), purchase.invoice_ids), 'invoice_cancel')
self.signal_workflow(cr, uid, ids, 'purchase_cancel')
return True
def _prepare_order_line_move(self, cr, uid, order, order_line, picking_id, group_id, context=None):
''' prepare the stock move data from the PO line. This function returns a list of dictionaries ready to be used in stock.move's create()'''
product_uom = self.pool.get('product.uom')
price_unit = order_line.price_unit
if order_line.product_uom.id != order_line.product_id.uom_id.id:
price_unit *= order_line.product_uom.factor / order_line.product_id.uom_id.factor
if order.currency_id.id != order.company_id.currency_id.id:
#we don't round the price_unit, as we may want to store the standard price with more digits than allowed by the currency
price_unit = self.pool.get('res.currency').compute(cr, uid, order.currency_id.id, order.company_id.currency_id.id, price_unit, round=False, context=context)
res = []
move_template = {
'name': order_line.name or '',
'product_id': order_line.product_id.id,
'product_uom': order_line.product_uom.id,
'product_uos': order_line.product_uom.id,
'date': order.date_order,
'date_expected': fields.date.date_to_datetime(self, cr, uid, order_line.date_planned, context),
'location_id': order.partner_id.property_stock_supplier.id,
'location_dest_id': order.location_id.id,
'picking_id': picking_id,
'partner_id': order.dest_address_id.id,
'move_dest_id': False,
'state': 'draft',
'purchase_line_id': order_line.id,
'company_id': order.company_id.id,
'price_unit': price_unit,
'picking_type_id': order.picking_type_id.id,
'group_id': group_id,
'procurement_id': False,
'origin': order.name,
'route_ids': order.picking_type_id.warehouse_id and [(6, 0, [x.id for x in order.picking_type_id.warehouse_id.route_ids])] or [],
'warehouse_id':order.picking_type_id.warehouse_id.id,
'invoice_state': order.invoice_method == 'picking' and '2binvoiced' or 'none',
}
diff_quantity = order_line.product_qty
for procurement in order_line.procurement_ids:
procurement_qty = product_uom._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, to_uom_id=order_line.product_uom.id)
tmp = move_template.copy()
tmp.update({
'product_uom_qty': min(procurement_qty, diff_quantity),
'product_uos_qty': min(procurement_qty, diff_quantity),
'move_dest_id': procurement.move_dest_id.id, #move destination is same as procurement destination
'group_id': procurement.group_id.id or group_id, #move group is same as group of procurements if it exists, otherwise take another group
'procurement_id': procurement.id,
'invoice_state': procurement.rule_id.invoice_state or (procurement.location_id and procurement.location_id.usage == 'customer' and procurement.invoice_state=='2binvoiced' and '2binvoiced') or (order.invoice_method == 'picking' and '2binvoiced') or 'none', #dropship case takes from sale
'propagate': procurement.rule_id.propagate,
})
diff_quantity -= min(procurement_qty, diff_quantity)
res.append(tmp)
#if the order line has a bigger quantity than the procurement it was for (manually changed or minimal quantity), then
#split the future stock move in two because the route followed may be different.
if float_compare(diff_quantity, 0.0, precision_rounding=order_line.product_uom.rounding) > 0:
move_template['product_uom_qty'] = diff_quantity
move_template['product_uos_qty'] = diff_quantity
res.append(move_template)
return res
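# Worked example for the price_unit conversion above (illustrative numbers):
# a product stocked in Unit(s) (uom factor 1.0) but purchased by the Dozen
# (uom factor 1/12) at 24.0 per dozen gives
#     price_unit = 24.0 * (1/12) / 1.0 = 2.0 per unit,
# i.e. the price is rebased on the product's default UoM before being stored
# on the moves (and converted to the company currency when needed).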
def _create_stock_moves(self, cr, uid, order, order_lines, picking_id=False, context=None):
"""Creates appropriate stock moves for given order lines, whose can optionally create a
picking if none is given or no suitable is found, then confirms the moves, makes them
available, and confirms the pickings.
If ``picking_id`` is provided, the stock moves will be added to it, otherwise a standard
incoming picking will be created to wrap the stock moves (default behavior of the stock.move)
Modules that wish to customize the procurements or partition the stock moves over
multiple stock pickings may override this method and call ``super()`` with
different subsets of ``order_lines`` and/or preset ``picking_id`` values.
:param browse_record order: purchase order to which the order lines belong
:param list(browse_record) order_lines: purchase order line records for which picking
and moves should be created.
:param int picking_id: optional ID of a stock picking to which the created stock moves
will be added. A new picking will be created if omitted.
:return: None
"""
stock_move = self.pool.get('stock.move')
todo_moves = []
new_group = self.pool.get("procurement.group").create(cr, uid, {'name': order.name, 'partner_id': order.partner_id.id}, context=context)
for order_line in order_lines:
if order_line.state == 'cancel':
continue
if not order_line.product_id:
continue
if order_line.product_id.type in ('product', 'consu'):
for vals in self._prepare_order_line_move(cr, uid, order, order_line, picking_id, new_group, context=context):
move = stock_move.create(cr, uid, vals, context=context)
todo_moves.append(move)
todo_moves = stock_move.action_confirm(cr, uid, todo_moves)
stock_move.force_assign(cr, uid, todo_moves)
def test_moves_done(self, cr, uid, ids, context=None):
'''PO is done at the delivery side if all the incoming shipments are done'''
for purchase in self.browse(cr, uid, ids, context=context):
for picking in purchase.picking_ids:
if picking.state != 'done':
return False
return True
def test_moves_except(self, cr, uid, ids, context=None):
''' PO is in exception at the delivery side if one of the pickings is canceled
and all the other pickings are completed (done or canceled)
'''
at_least_one_canceled = False
alldoneorcancel = True
for purchase in self.browse(cr, uid, ids, context=context):
for picking in purchase.picking_ids:
if picking.state == 'cancel':
at_least_one_canceled = True
if picking.state not in ['done', 'cancel']:
alldoneorcancel = False
return at_least_one_canceled and alldoneorcancel
def move_lines_get(self, cr, uid, ids, *args):
res = []
for order in self.browse(cr, uid, ids, context={}):
for line in order.order_line:
res += [x.id for x in line.move_ids]
return res
def action_picking_create(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids):
picking_vals = {
'picking_type_id': order.picking_type_id.id,
'partner_id': order.partner_id.id,
'date': order.date_order,
'origin': order.name
}
picking_id = self.pool.get('stock.picking').create(cr, uid, picking_vals, context=context)
self._create_stock_moves(cr, uid, order, order.order_line, picking_id, context=context)
return picking_id
def picking_done(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'shipped':1,'state':'approved'}, context=context)
# Do check on related procurements:
proc_obj = self.pool.get("procurement.order")
po_lines = []
for po in self.browse(cr, uid, ids, context=context):
po_lines += [x.id for x in po.order_line if x.state != 'cancel']
if po_lines:
procs = proc_obj.search(cr, uid, [('purchase_line_id', 'in', po_lines)], context=context)
if procs:
proc_obj.check(cr, uid, procs, context=context)
self.message_post(cr, uid, ids, body=_("Products received"), context=context)
return True
def do_merge(self, cr, uid, ids, context=None):
"""
To merge similar type of purchase orders.
Orders will only be merged if:
* Purchase Orders are in draft
* Purchase Orders belong to the same partner
* Purchase Orders have the same stock location, the same pricelist and the same currency
Lines will only be merged if:
* Order lines are exactly the same except for the quantity and unit
@param self: The object pointer.
@param cr: A database cursor
@param uid: ID of the user currently logged in
@param ids: the ID or list of IDs
@param context: A standard dictionary
@return: dictionary mapping each new purchase order id to the list of old (merged) order ids
"""
#TOFIX: merged order lines should be unlinked
def make_key(br, fields):
list_key = []
for field in fields:
field_val = getattr(br, field)
if field in ('product_id', 'account_analytic_id'):
if not field_val:
field_val = False
if isinstance(field_val, browse_record):
field_val = field_val.id
elif isinstance(field_val, browse_null):
field_val = False
elif isinstance(field_val, browse_record_list):
field_val = ((6, 0, tuple([v.id for v in field_val])),)
list_key.append((field, field_val))
list_key.sort()
return tuple(list_key)
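# Illustration (hypothetical ids): an order with partner 7, location 12,
# pricelist 1 and currency 2 yields the sorted key
#     (('currency_id', 2), ('location_id', 12), ('partner_id', 7), ('pricelist_id', 1))
# and two draft orders are merged only if they produce the same tuple.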
context = dict(context or {})
# Compute what the new orders should contain
new_orders = {}
order_lines_to_move = {}
for porder in [order for order in self.browse(cr, uid, ids, context=context) if order.state == 'draft']:
order_key = make_key(porder, ('partner_id', 'location_id', 'pricelist_id', 'currency_id'))
new_order = new_orders.setdefault(order_key, ({}, []))
new_order[1].append(porder.id)
order_infos = new_order[0]
order_lines_to_move.setdefault(order_key, [])
if not order_infos:
order_infos.update({
'origin': porder.origin,
'date_order': porder.date_order,
'partner_id': porder.partner_id.id,
'dest_address_id': porder.dest_address_id.id,
'picking_type_id': porder.picking_type_id.id,
'location_id': porder.location_id.id,
'pricelist_id': porder.pricelist_id.id,
'currency_id': porder.currency_id.id,
'state': 'draft',
'order_line': {},
'notes': '%s' % (porder.notes or '',),
'fiscal_position': porder.fiscal_position and porder.fiscal_position.id or False,
})
else:
if porder.date_order < order_infos['date_order']:
order_infos['date_order'] = porder.date_order
if porder.notes:
order_infos['notes'] = (order_infos['notes'] or '') + ('\n%s' % (porder.notes,))
if porder.origin:
order_infos['origin'] = (order_infos['origin'] or '') + ' ' + porder.origin
order_lines_to_move[order_key] += [order_line.id for order_line in porder.order_line
if order_line.state != 'cancel']
allorders = []
orders_info = {}
for order_key, (order_data, old_ids) in new_orders.iteritems():
# skip merges with only one order
if len(old_ids) < 2:
allorders += (old_ids or [])
continue
# cleanup order line data
for key, value in order_data['order_line'].iteritems():
del value['uom_factor']
value.update(dict(key))
order_data['order_line'] = [(6, 0, order_lines_to_move[order_key])]
# create the new order
context.update({'mail_create_nolog': True})
neworder_id = self.create(cr, uid, order_data)
self.message_post(cr, uid, [neworder_id], body=_("RFQ created"), context=context)
orders_info.update({neworder_id: old_ids})
allorders.append(neworder_id)
# make triggers pointing to the old orders point to the new order
for old_id in old_ids:
self.redirect_workflow(cr, uid, [(old_id, neworder_id)])
self.signal_workflow(cr, uid, [old_id], 'purchase_cancel')
return orders_info
class purchase_order_line(osv.osv):
def _amount_line(self, cr, uid, ids, prop, arg, context=None):
res = {}
cur_obj=self.pool.get('res.currency')
tax_obj = self.pool.get('account.tax')
for line in self.browse(cr, uid, ids, context=context):
taxes = tax_obj.compute_all(cr, uid, line.taxes_id, line.price_unit, line.product_qty, line.product_id, line.order_id.partner_id)
cur = line.order_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
return res
def _get_uom_id(self, cr, uid, context=None):
try:
proxy = self.pool.get('ir.model.data')
result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result[1]
except Exception, ex:
return False
_columns = {
'name': fields.text('Description', required=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True),
'date_planned': fields.date('Scheduled Date', required=True, select=True),
'taxes_id': fields.many2many('account.tax', 'purchase_order_taxe', 'ord_id', 'tax_id', 'Taxes'),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
'product_id': fields.many2one('product.product', 'Product', domain=[('purchase_ok','=',True)], change_default=True),
'move_ids': fields.one2many('stock.move', 'purchase_line_id', 'Reservation', readonly=True, ondelete='set null'),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')),
'order_id': fields.many2one('purchase.order', 'Order Reference', select=True, required=True, ondelete='cascade'),
'account_analytic_id':fields.many2one('account.analytic.account', 'Analytic Account',),
'company_id': fields.related('order_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'state': fields.selection([('draft', 'Draft'), ('confirmed', 'Confirmed'), ('done', 'Done'), ('cancel', 'Cancelled')],
'Status', required=True, readonly=True, copy=False,
help=' * The \'Draft\' status is set automatically when the purchase order is in draft status. \
\n* The \'Confirmed\' status is set automatically when the purchase order is confirmed. \
\n* The \'Done\' status is set automatically when the purchase order is set as done. \
\n* The \'Cancelled\' status is set automatically when the user cancels the purchase order.'),
'invoice_lines': fields.many2many('account.invoice.line', 'purchase_order_line_invoice_rel',
'order_line_id', 'invoice_id', 'Invoice Lines',
readonly=True, copy=False),
'invoiced': fields.boolean('Invoiced', readonly=True, copy=False),
'partner_id': fields.related('order_id', 'partner_id', string='Partner', readonly=True, type="many2one", relation="res.partner", store=True),
'date_order': fields.related('order_id', 'date_order', string='Order Date', readonly=True, type="datetime"),
'procurement_ids': fields.one2many('procurement.order', 'purchase_line_id', string='Associated procurements'),
}
_defaults = {
'product_uom' : _get_uom_id,
'product_qty': lambda *a: 1.0,
'state': lambda *args: 'draft',
'invoiced': lambda *a: 0,
}
_table = 'purchase_order_line'
_name = 'purchase.order.line'
_description = 'Purchase Order Line'
def unlink(self, cr, uid, ids, context=None):
for line in self.browse(cr, uid, ids, context=context):
if line.order_id.state in ['approved', 'done'] and line.state not in ['draft', 'cancel']:
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a purchase order line which is in state \'%s\'.') %(line.state,))
procurement_obj = self.pool.get('procurement.order')
procurement_ids_to_except = procurement_obj.search(cr, uid, [('purchase_line_id', 'in', ids)], context=context)
if procurement_ids_to_except:
for po_id in procurement_ids_to_except:
procurement_obj.message_post(cr, uid, po_id, body=_('Purchase order line deleted.'), context=context)
procurement_obj.write(cr, uid, procurement_ids_to_except, {'state': 'exception'}, context=context)
return super(purchase_order_line, self).unlink(cr, uid, ids, context=context)
def onchange_product_uom(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, state='draft', context=None):
"""
onchange handler of product_uom.
"""
if context is None:
context = {}
if not uom_id:
return {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
context = dict(context, purchase_uom_check=True)
return self.onchange_product_id(cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=date_order, fiscal_position_id=fiscal_position_id, date_planned=date_planned,
name=name, price_unit=price_unit, state=state, context=context)
def _get_date_planned(self, cr, uid, supplier_info, date_order_str, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for
PO Lines that correspond to the given product.supplierinfo,
when ordered at `date_order_str`.
:param browse_record | False supplier_info: product.supplierinfo, used to
determine delivery delay (if False, default delay = 0)
:param str date_order_str: date of order field, as a string in
DEFAULT_SERVER_DATETIME_FORMAT
:rtype: datetime
:return: desired Schedule Date for the PO line
"""
supplier_delay = int(supplier_info.delay) if supplier_info else 0
return datetime.strptime(date_order_str, DEFAULT_SERVER_DATETIME_FORMAT) + relativedelta(days=supplier_delay)
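# Worked example (illustrative values): with date_order_str = '2016-01-10 09:00:00'
# and a supplierinfo delay of 3 days, the returned schedule date is
# datetime(2016, 1, 13, 9, 0, 0).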
def action_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'cancel'}, context=context)
# We will group by PO first, so we do the check only once for each PO
purchase_orders = list(set([x.order_id for x in self.browse(cr, uid, ids, context=context)]))
for purchase in purchase_orders:
if all([l.state == 'cancel' for l in purchase.order_line]):
self.pool.get('purchase.order').action_cancel(cr, uid, [purchase.id], context=context)
def _check_product_uom_group(self, cr, uid, context=None):
group_uom = self.pool.get('ir.model.data').get_object(cr, uid, 'product', 'group_uom')
return any(user.id == uid for user in group_uom.users)
def onchange_product_id(self, cr, uid, ids, pricelist_id, product_id, qty, uom_id,
partner_id, date_order=False, fiscal_position_id=False, date_planned=False,
name=False, price_unit=False, state='draft', context=None):
"""
onchange handler of product_id.
"""
if context is None:
context = {}
res = {'value': {'price_unit': price_unit or 0.0, 'name': name or '', 'product_uom' : uom_id or False}}
if not product_id:
return res
product_product = self.pool.get('product.product')
product_uom = self.pool.get('product.uom')
res_partner = self.pool.get('res.partner')
product_pricelist = self.pool.get('product.pricelist')
account_fiscal_position = self.pool.get('account.fiscal.position')
account_tax = self.pool.get('account.tax')
# - check for the presence of partner_id and pricelist_id
#if not partner_id:
# raise osv.except_osv(_('No Partner!'), _('Select a partner in purchase order to choose a product.'))
#if not pricelist_id:
# raise osv.except_osv(_('No Pricelist !'), _('Select a price list in the purchase order form before choosing a product.'))
# - determine name and notes based on product in partner lang.
context_partner = context.copy()
if partner_id:
lang = res_partner.browse(cr, uid, partner_id).lang
context_partner.update( {'lang': lang, 'partner_id': partner_id} )
product = product_product.browse(cr, uid, product_id, context=context_partner)
#call name_get() with partner in the context to eventually match name and description in the seller_ids field
if not name or not uom_id:
# The 'or not uom_id' part of the above condition can be removed in master. See commit message of the rev. introducing this line.
dummy, name = product_product.name_get(cr, uid, product_id, context=context_partner)[0]
if product.description_purchase:
name += '\n' + product.description_purchase
res['value'].update({'name': name})
# - set a domain on product_uom
res['domain'] = {'product_uom': [('category_id','=',product.uom_id.category_id.id)]}
# - check that uom and product uom belong to the same category
product_uom_po_id = product.uom_po_id.id
if not uom_id:
uom_id = product_uom_po_id
if product.uom_id.category_id.id != product_uom.browse(cr, uid, uom_id, context=context).category_id.id:
if context.get('purchase_uom_check') and self._check_product_uom_group(cr, uid, context=context):
res['warning'] = {'title': _('Warning!'), 'message': _('Selected Unit of Measure does not belong to the same category as the product Unit of Measure.')}
uom_id = product_uom_po_id
res['value'].update({'product_uom': uom_id})
# - determine product_qty and date_planned based on seller info
if not date_order:
date_order = fields.datetime.now()
supplierinfo = False
precision = self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Unit of Measure')
for supplier in product.seller_ids:
if partner_id and (supplier.name.id == partner_id):
supplierinfo = supplier
if supplierinfo.product_uom.id != uom_id:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier only sells this product by %s') % supplierinfo.product_uom.name }
min_qty = product_uom._compute_qty(cr, uid, supplierinfo.product_uom.id, supplierinfo.min_qty, to_uom_id=uom_id)
if float_compare(min_qty, qty, precision_digits=precision) == 1: # If the supplier minimal quantity is greater than the quantity entered by the user, raise it to the minimal.
if qty:
res['warning'] = {'title': _('Warning!'), 'message': _('The selected supplier has a minimal quantity set to %s %s, you should not purchase less.') % (supplierinfo.min_qty, supplierinfo.product_uom.name)}
qty = min_qty
dt = self._get_date_planned(cr, uid, supplierinfo, date_order, context=context).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
qty = qty or 1.0
res['value'].update({'date_planned': date_planned or dt})
if qty:
res['value'].update({'product_qty': qty})
price = price_unit
if price_unit is False or price_unit is None:
# - determine price_unit and taxes_id
if pricelist_id:
date_order_str = datetime.strptime(date_order, DEFAULT_SERVER_DATETIME_FORMAT).strftime(DEFAULT_SERVER_DATE_FORMAT)
price = product_pricelist.price_get(cr, uid, [pricelist_id],
product.id, qty or 1.0, partner_id or False, {'uom': uom_id, 'date': date_order_str})[pricelist_id]
else:
price = product.standard_price
taxes = account_tax.browse(cr, uid, map(lambda x: x.id, product.supplier_taxes_id))
fpos = fiscal_position_id and account_fiscal_position.browse(cr, uid, fiscal_position_id, context=context) or False
taxes_ids = account_fiscal_position.map_tax(cr, uid, fpos, taxes)
res['value'].update({'price_unit': price, 'taxes_id': taxes_ids})
return res
product_id_change = onchange_product_id
product_uom_change = onchange_product_uom
def action_confirm(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'confirmed'}, context=context)
return True
class procurement_rule(osv.osv):
_inherit = 'procurement.rule'
def _get_action(self, cr, uid, context=None):
return [('buy', _('Buy'))] + super(procurement_rule, self)._get_action(cr, uid, context=context)
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'purchase_line_id': fields.many2one('purchase.order.line', 'Purchase Order Line'),
'purchase_id': fields.related('purchase_line_id', 'order_id', type='many2one', relation='purchase.order', string='Purchase Order'),
}
def propagate_cancel(self, cr, uid, procurement, context=None):
if procurement.rule_id.action == 'buy' and procurement.purchase_line_id:
purchase_line_obj = self.pool.get('purchase.order.line')
if procurement.purchase_line_id.state not in ('draft', 'cancel'):
raise osv.except_osv(_('Error!'),
_('Can not cancel this procurement as the related purchase order has been confirmed already. Please cancel the purchase order first. '))
new_qty, new_price = self._calc_new_qty_price(cr, uid, procurement, cancel=True, context=context)
if new_qty != procurement.purchase_line_id.product_qty:
purchase_line_obj.write(cr, uid, [procurement.purchase_line_id.id], {'product_qty': new_qty, 'price_unit': new_price}, context=context)
if float_compare(new_qty, 0.0, precision_rounding=procurement.product_uom.rounding) != 1:
purchase_line_obj.action_cancel(cr, uid, [procurement.purchase_line_id.id], context=context)
purchase_line_obj.unlink(cr, uid, [procurement.purchase_line_id.id], context=context)
return super(procurement_order, self).propagate_cancel(cr, uid, procurement, context=context)
def _run(self, cr, uid, procurement, context=None):
if procurement.rule_id and procurement.rule_id.action == 'buy':
#make a purchase order for the procurement
return self.make_po(cr, uid, [procurement.id], context=context)[procurement.id]
return super(procurement_order, self)._run(cr, uid, procurement, context=context)
def _check(self, cr, uid, procurement, context=None):
if procurement.purchase_line_id:
if procurement.purchase_line_id.order_id.shipped:
return True
elif procurement.move_ids:
moves = self.pool.get('stock.move').browse(cr, uid, [x.id for x in procurement.move_ids], context=context)
return all(move.state == 'done' for move in moves)
return super(procurement_order, self)._check(cr, uid, procurement, context=context)
def _check_supplier_info(self, cr, uid, ids, context=None):
''' Check the supplier info field of a product and write an error message on the procurement if needed.
Returns True if all needed information is there, False if some configuration mistake is detected.
'''
partner_obj = self.pool.get('res.partner')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
message = ''
partner = procurement.product_id.seller_id # main supplier of the procurement's product
if not procurement.product_id.seller_ids:
message = _('No supplier defined for this product !')
elif not partner:
message = _('No default supplier defined for this product')
elif not partner_obj.address_get(cr, uid, [partner.id], ['delivery'])['delivery']:
message = _('No address defined for the supplier')
if message:
if procurement.message != message:
cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
return False
if user.company_id and user.company_id.partner_id:
if partner.id == user.company_id.partner_id.id:
raise osv.except_osv(_('Configuration Error!'), _('The product "%s" has been defined with your company as reseller, which seems to be a configuration error!') % procurement.product_id.name)
return True
def create_procurement_purchase_order(self, cr, uid, procurement, po_vals, line_vals, context=None):
"""Create the purchase order from the procurement, using
the provided field values, after adding the given purchase
order line in the purchase order.
:param procurement: the procurement object generating the purchase order
:param dict po_vals: field values for the new purchase order (the
                     ``order_line`` field will be overwritten with one
                     single line, as passed in ``line_vals``).
:param dict line_vals: field values of the single purchase order line that
the purchase order will contain.
:return: id of the newly created purchase order
:rtype: int
"""
po_vals.update({'order_line': [(0,0,line_vals)]})
return self.pool.get('purchase.order').create(cr, uid, po_vals, context=context)
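# A minimal override sketch (hypothetical, for illustration only): a module that
# needs to tweak the generated PO can adjust po_vals before delegating. The
# origin annotation below is an assumption, not an existing behaviour.
#
#     class my_procurement_order(osv.osv):
#         _inherit = 'procurement.order'
#
#         def create_procurement_purchase_order(self, cr, uid, procurement,
#                                               po_vals, line_vals, context=None):
#             po_vals['origin'] = (po_vals.get('origin') or '') + ' (auto)'
#             return super(my_procurement_order, self).create_procurement_purchase_order(
#                 cr, uid, procurement, po_vals, line_vals, context=context)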
def _get_purchase_schedule_date(self, cr, uid, procurement, company, context=None):
"""Return the datetime value to use as Schedule Date (``date_planned``) for the
Purchase Order Lines created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
:param browse_record company: the company to which the new PO will belong.
:rtype: datetime
:return: the desired Schedule Date for the PO lines
"""
procurement_date_planned = datetime.strptime(procurement.date_planned, DEFAULT_SERVER_DATETIME_FORMAT)
schedule_date = (procurement_date_planned - relativedelta(days=company.po_lead))
return schedule_date
def _get_purchase_order_date(self, cr, uid, procurement, company, schedule_date, context=None):
"""Return the datetime value to use as Order Date (``date_order``) for the
Purchase Order created to satisfy the given procurement.
:param browse_record procurement: the procurement for which a PO will be created.
:param browse_record company: the company to which the new PO will belong.
:param datetime schedule_date: desired Scheduled Date for the Purchase Order lines.
:rtype: datetime
:return: the desired Order Date for the PO
"""
seller_delay = int(procurement.product_id.seller_delay)
return schedule_date - relativedelta(days=seller_delay)
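# Worked example (illustrative values): for a procurement planned on 2016-02-10,
# a company po_lead of 2 days and a seller_delay of 5 days:
#     schedule_date = 2016-02-10 - 2 days = 2016-02-08   (_get_purchase_schedule_date)
#     date_order   = 2016-02-08 - 5 days = 2016-02-03   (this method)
# so the PO must be placed 7 days before the procurement's planned date.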
def _get_product_supplier(self, cr, uid, procurement, context=None):
''' returns the main supplier of the procurement's product given as argument'''
supplierinfo = self.pool['product.supplierinfo']
company_supplier = supplierinfo.search(cr, uid,
[('product_tmpl_id', '=', procurement.product_id.product_tmpl_id.id), ('company_id', '=', procurement.company_id.id)], limit=1, context=context)
if company_supplier:
return supplierinfo.browse(cr, uid, company_supplier[0], context=context).name
return procurement.product_id.seller_id
def _get_po_line_values_from_proc(self, cr, uid, procurement, partner, company, schedule_date, context=None):
if context is None:
context = {}
uom_obj = self.pool.get('product.uom')
pricelist_obj = self.pool.get('product.pricelist')
prod_obj = self.pool.get('product.product')
acc_pos_obj = self.pool.get('account.fiscal.position')
po_obj = self.pool.get('purchase.order')
seller_qty = procurement.product_id.seller_qty if procurement.location_id.usage != 'customer' else 0.0
pricelist_id = partner.property_product_pricelist_purchase.id
uom_id = procurement.product_id.uom_po_id.id
qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, uom_id)
if seller_qty:
qty = max(qty, seller_qty)
price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, partner.id, {'uom': uom_id})[pricelist_id]
# pass partner_id and its lang in the context so the PO line name is rendered in the supplier's language
new_context = context.copy()
new_context.update({'lang': partner.lang, 'partner_id': partner.id})
product = prod_obj.browse(cr, uid, procurement.product_id.id, context=new_context)
taxes_ids = procurement.product_id.supplier_taxes_id
# It is necessary to have the appropriate fiscal position to get the right tax mapping
fiscal_position = False
fiscal_position_id = po_obj.onchange_partner_id(cr, uid, None, partner.id, context=context)['value']['fiscal_position']
if fiscal_position_id:
fiscal_position = acc_pos_obj.browse(cr, uid, fiscal_position_id, context=context)
taxes = acc_pos_obj.map_tax(cr, uid, fiscal_position, taxes_ids)
name = product.display_name
if product.description_purchase:
name += '\n' + product.description_purchase
return {
'name': name,
'product_qty': qty,
'product_id': procurement.product_id.id,
'product_uom': uom_id,
'price_unit': price or 0.0,
'date_planned': schedule_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'taxes_id': [(6, 0, taxes)],
}
def _calc_new_qty_price(self, cr, uid, procurement, po_line=None, cancel=False, context=None):
if not po_line:
po_line = procurement.purchase_line_id
uom_obj = self.pool.get('product.uom')
qty = uom_obj._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty,
procurement.product_id.uom_po_id.id)
if cancel:
qty = -qty
# Make sure we use the minimum quantity of the partner corresponding to the PO
# This does not apply in case of dropshipping
supplierinfo_min_qty = 0.0
if po_line.order_id.location_id.usage != 'customer':
if po_line.product_id.seller_id.id == po_line.order_id.partner_id.id:
supplierinfo_min_qty = po_line.product_id.seller_qty
else:
supplierinfo_obj = self.pool.get('product.supplierinfo')
supplierinfo_ids = supplierinfo_obj.search(cr, uid, [('name', '=', po_line.order_id.partner_id.id), ('product_tmpl_id', '=', po_line.product_id.product_tmpl_id.id)])
supplierinfo_min_qty = supplierinfo_obj.browse(cr, uid, supplierinfo_ids).min_qty
if supplierinfo_min_qty == 0.0:
qty += po_line.product_qty
else:
# Recompute quantity by adding existing running procurements.
for proc in po_line.procurement_ids:
qty += uom_obj._compute_qty(cr, uid, proc.product_uom.id, proc.product_qty,
proc.product_id.uom_po_id.id) if proc.state == 'running' else 0.0
qty = max(qty, supplierinfo_min_qty) if qty > 0.0 else 0.0
price = po_line.price_unit
if qty != po_line.product_qty:
pricelist_obj = self.pool.get('product.pricelist')
pricelist_id = po_line.order_id.partner_id.property_product_pricelist_purchase.id
price = pricelist_obj.price_get(cr, uid, [pricelist_id], procurement.product_id.id, qty, po_line.order_id.partner_id.id, {'uom': procurement.product_uom.id})[pricelist_id]
return qty, price
def make_po(self, cr, uid, ids, context=None):
""" Resolve the purchase from procurement, which may result in a new PO creation, a new PO line creation or a quantity change on existing PO line.
Note that some operations (as the PO creation) are made as SUPERUSER because the current user may not have rights to do it (mto product launched by a sale for example)
@return: dictionary giving for each procurement its related resolving PO line.
"""
res = {}
company = self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id
po_obj = self.pool.get('purchase.order')
po_line_obj = self.pool.get('purchase.order.line')
seq_obj = self.pool.get('ir.sequence')
pass_ids = []
linked_po_ids = []
sum_po_line_ids = []
for procurement in self.browse(cr, uid, ids, context=context):
partner = self._get_product_supplier(cr, uid, procurement, context=context)
if not partner:
self.message_post(cr, uid, [procurement.id], _('There is no supplier associated to product %s') % (procurement.product_id.name))
res[procurement.id] = False
else:
schedule_date = self._get_purchase_schedule_date(cr, uid, procurement, company, context=context)
purchase_date = self._get_purchase_order_date(cr, uid, procurement, company, schedule_date, context=context)
line_vals = self._get_po_line_values_from_proc(cr, uid, procurement, partner, company, schedule_date, context=context)
#look for any other draft PO for the same supplier, to attach the new line to instead of creating a new draft one
available_draft_po_ids = po_obj.search(cr, uid, [
('partner_id', '=', partner.id), ('state', '=', 'draft'), ('picking_type_id', '=', procurement.rule_id.picking_type_id.id),
('location_id', '=', procurement.location_id.id), ('company_id', '=', procurement.company_id.id), ('dest_address_id', '=', procurement.partner_dest_id.id)], context=context)
if available_draft_po_ids:
po_id = available_draft_po_ids[0]
po_rec = po_obj.browse(cr, uid, po_id, context=context)
                    #if the product has to be ordered earlier than those in the existing PO, we replace the purchase date on the order to avoid ordering it too late
if datetime.strptime(po_rec.date_order, DEFAULT_SERVER_DATETIME_FORMAT) > purchase_date:
po_obj.write(cr, uid, [po_id], {'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT)}, context=context)
                    #look for any other PO line in the selected PO with the same product and UoM, to sum quantities instead of creating a new PO line
available_po_line_ids = po_line_obj.search(cr, uid, [('order_id', '=', po_id), ('product_id', '=', line_vals['product_id']), ('product_uom', '=', line_vals['product_uom'])], context=context)
if available_po_line_ids:
po_line = po_line_obj.browse(cr, uid, available_po_line_ids[0], context=context)
po_line_id = po_line.id
new_qty, new_price = self._calc_new_qty_price(cr, uid, procurement, po_line=po_line, context=context)
if new_qty > po_line.product_qty:
po_line_obj.write(cr, SUPERUSER_ID, po_line.id, {'product_qty': new_qty, 'price_unit': new_price}, context=context)
sum_po_line_ids.append(procurement.id)
else:
line_vals.update(order_id=po_id)
po_line_id = po_line_obj.create(cr, SUPERUSER_ID, line_vals, context=context)
linked_po_ids.append(procurement.id)
else:
name = seq_obj.get(cr, uid, 'purchase.order') or _('PO: %s') % procurement.name
po_vals = {
'name': name,
'origin': procurement.origin,
'partner_id': partner.id,
'location_id': procurement.location_id.id,
'picking_type_id': procurement.rule_id.picking_type_id.id,
'pricelist_id': partner.property_product_pricelist_purchase.id,
'currency_id': partner.property_product_pricelist_purchase and partner.property_product_pricelist_purchase.currency_id.id or procurement.company_id.currency_id.id,
'date_order': purchase_date.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'company_id': procurement.company_id.id,
'fiscal_position': po_obj.onchange_partner_id(cr, uid, None, partner.id, context=context)['value']['fiscal_position'],
'payment_term_id': partner.property_supplier_payment_term.id or False,
'dest_address_id': procurement.partner_dest_id.id,
}
po_id = self.create_procurement_purchase_order(cr, SUPERUSER_ID, procurement, po_vals, line_vals, context=context)
po_line_id = po_obj.browse(cr, uid, po_id, context=context).order_line[0].id
pass_ids.append(procurement.id)
res[procurement.id] = po_line_id
self.write(cr, uid, [procurement.id], {'purchase_line_id': po_line_id}, context=context)
if pass_ids:
self.message_post(cr, uid, pass_ids, body=_("Draft Purchase Order created"), context=context)
if linked_po_ids:
self.message_post(cr, uid, linked_po_ids, body=_("Purchase line created and linked to an existing Purchase Order"), context=context)
if sum_po_line_ids:
self.message_post(cr, uid, sum_po_line_ids, body=_("Quantity added in existing Purchase Order Line"), context=context)
return res
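
# --- Editor's sketch (added for illustration, not part of the original module):
# a minimal, dependency-free picture of the supplier minimum-quantity rule
# applied in _calc_new_qty_price above; names and values are hypothetical.
def _example_apply_supplier_min_qty(procured_qty, existing_line_qty, supplier_min_qty):
    # qty accumulates the procurement on top of the existing line quantity,
    # then is bumped up to the supplier's minimum when the result is positive.
    qty = procured_qty + existing_line_qty
    return max(qty, supplier_min_qty) if qty > 0.0 else 0.0

# e.g. _example_apply_supplier_min_qty(3.0, 2.0, 10.0) == 10.0
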
class mail_mail(osv.Model):
_name = 'mail.mail'
_inherit = 'mail.mail'
def _postprocess_sent_message(self, cr, uid, mail, context=None, mail_sent=True):
if mail_sent and mail.model == 'purchase.order':
obj = self.pool.get('purchase.order').browse(cr, uid, mail.res_id, context=context)
if obj.state == 'draft':
self.pool.get('purchase.order').signal_workflow(cr, uid, [mail.res_id], 'send_rfq')
return super(mail_mail, self)._postprocess_sent_message(cr, uid, mail=mail, context=context, mail_sent=mail_sent)
class product_template(osv.Model):
_name = 'product.template'
_inherit = 'product.template'
def _get_buy_route(self, cr, uid, context=None):
buy_route = self.pool.get('ir.model.data').xmlid_to_res_id(cr, uid, 'purchase.route_warehouse0_buy')
if buy_route:
return [buy_route]
return []
def _purchase_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, 0)
for template in self.browse(cr, uid, ids, context=context):
res[template.id] = sum([p.purchase_count for p in template.product_variant_ids])
return res
_columns = {
'purchase_ok': fields.boolean('Can be Purchased', help="Specify if the product can be selected in a purchase order line."),
'purchase_count': fields.function(_purchase_count, string='# Purchases', type='integer'),
}
_defaults = {
'purchase_ok': 1,
'route_ids': _get_buy_route,
}
def action_view_purchases(self, cr, uid, ids, context=None):
products = self._get_products(cr, uid, ids, context=context)
result = self._get_act_window_dict(cr, uid, 'purchase.action_purchase_line_product_tree', context=context)
result['domain'] = "[('product_id','in',[" + ','.join(map(str, products)) + "])]"
return result
class product_product(osv.Model):
_name = 'product.product'
_inherit = 'product.product'
def _purchase_count(self, cr, uid, ids, field_name, arg, context=None):
Purchase = self.pool['purchase.order']
return {
            product_id: Purchase.search_count(cr, uid, [('order_line.product_id', '=', product_id)], context=context)
for product_id in ids
}
def action_view_purchases(self, cr, uid, ids, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
result = self.pool['product.template']._get_act_window_dict(cr, uid, 'purchase.action_purchase_line_product_tree', context=context)
result['domain'] = "[('product_id','in',[" + ','.join(map(str, ids)) + "])]"
return result
_columns = {
'purchase_count': fields.function(_purchase_count, string='# Purchases', type='integer'),
}
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, context=None):
context = context or {}
if context.get('default_model') == 'purchase.order' and context.get('default_res_id'):
context = dict(context, mail_post_autofollow=True)
self.pool.get('purchase.order').signal_workflow(cr, uid, [context['default_res_id']], 'send_rfq')
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
class account_invoice(osv.Model):
""" Override account_invoice to add Chatter messages on the related purchase
orders, logging the invoice receipt or payment. """
_inherit = 'account.invoice'
def invoice_validate(self, cr, uid, ids, context=None):
res = super(account_invoice, self).invoice_validate(cr, uid, ids, context=context)
purchase_order_obj = self.pool.get('purchase.order')
# read access on purchase.order object is not required
if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
user_id = SUPERUSER_ID
else:
user_id = uid
po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
for order in purchase_order_obj.browse(cr, user_id, po_ids, context=context):
purchase_order_obj.message_post(cr, user_id, order.id, body=_("Invoice received"), context=context)
invoiced = []
shipped = True
            # for invoice methods 'manual' or 'order', the shipping state does not matter
            # for invoices based on incoming shipments, beware of partial deliveries
if (order.invoice_method == 'picking' and
not all(picking.invoice_state in ['invoiced'] for picking in order.picking_ids)):
shipped = False
for po_line in order.order_line:
if (po_line.invoice_lines and
all(line.invoice_id.state not in ['draft', 'cancel'] for line in po_line.invoice_lines)):
invoiced.append(po_line.id)
if invoiced and shipped:
self.pool['purchase.order.line'].write(cr, user_id, invoiced, {'invoiced': True})
workflow.trg_write(user_id, 'purchase.order', order.id, cr)
return res
def confirm_paid(self, cr, uid, ids, context=None):
res = super(account_invoice, self).confirm_paid(cr, uid, ids, context=context)
purchase_order_obj = self.pool.get('purchase.order')
# read access on purchase.order object is not required
if not purchase_order_obj.check_access_rights(cr, uid, 'read', raise_exception=False):
user_id = SUPERUSER_ID
else:
user_id = uid
po_ids = purchase_order_obj.search(cr, user_id, [('invoice_ids', 'in', ids)], context=context)
for po_id in po_ids:
purchase_order_obj.message_post(cr, user_id, po_id, body=_("Invoice paid"), context=context)
return res
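
# --- Editor's sketch (hypothetical helper, not in the original module): the
# SUPERUSER fallback used by invoice_validate() and confirm_paid() above, as a
# reusable pattern.
def _example_user_for_read(pool, cr, uid, model_name):
    """Return uid if it may read model_name, else escalate to SUPERUSER_ID."""
    model = pool.get(model_name)
    if model.check_access_rights(cr, uid, 'read', raise_exception=False):
        return uid
    return SUPERUSER_ID
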
class account_invoice_line(osv.Model):
""" Override account_invoice_line to add the link to the purchase order line it is related to"""
_inherit = 'account.invoice.line'
_columns = {
'purchase_line_id': fields.many2one('purchase.order.line',
'Purchase Order Line', ondelete='set null', select=True,
readonly=True),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
40223117cda/2015cdaw13 | static/Brython3.1.1-20150328-091302/Lib/markdown2.py | 669 | 8143 | import browser.html
import re
class URL:
def __init__(self,src):
elts = src.split(maxsplit=1)
self.href = elts[0]
self.alt = ''
if len(elts)==2:
alt = elts[1]
if alt[0]=='"' and alt[-1]=='"':self.alt=alt[1:-1]
elif alt[0]=="'" and alt[-1]=="'":self.alt=alt[1:-1]
elif alt[0]=="(" and alt[-1]==")":self.alt=alt[1:-1]
class CodeBlock:
def __init__(self,line):
self.lines = [line]
def to_html(self):
if self.lines[0].startswith("`"):
self.lines.pop(0)
res = escape('\n'.join(self.lines))
res = unmark(res)
res = '<pre class="marked">%s</pre>\n' %res
return res,[]
class Marked:
def __init__(self, line=''):
self.line = line
self.children = []
def to_html(self):
return apply_markdown(self.line)
# get references
refs = {}
ref_pattern = r"^\[(.*)\]:\s+(.*)"
def mark(src):
global refs
refs = {}
# split source in sections
# sections can be :
# - a block-level HTML element (markdown syntax will not be processed)
# - a script
# - a span-level HTML tag (markdown syntax will be processed)
# - a code block
# normalise line feeds
src = src.replace('\r\n','\n')
    # setext-style headers : lines underlined with = (h1) or - (h2)
src = re.sub(r'(.*?)\n=+\n', '\n# \\1\n', src)
src = re.sub(r'(.*?)\n-+\n', '\n## \\1\n', src)
lines = src.split('\n')
i = bq = 0
ul = ol = 0
while i<len(lines):
# enclose lines starting by > in a blockquote
if lines[i].startswith('>'):
nb = 1
while nb<len(lines[i]) and lines[i][nb]=='>':
nb += 1
lines[i] = lines[i][nb:]
if nb>bq:
lines.insert(i,'<blockquote>'*(nb-bq))
i += 1
bq = nb
elif nb<bq:
lines.insert(i,'</blockquote>'*(bq-nb))
i += 1
bq = nb
elif bq>0:
lines.insert(i,'</blockquote>'*bq)
i += 1
bq = 0
# unordered lists
if lines[i].strip() and lines[i].lstrip()[0] in '-+*' \
and (i==0 or ul or not lines[i-1].strip()):
print('is ul',lines[i])
# line indentation indicates nesting level
nb = 1+len(lines[i])-len(lines[i].lstrip())
lines[i] = '<li>'+lines[i][1+nb:]
if nb>ul:
lines.insert(i,'<ul>'*(nb-ul))
i += 1
elif nb<ul:
lines.insert(i,'</ul>'*(ul-nb))
i += 1
ul = nb
elif ul:
lines.insert(i,'</ul>'*ul)
i += 1
ul = 0
# ordered lists
mo = re.search(r'^(\d+\.)',lines[i])
if mo:
if not ol:
lines.insert(i,'<ol>')
i += 1
lines[i] = '<li>'+lines[i][len(mo.groups()[0]):]
ol = 1
elif ol:
lines.insert(i,'</ol>')
i += 1
ol = 0
i += 1
sections = []
scripts = []
section = Marked()
i = 0
while i<len(lines):
line = lines[i]
        if line.strip() and line.startswith('    '):
if isinstance(section,Marked) and section.line:
sections.append(section)
section = CodeBlock(line[4:])
j = i+1
            while j<len(lines) and lines[j].strip() \
                and lines[j].startswith('    '):
section.lines.append(lines[j][4:])
j += 1
sections.append(section)
section = Marked()
i = j
continue
elif line.lower().startswith('<script'):
if isinstance(section,Marked) and section.line:
sections.append(section)
section = Marked()
j = i+1
while j<len(lines):
if lines[j].lower().startswith('</script>'):
scripts.append('\n'.join(lines[i+1:j]))
for k in range(i,j+1):
lines[k] = ''
break
j += 1
i = j
continue
else:
mo = re.search(ref_pattern,line)
if mo is not None:
if isinstance(section,Marked) and section.line:
sections.append(section)
section = Marked()
key = mo.groups()[0]
value = URL(mo.groups()[1])
refs[key.lower()] = value
else:
if line.strip():
if section.line:
section.line += ' '
section.line += line
else:
sections.append(section)
section = Marked()
i += 1
res = ''
for section in sections:
mk,_scripts = section.to_html()
res += '<p>'+mk+'\n'
scripts += _scripts
return res,scripts
def escape(czone):
    czone = czone.replace('&','&amp;')
    czone = czone.replace('<','&lt;')
    czone = czone.replace('>','&gt;')
return czone
def s_escape(mo):
# used in re.sub
czone = mo.string[mo.start():mo.end()]
return escape(czone)
def unmark(code_zone):
    # convert _ to &#95; inside inline code
    code_zone = code_zone.replace('_','&#95;')
    return code_zone
def s_unmark(mo):
    # convert _ to &#95; inside inline code
    code_zone = mo.string[mo.start():mo.end()]
    code_zone = code_zone.replace('_','&#95;')
return code_zone
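
# Editor's sketch (added for illustration): expected behaviour of the two
# helpers above once the HTML entities are emitted correctly.
def _escape_unmark_example():
    # escape() HTML-escapes a code span; unmark() protects underscores from
    # later being interpreted as emphasis markers.
    assert escape('a < b & c') == 'a &lt; b &amp; c'
    assert unmark('my_var') == 'my&#95;var'
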
def apply_markdown(src):
scripts = []
# replace \` by `
src = re.sub(r'\\\`','`',src)
# escape < > & in inline code
code_pattern = r'\`(\S.*?\S)\`'
src = re.sub(code_pattern,s_escape,src)
    # also convert _ to &#95;
src = re.sub(code_pattern,s_unmark,src)
# inline links
link_pattern1 = r'\[(.+?)\]\s?\((.+?)\)'
def repl(mo):
g1,g2 = mo.groups()
        g2 = re.sub('_','&#95;',g2)
return '<a href="%s">%s</a>' %(g2,g1)
src = re.sub(link_pattern1,repl,src)
# reference links
link_pattern2 = r'\[(.+?)\]\s?\[(.*?)\]'
while True:
mo = re.search(link_pattern2,src)
if mo is None:break
text,key = mo.groups()
print(text,key)
if not key:key=text # implicit link name
if key.lower() not in refs:
            raise KeyError('unknown reference %s' %key)
url = refs[key.lower()]
repl = '<a href="'+url.href+'"'
if url.alt:
repl += ' title="'+url.alt+'"'
repl += '>%s</a>' %text
src = re.sub(link_pattern2,repl,src,count=1)
# emphasis
# replace \* by *
src = re.sub(r'\\\*','*',src)
# replace \_ by _
src = re.sub(r'\\\_','_',src)
    # _ and * surrounded by spaces are not markup
    src = re.sub(r' _ ',' &#95; ',src)
    src = re.sub(r' \* ',' &#42; ',src)
strong_patterns = [('STRONG',r'\*\*(.*?)\*\*'),('B',r'__(.*?)__')]
for tag,strong_pattern in strong_patterns:
src = re.sub(strong_pattern,r'<%s>\1</%s>' %(tag,tag),src)
em_patterns = [('EM',r'\*(.*?)\*'),('I',r'\_(.*?)\_')]
for tag,em_pattern in em_patterns:
src = re.sub(em_pattern,r'<%s>\1</%s>' %(tag,tag),src)
# inline code
# replace \` by `
src = re.sub(r'\\\`','`',src)
code_pattern = r'\`(.*?)\`'
src = re.sub(code_pattern,r'<code>\1</code>',src)
# ordered lists
lines = src.split('\n')
atx_header_pattern = '^(#+)(.*)(#*)'
for i,line in enumerate(lines):
print('line [%s]' %line, line.startswith('#'))
mo = re.search(atx_header_pattern,line)
if not mo:continue
print('pattern matches')
level = len(mo.groups()[0])
lines[i] = re.sub(atx_header_pattern,
'<H%s>%s</H%s>\n' %(level,mo.groups()[1],level),
line,count=1)
src = '\n'.join(lines)
src = re.sub('\n\n+','\n<p>',src)+'\n'
return src,scripts
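
# Editor's sketch (added for illustration): typical entry-point usage; mark()
# returns the rendered HTML plus the bodies of any extracted <script> blocks.
def _mark_example():
    html, scripts = mark('# Title\n\nSome *emphasis* and `code`.')
    return html, scripts
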
| gpl-3.0 |
plissonf/scikit-learn | sklearn/ensemble/tests/test_bagging.py | 72 | 25573 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.grid_search import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.cross_validation import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = getattr(sparse_classifier, f)(X_test_sparse)
# Trained on dense format
dense_classifier = BaggingClassifier(
base_estimator=CustomSVC(),
random_state=1,
**params
).fit(X_train, y_train)
dense_results = getattr(dense_classifier, f)(X_test)
assert_array_equal(sparse_results, dense_results)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
    # Test that bootstrapping samples generates non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
    # Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
    # Test that a warm-started second fit with a smaller n_estimators raises an error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
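
def _warm_start_usage_sketch():
    # Editor's sketch (added for illustration, not an original test): the
    # warm-start pattern exercised above: fit 5 estimators, then grow the
    # same ensemble to 10 without refitting the first 5.
    X, y = make_hastie_10_2(n_samples=20, random_state=1)
    clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=0)
    clf.fit(X, y)
    clf.set_params(n_estimators=10)
    clf.fit(X, y)
    assert_equal(len(clf.estimators_), 10)
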
| bsd-3-clause |
BioRoboticsUNAM/pyRobotics | pyrobotics/shared_variables.py | 1 | 10146 | import re
from StringIO import StringIO
import BB
from messages import Message, MessageTypes, Command, Response
class SharedVarTypes(object):
'''
Pseudo-enum of the types of shared variables.
The existing values are:
* SharedVarTypes.BYTE_ARRAY
* SharedVarTypes.INT
* SharedVarTypes.INT_ARRAY
* SharedVarTypes.LONG
* SharedVarTypes.LONG_ARRAY
* SharedVarTypes.DOUBLE
* SharedVarTypes.DOUBLE_ARRAY
* SharedVarTypes.STRING
* SharedVarTypes.MATRIX
* SharedVarTypes.RECOGNIZED_SPEECH
* SharedVarTypes.VAR
    The type VAR can be considered an "unknown" type. It doesn't do any special
    serialization or deserialization processing; instead it sends the string
    the user inputs directly as parameters.
    '''
BYTE_ARRAY = 'byte[]'
INT = 'int'
INT_ARRAY = 'int[]'
LONG = 'long'
LONG_ARRAY = 'long[]'
DOUBLE = 'double'
DOUBLE_ARRAY = 'double[]'
STRING = 'string'
MATRIX = 'matrix'
RECOGNIZED_SPEECH = 'RecognizedSpeech'
VAR = 'var'
class SubscriptionTypes(object):
'''
Pseudo-enum of the types of subscriptions. (See :meth:`SubscribeToSharedVar <pyRobotics.BB.SubscribeToSharedVar>`)
The existing values are:
* SubscriptionTypes.CREATION
* SubscriptionTypes.WRITE_MODULE
* SubscriptionTypes.WRITE_OTHERS
* SubscriptionTypes.WRITE_ANY
'''
CREATION = 'creation'
WRITE_MODULE = 'writemodule'
WRITE_OTHERS = 'writeothers'
WRITE_ANY = 'writeany'
class ReportTypes(object):
'''
Pseudo-enum of the types of reports. (See :meth:`SubscribeToSharedVar <pyRobotics.BB.SubscribeToSharedVar>`)
The existing values are:
* ReportTypes.CONTENT
* ReportTypes.NOTIFY
'''
CONTENT = 'content'
NOTIFY = 'notify'
class SharedVar(Message):
'''
A wrapper for the shared variable notifications.
An object of this type is passed as parameter to every subscription handler function. (See :meth:`SubscribeToSharedVar <pyRobotics.BB.SubscribeToSharedVar>`)
Objects of this class include the members:
sv.varName
The name of the shared variable of which a notification was received.
sv.svType
One of the class variables in the pseudo-enum :class:`SharedVarTypes`.
sv.size
-1 if it is not an array type, an integer if sv.svType is of type SharedVariableTypes.BYTE_ARRAY,
SharedVariableTypes.INT_ARRAY, SharedVariableTypes.LONG_ARRAY or SharedVariableTypes.DOUBLE_ARRAY.
sv.data
Contains the deserialized data of this shared variable, depending on its type.
'''
_rx = re.compile(r'^\s*({\s*)?(?P<type>([a-zA-Z_][_a-zA-Z0-9]*))(?P<array>(\[(?P<size>\d+)?\]))?\s+(?P<name>([a-zA-Z_][_a-zA-Z0-9]*))\s+(?P<data>(("(\\.|[^"])*")|({[^}]*})))?\s*}?((\s+%)?\s+(?P<report>(\w+))\s+%\s+(?P<subscription>(\w+))\s+%\s+(?P<writer>([A-Z][0-9A-Z\-]*)))?')
def __init__(self, responseObj):
super(SharedVar, self).__init__(responseObj.name)
self.type = MessageTypes.SHARED_VAR
self.successful = responseObj.successful
self.params = responseObj.params
self._id = responseObj._id
@classmethod
def Parse(cls, s):
r = Response.Parse(s)
if not (r and r.name == 'read_var'):
return r
var = SharedVar(r)
m = SharedVar._rx.match(var.params)
if not m:
print 'read_var received but failed to parse:'
print var.params
return None
var.svType = m.group('type') + ('[]' if m.group('array') else '')
var.size = -1 if not m.group('size') else int(m.group('size'))
var.varName = m.group('name')
var.data = m.group('data')
var.report = m.group('report')
var.subscription = m.group('subscription')
var.writer = m.group('writer')
var.isNotification = not not var.report
var.data = SharedVar._ProcessReadVar(var)
return var
@classmethod
def _ProcessReadVar(cls, var):
try:
if not var.data:
return None
if var.svType == SharedVarTypes.STRING:
return Message._DeserializeString(var.data)
if var.svType in [SharedVarTypes.INT, SharedVarTypes.LONG]:
return int(var.data)
if var.svType == SharedVarTypes.DOUBLE:
return float(var.data)
if var.svType in [SharedVarTypes.INT_ARRAY, SharedVarTypes.LONG_ARRAY]:
return [int(x) for x in var.data.split()]
if var.svType == SharedVarTypes.DOUBLE_ARRAY:
return [float(x) for x in var.data.split()]
if var.svType == SharedVarTypes.BYTE_ARRAY:
return SharedVar._DeserializeByteArray(var.data)
if var.svType == SharedVarTypes.RECOGNIZED_SPEECH:
return SharedVar._DeserializeRecognizedSpeech(var.data)
if var.svType == SharedVarTypes.MATRIX:
return SharedVar._DeserializeMatrix(var.data)
if var.svType == SharedVarTypes.VAR:
if var.data == 'null':
return None
return var.data
except:
pass
print 'Error parsing type: ' + var.svType
return None
@classmethod
def _DeserializeByteArray(cls, data):
data = data[2:]
l = []
while data:
l.append(int(data[:2], 16))
data = data[2:]
return l
@classmethod
def _DeserializeMatrix(cls, data):
rows, data = data.split(None, 1)
x = rows.find('x')
columns = int(rows[x+1:])
rows = int(rows[:x])
matrix = []
for _ in range(rows):
l = []
for _ in range(columns):
x = data.find(' ')
if x > -1:
item, data = data.split(None, 1)
else:
item = data
l.append(float(item))
matrix.append(l)
return matrix
@classmethod
def _SerializeMatrix(cls, data):
rows = len(data)
cols = len(data[0])
txt = StringIO()
        txt.write(str(rows) + 'x' + str(cols))
for r in data:
for c in r:
txt.write(' ' + str(c))
return txt.getvalue()
@classmethod
def _SerializeRecognizedSpeech(cls, data):
if not data:
return None
txt = StringIO()
txt.write('{ ' + str(len(data)) + ' ')
for t in data:
txt.write(Message._SerializeString(t[0]) + ' ' + str(t[1]) + ' ')
txt.write('}')
return txt.getvalue()
@classmethod
def _DeserializeRecognizedSpeech(cls, data):
        '''Returns a list of (text, confidence) tuples.'''
if data == '' or data == 'null':
return None
if data[0] != '{' or data[-1] != '}':
return None
data = data[1:-1].strip()
count = 0
if data.find(' ') > -1:
count, data = data.split(None, 1)
else:
return None
count = int(count)
l = []
for _ in range(count):
if data[0] != '"':
return None
pos = 1
str_len = len(data)
while pos < str_len:
if data[pos] == '"':
break
elif data[pos] == '\\':
pos += 1
pos += 1
if pos == str_len:
return None
currentText = Message._DeserializeString(data[:pos + 1])
data = data[pos+1:].strip()
if data.find(' ') > -1:
currentConfidence, data = data.split(None, 1)
else:
currentConfidence = data
currentConfidence = float(currentConfidence)
l.append((currentText, currentConfidence))
return l
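
def _matrix_wire_format_example():
    # Editor's sketch (added for illustration): the matrix wire format is
    # '<rows>x<cols>' followed by the row-major values; a round-trip of a
    # 2x2 identity matrix.
    data = SharedVar._SerializeMatrix([[1.0, 0.0], [0.0, 1.0]])
    # data == '2x2 1.0 0.0 0.0 1.0'
    return SharedVar._DeserializeMatrix(data)
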
def _CreateSharedVar(svType, name):
    r = BB.SendAndWait(Command('create_var', svType + ' ' + name), 2000, 2)
return (r and r.successful)
def _WriteSharedVar(sharedVarType, name, data):
w = str(data)
if sharedVarType == SharedVarTypes.BYTE_ARRAY:
w = '0x' + ''.join([ "%02X" % x for x in data ])
elif sharedVarType in [SharedVarTypes.INT, SharedVarTypes.LONG]:
w = str(int(data))
elif sharedVarType == SharedVarTypes.DOUBLE:
w = str(float(data))
elif sharedVarType in [SharedVarTypes.INT_ARRAY, SharedVarTypes.LONG_ARRAY]:
w = ' '.join([str(int(x)) for x in data])
elif sharedVarType == SharedVarTypes.DOUBLE_ARRAY:
w = ' '.join([str(float(x)) for x in data])
elif sharedVarType == SharedVarTypes.STRING:
w = Message._SerializeString(data)
elif sharedVarType == SharedVarTypes.MATRIX:
w = SharedVar._SerializeMatrix(data)
elif sharedVarType == SharedVarTypes.RECOGNIZED_SPEECH:
w = SharedVar._SerializeRecognizedSpeech(data)
else:
print 'pyRobotics - ERROR: Unhandled shared var type'
return False
r = BB.SendAndWait(Command('write_var', sharedVarType + ' ' + name + ' ' + w), 2000, 2)
return (r and r.successful)
def _ReadSharedVar(name):
r = BB.SendAndWait(Command('read_var', name), 2000)
if not (r and r.successful):
return None
return r.data
def _SubscribeToSharedVar(name, subscriptionType, reportType):
r = BB.SendAndWait(Command('suscribe_var', name + ' suscribe=' + subscriptionType + ' report=' + reportType), 2000, 2)
return (r and r.successful)
| mit |
icasdri/pybatterymonitor | setup.py | 1 | 1441 | # Copyright 2014 icasdri
#
# This file is part of pybatterymonitor.
#
# pybatterymonitor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pybatterymonitor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pybatterymonitor. If not, see <http://www.gnu.org/licenses/>.
__author__ = 'icasdri'
from pybatterymonitor.pybatterymonitorconfig import VERSION, DESCRIPTION
from distutils.core import setup
setup(
name='pybatterymonitor',
version=str(VERSION),
license='GPL3',
author='icasdri',
author_email='[email protected]',
description=DESCRIPTION,
url='https://github.com/icasdri/pybatterymonitor',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: End Users/Desktop',
'Topic :: Utilities',
'License :: OSI Approved :: GPL License',
'Programming Language :: Python',
'Programming Language :: Python :: 3'
],
packages=['pybatterymonitor'],
scripts=['distributing/pybatterymonitor']
)
| gpl-3.0 |
aspidites/django | django/contrib/sessions/backends/signed_cookies.py | 383 | 2895 | from django.conf import settings
from django.contrib.sessions.backends.base import SessionBase
from django.core import signing
class SessionStore(SessionBase):
def load(self):
"""
        We load the data from the key itself instead of fetching it from
        some external data store. This is the opposite of _get_session_key();
        raises BadSignature if the signature check fails.
"""
try:
return signing.loads(self.session_key,
serializer=self.serializer,
# This doesn't handle non-default expiry dates, see #19201
max_age=settings.SESSION_COOKIE_AGE,
salt='django.contrib.sessions.backends.signed_cookies')
except Exception:
# BadSignature, ValueError, or unpickling exceptions. If any of
# these happen, reset the session.
self.create()
return {}
def create(self):
"""
To create a new key, we simply make sure that the modified flag is set
so that the cookie is set on the client for the current request.
"""
self.modified = True
def save(self, must_create=False):
"""
To save, we get the session key as a securely signed string and then
set the modified flag so that the cookie is set on the client for the
current request.
"""
self._session_key = self._get_session_key()
self.modified = True
def exists(self, session_key=None):
"""
This method makes sense when you're talking to a shared resource, but
it doesn't matter when you're storing the information in the client's
cookie.
"""
return False
def delete(self, session_key=None):
"""
To delete, we clear the session key and the underlying data structure
and set the modified flag so that the cookie is set on the client for
the current request.
"""
self._session_key = ''
self._session_cache = {}
self.modified = True
def cycle_key(self):
"""
Keeps the same data but with a new key. To do this, we just have to
call ``save()`` and it will automatically save a cookie with a new key
at the end of the request.
"""
self.save()
def _get_session_key(self):
"""
Most session backends don't need to override this method, but we do,
because instead of generating a random string, we want to actually
generate a secure url-safe Base64-encoded string of data as our
session key.
"""
session_cache = getattr(self, '_session_cache', {})
return signing.dumps(session_cache, compress=True,
salt='django.contrib.sessions.backends.signed_cookies',
serializer=self.serializer)
@classmethod
def clear_expired(cls):
pass
| bsd-3-clause |
tectronics/cortex-vfx | test/IECore/ops/mayaUserData/mayaUserData-1.py | 12 | 2775 | ##########################################################################
#
# Copyright (c) 2010, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import IECore
class mayaUserData( IECore.Op ) :
def __init__( self ) :
IECore.Op.__init__( self,
"An Op to test the IECoreMaya::ParameterHandler support for userData.",
IECore.IntParameter(
name = "result",
description = "d",
defaultValue = 2,
)
)
self.parameters().addParameters(
[
IECore.IntParameter(
"t",
"",
100,
userData = {
"maya" : {
"defaultConnection" : IECore.StringData( "time1.outTime" ),
}
}
),
IECore.IntParameter(
"e",
"",
100,
userData = {
"maya" : {
"defaultExpression" : IECore.StringData( " = time1.outTime * 10" ),
}
}
),
IECore.StringParameter(
"s",
"",
"",
userData = {
"maya" : {
"valueProvider" : IECore.StringData( "connectedNodeName" ),
}
}
),
]
)
def doOperation( self, args ) :
return IECore.IntData( 2 )
IECore.registerRunTimeTyped( mayaUserData )
| bsd-3-clause |
rickerc/nova_audit | nova/openstack/common/strutils.py | 25 | 7322 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import re
import sys
import unicodedata
from nova.openstack.common.gettextutils import _
# Used for looking up extensions of text
# to their 'multiplied' byte amount
BYTE_MULTIPLIERS = {
'': 1,
't': 1024 ** 4,
'g': 1024 ** 3,
'm': 1024 ** 2,
'k': 1024,
}
BYTE_REGEX = re.compile(r'(^-?\d+)(\D*)')
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")
def int_from_bool_as_string(subject):
"""Interpret a string as a boolean and return either 1 or 0.
    Any string value in TRUE_STRINGS:
        ('1', 't', 'true', 'on', 'y', 'yes')
    is interpreted, case-insensitively, as a boolean True.
Useful for JSON-decoded stuff and config file parsing
"""
return bool_from_string(subject) and 1 or 0
def bool_from_string(subject, strict=False):
"""Interpret a string as a boolean.
A case-insensitive match is performed such that strings matching 't',
'true', 'on', 'y', 'yes', or '1' are considered True and, when
`strict=False`, anything else is considered False.
Useful for JSON-decoded stuff and config file parsing.
If `strict=True`, unrecognized values, including None, will raise a
ValueError which is useful when parsing values passed in from an API call.
Strings yielding False are 'f', 'false', 'off', 'n', 'no', or '0'.
"""
if not isinstance(subject, basestring):
subject = str(subject)
lowered = subject.strip().lower()
if lowered in TRUE_STRINGS:
return True
elif lowered in FALSE_STRINGS:
return False
elif strict:
acceptable = ', '.join(
"'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
msg = _("Unrecognized value '%(val)s', acceptable values are:"
" %(acceptable)s") % {'val': subject,
'acceptable': acceptable}
raise ValueError(msg)
else:
return False
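
def _bool_from_string_examples():
    # Editor's sketch (added for illustration) of the mapping defined by the
    # TRUE_STRINGS / FALSE_STRINGS tables above.
    assert bool_from_string('yes') is True
    assert bool_from_string('OFF') is False
    assert bool_from_string('maybe') is False   # lenient by default
    # bool_from_string('maybe', strict=True) would raise ValueError
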
def safe_decode(text, incoming=None, errors='strict'):
"""Decodes incoming str using `incoming` if they're not already unicode.
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a unicode `incoming` encoded
representation of it.
    :raises TypeError: If text is not an instance of basestring
"""
if not isinstance(text, basestring):
raise TypeError("%s can't be decoded" % type(text))
if isinstance(text, unicode):
return text
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
try:
return text.decode(incoming, errors)
except UnicodeDecodeError:
# Note(flaper87) If we get here, it means that
# sys.stdin.encoding / sys.getdefaultencoding
# didn't return a suitable encoding to decode
# text. This happens mostly when global LANG
# var is not set correctly and there's no
# default encoding. In this case, most likely
# python will use ASCII or ANSI encoders as
# default encodings but they won't be capable
# of decoding non-ASCII characters.
#
# Also, UTF-8 is being used since it's an ASCII
# extension.
return text.decode('utf-8', errors)
def safe_encode(text, incoming=None,
encoding='utf-8', errors='strict'):
"""Encodes incoming str/unicode using `encoding`.
If incoming is not specified, text is expected to be encoded with
current python's default encoding. (`sys.getdefaultencoding`)
:param incoming: Text's current encoding
:param encoding: Expected encoding for text (Default UTF-8)
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: text or a bytestring `encoding` encoded
representation of it.
:raises TypeError: If text is not an isntance of basestring
"""
if not isinstance(text, basestring):
raise TypeError("%s can't be encoded" % type(text))
if not incoming:
incoming = (sys.stdin.encoding or
sys.getdefaultencoding())
if isinstance(text, unicode):
return text.encode(encoding, errors)
elif text and encoding != incoming:
# Decode text before encoding it with `encoding`
text = safe_decode(text, incoming, errors)
return text.encode(encoding, errors)
return text
def to_bytes(text, default=0):
"""Converts a string into an integer of bytes.
Looks at the last characters of the text to determine
what conversion is needed to turn the input text into a byte number.
Supports "B, K(B), M(B), G(B), and T(B)". (case insensitive)
:param text: String input for bytes size conversion.
:param default: Default return value when text is blank.
"""
match = BYTE_REGEX.search(text)
if match:
magnitude = int(match.group(1))
mult_key_org = match.group(2)
if not mult_key_org:
return magnitude
elif text:
msg = _('Invalid string format: %s') % text
raise TypeError(msg)
else:
return default
mult_key = mult_key_org.lower().replace('b', '', 1)
multiplier = BYTE_MULTIPLIERS.get(mult_key)
if multiplier is None:
msg = _('Unknown byte multiplier: %s') % mult_key_org
raise TypeError(msg)
return magnitude * multiplier
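
def _to_bytes_examples():
    # Editor's sketch (added for illustration): suffixes are case-insensitive
    # and a trailing 'b' is ignored, so '10K', '10k' and '10KB' are equal.
    assert to_bytes('1024') == 1024
    assert to_bytes('10K') == 10 * 1024
    assert to_bytes('2GB') == 2 * 1024 ** 3
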
def to_slug(value, incoming=None, errors="strict"):
"""Normalize string.
Convert to lowercase, remove non-word characters, and convert spaces
to hyphens.
Inspired by Django's `slugify` filter.
:param value: Text to slugify
:param incoming: Text's current encoding
:param errors: Errors handling policy. See here for valid
values http://docs.python.org/2/library/codecs.html
:returns: slugified unicode representation of `value`
:raises TypeError: If text is not an instance of basestring
"""
value = safe_decode(value, incoming, errors)
# NOTE(aababilov): no need to use safe_(encode|decode) here:
# encodings are always "ascii", error handling is always "ignore"
# and types are always known (first: unicode; second: str)
value = unicodedata.normalize("NFKD", value).encode(
"ascii", "ignore").decode("ascii")
value = SLUGIFY_STRIP_RE.sub("", value).strip().lower()
return SLUGIFY_HYPHENATE_RE.sub("-", value)
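
def _to_slug_example():
    # Editor's sketch (added for illustration): accents are stripped via NFKD
    # normalization and whitespace collapses to hyphens.
    assert to_slug(u'H\xe9llo World!') == u'hello-world'
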
| apache-2.0 |
ramanajee/phantomjs | src/breakpad/src/tools/gyp/pylib/gyp/generator/xcode.py | 137 | 50429 | #!/usr/bin/python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import errno
import os
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific. The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files. The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific. INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'
# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that share the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'SHARED_LIB_PREFIX': '',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_SUFFIX': '.dylib',
# INTERMEDIATE_DIR is a place for targets to build up intermediate products.
# It is specific to each build environment. It is only guaranteed to exist
# and be constant within the context of a project, corresponding to a single
# input file. Some build environments may allow their intermediate directory
# to be shared on a wider scale, but this is not guaranteed.
'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
'OS': 'mac',
'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
'CONFIGURATION_NAME': '$(CONFIGURATION)',
}
# The Xcode-specific sections that hold paths.
generator_additional_path_sections = [
'mac_bundle_resources',
# 'mac_framework_dirs', input already handles _dirs endings.
]
# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
'mac_bundle',
'mac_bundle_resources',
'xcode_create_dependents_test_runner',
]
# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
'mac_bundle_resources',
]
def CreateXCConfigurationList(configuration_names):
xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
for configuration_name in configuration_names:
xcbc = gyp.xcodeproj_file.XCBuildConfiguration({
'name': configuration_name})
xccl.AppendProperty('buildConfigurations', xcbc)
xccl.SetProperty('defaultConfigurationName', configuration_names[0])
return xccl
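
# Editor's sketch (added for illustration): typical use of the helper above
# when seeding a project; the first name becomes the default configuration.
def _ExampleDefaultConfigurationList():
  return CreateXCConfigurationList(['Debug', 'Release'])
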
class XcodeProject(object):
def __init__(self, gyp_path, path, build_file_dict):
self.gyp_path = gyp_path
self.path = path
self.project = gyp.xcodeproj_file.PBXProject(path=path)
projectDirPath = gyp.common.RelativePath(
os.path.dirname(os.path.abspath(self.gyp_path)),
os.path.dirname(path) or '.')
self.project.SetProperty('projectDirPath', projectDirPath)
self.project_file = \
gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
self.build_file_dict = build_file_dict
# TODO(mark): add destructor that cleans up self.path if created_dir is
# True and things didn't complete successfully. Or do something even
# better with "try"?
self.created_dir = False
try:
os.makedirs(self.path)
self.created_dir = True
except OSError, e:
if e.errno != errno.EEXIST:
raise
def Finalize1(self, xcode_targets, serialize_all_tests):
    # Collect a list of all of the build configuration names used by the
    # various targets in the file. It is strongly advised that every target
    # in a project (even across multiple project files) use the same set of
    # configuration names.
configurations = []
for xct in self.project.GetProperty('targets'):
xccl = xct.GetProperty('buildConfigurationList')
xcbcs = xccl.GetProperty('buildConfigurations')
for xcbc in xcbcs:
name = xcbc.GetProperty('name')
if name not in configurations:
configurations.append(name)
# Replace the XCConfigurationList attached to the PBXProject object with
# a new one specifying all of the configuration names used by the various
# targets.
try:
xccl = CreateXCConfigurationList(configurations)
self.project.SetProperty('buildConfigurationList', xccl)
except:
import sys
sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
raise
# The need for this setting is explained above where _intermediate_var is
# defined. The comments below about wanting to avoid project-wide build
# settings apply here too, but this needs to be set on a project-wide basis
# so that files relative to the _intermediate_var setting can be displayed
# properly in the Xcode UI.
#
# Note that for configuration-relative files such as anything relative to
# _intermediate_var, for the purposes of UI tree view display, Xcode will
# only resolve the configuration name once, when the project file is
# opened. If the active build configuration is changed, the project file
# must be closed and reopened if it is desired for the tree view to update.
# This is filed as Apple radar 6588391.
xccl.SetBuildSetting(_intermediate_var,
'$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
xccl.SetBuildSetting(_shared_intermediate_var,
'$(SYMROOT)/DerivedSources/$(CONFIGURATION)')
# Set user-specified project-wide build settings. This is intended to be
# used very sparingly. Really, almost everything should go into
# target-specific build settings sections. The project-wide settings are
# only intended to be used in cases where Xcode attempts to resolve
# variable references in a project context as opposed to a target context,
    # such as when resolving sourceTree references while building up the
    # tree view for UI display.
for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
xccl.SetBuildSetting(xck, xcv)
# Sort the targets based on how they appeared in the input.
# TODO(mark): Like a lot of other things here, this assumes internal
# knowledge of PBXProject - in this case, of its "targets" property.
# ordinary_targets are ordinary targets that are already in the project
# file. run_test_targets are the targets that run unittests and should be
# used for the Run All Tests target. support_targets are the action/rule
# targets used by GYP file targets, just kept for the assert check.
ordinary_targets = []
run_test_targets = []
support_targets = []
    # targets is the full list of targets in the project.
targets = []
    # does it define its own "all"?
has_custom_all = False
# targets_for_all is the list of ordinary_targets that should be listed
# in this project's "All" target. It includes each non_runtest_target
# that does not have suppress_wildcard set.
targets_for_all = []
for target in self.build_file_dict['targets']:
target_name = target['target_name']
toolset = target['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
toolset)
xcode_target = xcode_targets[qualified_target]
# Make sure that the target being added to the sorted list is already in
# the unsorted list.
assert xcode_target in self.project._properties['targets']
targets.append(xcode_target)
ordinary_targets.append(xcode_target)
if xcode_target.support_target:
support_targets.append(xcode_target.support_target)
targets.append(xcode_target.support_target)
if not int(target.get('suppress_wildcard', False)):
targets_for_all.append(xcode_target)
if target_name.lower() == 'all':
        has_custom_all = True
      # If this target has a 'run_as' attribute, or is a test, add its
      # target to the targets, and (if it's a test) add it to the
      # test targets.
is_test = int(target.get('test', 0))
if target.get('run_as') or is_test:
# Make a target to run something. It should have one
# dependency, the parent xcode target.
xccl = CreateXCConfigurationList(configurations)
run_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run ' + target_name,
'productName': xcode_target.GetProperty('productName'),
'buildConfigurationList': xccl,
},
parent=self.project)
run_target.AddDependency(xcode_target)
# The test runner target has a build phase that executes the
# test, if this has the 'test' attribute. If the 'run_as' tag
# doesn't exist (meaning that this must be a test), then we
# define a default test command line.
command = target.get('run_as', {
'action': ['${BUILT_PRODUCTS_DIR}/${PRODUCT_NAME}']
})
script = ''
if command.get('working_directory'):
script = script + 'cd "%s"\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
command.get('working_directory'))
if command.get('environment'):
script = script + "\n".join(
['export %s="%s"' %
(key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
for (key, val) in command.get('environment').iteritems()]) + "\n"
        # Some tests end up using sockets, files on disk, etc. and can get
        # confused if more than one test runs at a time. The generator
        # flag 'xcode_serialize_all_test_runs' controls whether all tests
        # are forced to run serially. It defaults to True. To get serial
        # runs, this little bit of Python does the same as the Linux flock
        # utility to make sure only one test runs at a time.
command_prefix = ''
if is_test and serialize_all_tests:
command_prefix = \
"""python -c "import fcntl, subprocess, sys
file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
sys.exit(subprocess.call(sys.argv[1:]))" """
        # If we were unable to exec for some reason, we want to exit
        # with an error, and fix up variable references to be shell
        # syntax instead of Xcode syntax.
script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
gyp.common.EncodePOSIXShellList(command.get('action')))
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'shellScript': script,
'showEnvVarsInLog': 0,
})
run_target.AppendProperty('buildPhases', ssbp)
# Add the run target to the project file.
targets.append(run_target)
if is_test:
run_test_targets.append(run_target)
xcode_target.test_runner = run_target
# Make sure that the list of targets being replaced is the same length as
# the one replacing it, but allow for the added test runner targets.
assert len(self.project._properties['targets']) == \
len(ordinary_targets) + len(support_targets)
self.project._properties['targets'] = targets
# Get rid of unnecessary levels of depth in groups like the Source group.
self.project.RootGroupsTakeOverOnlyChildren(True)
# Sort the groups nicely. Do this after sorting the targets, because the
# Products group is sorted based on the order of the targets.
self.project.SortGroups()
# Create an "All" target if there's more than one target in this project
# file and the project didn't define its own "All" target. Put a generated
# "All" target first so that people opening up the project for the first
# time will build everything by default.
if len(targets_for_all) > 1 and not has_custom_all:
xccl = CreateXCConfigurationList(configurations)
all_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'All',
},
parent=self.project)
for target in targets_for_all:
all_target.AddDependency(target)
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._properties. It's important to get the "All" target first,
# though.
self.project._properties['targets'].insert(0, all_target)
# The same, but for run_test_targets.
if len(run_test_targets) > 1:
xccl = CreateXCConfigurationList(configurations)
run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
{
'buildConfigurationList': xccl,
'name': 'Run All Tests',
},
parent=self.project)
for run_test_target in run_test_targets:
run_all_tests_target.AddDependency(run_test_target)
# Insert after the "All" target, which must exist if there is more than
# one run_test_target.
self.project._properties['targets'].insert(1, run_all_tests_target)
def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
# Finalize2 needs to happen in a separate step because the process of
# updating references to other projects depends on the ordering of targets
# within remote project files. Finalize1 is responsible for sorting duty,
# and once all project files are sorted, Finalize2 can come in and update
# these references.
# To support making a "test runner" target that will run all the tests
# that are direct dependents of any given target, we look for
# xcode_create_dependents_test_runner being set on an Aggregate target,
    # and generate a second target that will run the test runners found under
# the marked target.
for bf_tgt in self.build_file_dict['targets']:
if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
tgt_name = bf_tgt['target_name']
toolset = bf_tgt['toolset']
qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
tgt_name, toolset)
xcode_target = xcode_targets[qualified_target]
if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
# Collect all the run test targets.
all_run_tests = []
pbxtds = xcode_target.GetProperty('dependencies')
for pbxtd in pbxtds:
pbxcip = pbxtd.GetProperty('targetProxy')
dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
target_dict = xcode_target_to_target_dict[dependency_xct]
if target_dict and int(target_dict.get('test', 0)):
assert dependency_xct.test_runner
all_run_tests.append(dependency_xct.test_runner)
# Directly depend on all the runners as they depend on the target
# that builds them.
if len(all_run_tests) > 0:
run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
'name': 'Run %s Tests' % tgt_name,
'productName': tgt_name,
},
parent=self.project)
for run_test_target in all_run_tests:
run_all_target.AddDependency(run_test_target)
# Insert the test runner after the related target.
idx = self.project._properties['targets'].index(xcode_target)
self.project._properties['targets'].insert(idx + 1, run_all_target)
# Update all references to other projects, to make sure that the lists of
# remote products are complete. Otherwise, Xcode will fill them in when
# it opens the project file, which will result in unnecessary diffs.
# TODO(mark): This is evil because it relies on internal knowledge of
# PBXProject._other_pbxprojects.
for other_pbxproject in self.project._other_pbxprojects.keys():
self.project.AddOrGetProjectReference(other_pbxproject)
self.project.SortRemoteProductReferences()
# Give everything an ID.
self.project_file.ComputeIDs()
# Make sure that no two objects in the project file have the same ID. If
# multiple objects wind up with the same ID, upon loading the file, Xcode
# will only recognize one object (the last one in the file?) and the
# results are unpredictable.
self.project_file.EnsureNoIDCollisions()
def Write(self):
# Write the project file to a temporary location first. Xcode watches for
# changes to the project file and presents a UI sheet offering to reload
# the project when it does change. However, in some cases, especially when
# multiple projects are open or when Xcode is busy, things don't work so
# seamlessly. Sometimes, Xcode is able to detect that a project file has
# changed but can't unload it because something else is referencing it.
# To mitigate this problem, and to avoid even having Xcode present the UI
# sheet when an open project is rewritten for inconsequential changes, the
# project file is written to a temporary file in the xcodeproj directory
# first. The new temporary file is then compared to the existing project
# file, if any. If they differ, the new file replaces the old; otherwise,
# the new project file is simply deleted. Xcode properly detects a file
# being renamed over an open project file as a change and so it remains
# able to present the "project file changed" sheet under this system.
# Writing to a temporary file first also avoids the possible problem of
# Xcode rereading an incomplete project file.
(output_fd, new_pbxproj_path) = \
tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
dir=self.path)
try:
output_file = os.fdopen(output_fd, 'wb')
self.project_file.Print(output_file)
output_file.close()
pbxproj_path = os.path.join(self.path, 'project.pbxproj')
same = False
try:
same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(new_pbxproj_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(new_pbxproj_path, 0666 & ~umask)
os.rename(new_pbxproj_path, pbxproj_path)
except Exception:
# Don't leave turds behind. In fact, if this code was responsible for
# creating the xcodeproj directory, get rid of that too.
os.unlink(new_pbxproj_path)
if self.created_dir:
shutil.rmtree(self.path, True)
raise
cached_xcode_version = None
def InstalledXcodeVersion():
"""Fetches the installed version of Xcode, returns empty string if it is
unable to figure it out."""
global cached_xcode_version
  if cached_xcode_version is not None:
return cached_xcode_version
# Default to an empty string
cached_xcode_version = ''
# Collect the xcodebuild's version information.
try:
import subprocess
cmd = ['/usr/bin/xcodebuild', '-version']
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
xcodebuild_version_info = proc.communicate()[0]
# Any error, return empty string
if proc.returncode:
xcodebuild_version_info = ''
except OSError:
# We failed to launch the tool
xcodebuild_version_info = ''
# Pull out the Xcode version itself.
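  # Illustrative output (assumed; the exact format varies by Xcode release):
  #   Xcode 3.2.6
  #   Component versions: DevToolsCore-1809.0; DevToolsSupport-1806.0
  # The regex below captures whatever follows "Xcode " on its own line.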
match_line = re.search('^Xcode (.*)$', xcodebuild_version_info, re.MULTILINE)
if match_line:
cached_xcode_version = match_line.group(1)
# Done!
return cached_xcode_version
def AddSourceToTarget(source, pbxp, xct):
# TODO(mark): Perhaps this can be made a little bit fancier.
source_extensions = ['c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's']
basename = posixpath.basename(source)
(root, ext) = posixpath.splitext(basename)
if ext != '':
ext = ext[1:].lower()
if ext in source_extensions:
xct.SourcesPhase().AddFile(source)
else:
# Files that aren't added to a sources build phase can still go into
# the project file, just not as part of a build phase.
pbxp.AddOrGetFileInRootGroup(source)
def AddResourceToTarget(resource, pbxp, xct):
# TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
# where it's used.
xct.ResourcesPhase().AddFile(resource)
_xcode_variable_re = re.compile(r'(\$\((.*?)\))')
def ExpandXcodeVariables(string, expansions):
"""Expands Xcode-style $(VARIABLES) in string per the expansions dict.
In some rare cases, it is appropriate to expand Xcode variables when a
project file is generated. For any substring $(VAR) in string, if VAR is a
key in the expansions dict, $(VAR) will be replaced with expansions[VAR].
Any $(VAR) substring in string for which VAR is not a key in the expansions
dict will remain in the returned string.
"""
matches = _xcode_variable_re.findall(string)
  if not matches:
return string
matches.reverse()
for match in matches:
(to_replace, variable) = match
if not variable in expansions:
continue
replacement = expansions[variable]
string = re.sub(re.escape(to_replace), replacement, string)
return string
def GenerateOutput(target_list, target_dicts, data, params):
options = params['options']
generator_flags = params.get('generator_flags', {})
parallel_builds = generator_flags.get('xcode_parallel_builds', True)
serialize_all_tests = \
generator_flags.get('xcode_serialize_all_test_runs', True)
xcode_projects = {}
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
xcode_projects[build_file] = xcp
pbxp = xcp.project
if parallel_builds:
pbxp.SetProperty('attributes',
{'BuildIndependentTargetsInParallel': 'YES'})
main_group = pbxp.GetProperty('mainGroup')
build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
main_group.AppendChild(build_group)
for included_file in build_file_dict['included_files']:
build_group.AddOrGetFileByPath(included_file, False)
xcode_targets = {}
xcode_target_to_target_dict = {}
for qualified_target in target_list:
[build_file, target_name, toolset] = \
gyp.common.ParseQualifiedTarget(qualified_target)
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in xcode build (target %s)' %
qualified_target)
configuration_names = [spec['default_configuration']]
for configuration_name in sorted(spec['configurations'].keys()):
if configuration_name not in configuration_names:
configuration_names.append(configuration_name)
xcp = xcode_projects[build_file]
pbxp = xcp.project
# Set up the configurations for the target according to the list of names
# supplied.
xccl = CreateXCConfigurationList(configuration_names)
# Create an XCTarget subclass object for the target. We use the type
# with "+bundle" appended if the target has "mac_bundle" set.
_types = {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
'executable+bundle': 'com.apple.product-type.application',
'loadable_module+bundle': 'com.apple.product-type.bundle',
'shared_library+bundle': 'com.apple.product-type.framework',
}
target_properties = {
'buildConfigurationList': xccl,
'name': target_name,
}
type = spec['type']
is_bundle = int(spec.get('mac_bundle', 0))
if type != 'none':
type_bundle_key = type
if is_bundle:
type_bundle_key += '+bundle'
xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
try:
target_properties['productType'] = _types[type_bundle_key]
except KeyError, e:
gyp.common.ExceptionAppend(e, "-- unknown product type while "
"writing target %s" % target_name)
raise
else:
xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
target_product_name = spec.get('product_name', None)
if target_product_name:
target_properties['productName'] = target_product_name
xct = xctarget_type(target_properties, parent=pbxp,
force_extension=spec.get('product_extension', None))
pbxp.AppendProperty('targets', xct)
xcode_targets[qualified_target] = xct
xcode_target_to_target_dict[xct] = spec
# Xcode does not have a distinct type for loadable_modules that are pure
    # BSD targets (i.e. unbundled). It uses the same setup as a shared_library
    # but the mach-o type is explicitly set in the settings. So before we do
# anything else, for this one case, we stuff in that one setting. This
# would allow the other data in the spec to change it if need be.
if type == 'loadable_module' and not is_bundle:
xccl.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
spec_actions = spec.get('actions', [])
spec_rules = spec.get('rules', [])
# Xcode has some "issues" with checking dependencies for the "Compile
# sources" step with any source files/headers generated by actions/rules.
# To work around this, if a target is building anything directly (not
# type "none"), then a second target as used to run the GYP actions/rules
# and is made a dependency of this target. This way the work is done
# before the dependency checks for what should be recompiled.
support_xct = None
if type != 'none' and (spec_actions or spec_rules):
      support_xccl = CreateXCConfigurationList(configuration_names)
support_target_properties = {
'buildConfigurationList': support_xccl,
'name': target_name + ' Support',
}
if target_product_name:
support_target_properties['productName'] = \
target_product_name + ' Support'
support_xct = \
gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
parent=pbxp)
pbxp.AppendProperty('targets', support_xct)
xct.AddDependency(support_xct)
# Hang the support target off the main target so it can be tested/found
# by the generator during Finalize.
xct.support_target = support_xct
prebuild_index = 0
# Add custom shell script phases for "actions" sections.
for action in spec_actions:
# There's no need to write anything into the script to ensure that the
# output directories already exist, because Xcode will look at the
# declared outputs and automatically ensure that they exist for us.
# Do we have a message to print when this action runs?
message = action.get('message')
if message:
message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
else:
message = ''
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(action['action'])
# Convert Xcode-type variable references to sh-compatible environment
# variable references.
message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
action_string)
script = ''
# Include the optional message
if message_sh:
script += message_sh + '\n'
# Be sure the script runs in exec, and that if exec fails, the script
# exits signalling an error.
script += 'exec ' + action_string_sh + '\nexit 1\n'
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': action['inputs'],
'name': 'Action "' + action['action_name'] + '"',
'outputPaths': action['outputs'],
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# TODO(mark): Should verify that at most one of these is specified.
if int(action.get('process_outputs_as_sources', False)):
for output in action['outputs']:
AddSourceToTarget(output, pbxp, xct)
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
for output in action['outputs']:
AddResourceToTarget(output, pbxp, xct)
# tgt_mac_bundle_resources holds the list of bundle resources so
# the rule processing can check against it.
if is_bundle:
tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
else:
tgt_mac_bundle_resources = []
# Add custom shell script phases driving "make" for "rules" sections.
#
# Xcode's built-in rule support is almost powerful enough to use directly,
# but there are a few significant deficiencies that render them unusable.
# There are workarounds for some of its inadequacies, but in aggregate,
# the workarounds added complexity to the generator, and some workarounds
# actually require input files to be crafted more carefully than I'd like.
# Consequently, until Xcode rules are made more capable, "rules" input
# sections will be handled in Xcode output by shell script build phases
# performed prior to the compilation phase.
#
# The following problems with Xcode rules were found. The numbers are
    # Apple radar IDs. I hope that these shortcomings are addressed; I really
    # liked having the rules handled directly in Xcode during the period that
    # I was prototyping this.
#
# 6588600 Xcode compiles custom script rule outputs too soon, compilation
# fails. This occurs when rule outputs from distinct inputs are
# interdependent. The only workaround is to put rules and their
# inputs in a separate target from the one that compiles the rule
# outputs. This requires input file cooperation and it means that
# process_outputs_as_sources is unusable.
# 6584932 Need to declare that custom rule outputs should be excluded from
# compilation. A possible workaround is to lie to Xcode about a
# rule's output, giving it a dummy file it doesn't know how to
# compile. The rule action script would need to touch the dummy.
# 6584839 I need a way to declare additional inputs to a custom rule.
# A possible workaround is a shell script phase prior to
# compilation that touches a rule's primary input files if any
# would-be additional inputs are newer than the output. Modifying
# the source tree - even just modification times - feels dirty.
# 6564240 Xcode "custom script" build rules always dump all environment
    #          variables. This is a low-priority problem and is not a
# show-stopper.
rules_by_ext = {}
for rule in spec_rules:
rules_by_ext[rule['extension']] = rule
# First, some definitions:
#
# A "rule source" is a file that was listed in a target's "sources"
# list and will have a rule applied to it on the basis of matching the
# rule's "extensions" attribute. Rule sources are direct inputs to
# rules.
#
# Rule definitions may specify additional inputs in their "inputs"
# attribute. These additional inputs are used for dependency tracking
# purposes.
#
# A "concrete output" is a rule output with input-dependent variables
# resolved. For example, given a rule with:
# 'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
# if the target's "sources" list contained "one.ext" and "two.ext",
# the "concrete output" for rule input "two.ext" would be "two.cc". If
# a rule specifies multiple outputs, each input file that the rule is
# applied to will have the same number of concrete outputs.
#
# If any concrete outputs are outdated or missing relative to their
# corresponding rule_source or to any specified additional input, the
# rule action must be performed to generate the concrete outputs.
# concrete_outputs_by_rule_source will have an item at the same index
# as the rule['rule_sources'] that it corresponds to. Each item is a
# list of all of the concrete outputs for the rule_source.
concrete_outputs_by_rule_source = []
# concrete_outputs_all is a flat list of all concrete outputs that this
# rule is able to produce, given the known set of input files
# (rule_sources) that apply to it.
concrete_outputs_all = []
# messages & actions are keyed by the same indices as rule['rule_sources']
# and concrete_outputs_by_rule_source. They contain the message and
# action to perform after resolving input-dependent variables. The
# message is optional, in which case None is stored for each rule source.
messages = []
actions = []
for rule_source in rule.get('rule_sources', []):
rule_source_basename = posixpath.basename(rule_source)
(rule_source_root, rule_source_ext) = \
posixpath.splitext(rule_source_basename)
# These are the same variable names that Xcode uses for its own native
# rule support. Because Xcode's rule engine is not being used, they
# need to be expanded as they are written to the makefile.
rule_input_dict = {
'INPUT_FILE_BASE': rule_source_root,
'INPUT_FILE_SUFFIX': rule_source_ext,
'INPUT_FILE_NAME': rule_source_basename,
'INPUT_FILE_PATH': rule_source,
}
concrete_outputs_for_this_rule_source = []
for output in rule.get('outputs', []):
# Fortunately, Xcode and make both use $(VAR) format for their
# variables, so the expansion is the only transformation necessary.
          # Any remaining $(VAR)-type variables in the string can be given
# directly to make, which will pick up the correct settings from
# what Xcode puts into the environment.
concrete_output = ExpandXcodeVariables(output, rule_input_dict)
concrete_outputs_for_this_rule_source.append(concrete_output)
# Add all concrete outputs to the project.
pbxp.AddOrGetFileInRootGroup(concrete_output)
concrete_outputs_by_rule_source.append( \
concrete_outputs_for_this_rule_source)
concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)
# TODO(mark): Should verify that at most one of these is specified.
if int(rule.get('process_outputs_as_sources', False)):
for output in concrete_outputs_for_this_rule_source:
AddSourceToTarget(output, pbxp, xct)
# If the file came from the mac_bundle_resources list or if the rule
# is marked to process outputs as bundle resource, do so.
was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
for output in concrete_outputs_for_this_rule_source:
AddResourceToTarget(output, pbxp, xct)
# Do we have a message to print when this rule runs?
message = rule.get('message')
if message:
message = gyp.common.EncodePOSIXShellArgument(message)
message = '@echo note: ' + ExpandXcodeVariables(message,
rule_input_dict)
messages.append(message)
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(rule['action'])
action = ExpandXcodeVariables(action_string, rule_input_dict)
actions.append(action)
if len(concrete_outputs_all) > 0:
        # TODO(mark): There's a possibility for collision here. Consider
# target "t" rule "A_r" and target "t_A" rule "r".
makefile_name = '%s_%s.make' % (target_name, rule['rule_name'])
makefile_path = os.path.join(xcode_projects[build_file].path,
makefile_name)
# TODO(mark): try/close? Write to a temporary file and swap it only
# if it's got changes?
makefile = open(makefile_path, 'wb')
# make will build the first target in the makefile by default. By
# convention, it's called "all". List all (or at least one)
# concrete output for each rule source as a prerequisite of the "all"
# target.
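        # An illustrative (hypothetical) "all" target, assuming rule sources
        # one.ext and two.ext with outputs ['$(INPUT_FILE_BASE).cc']:
        #   all: \
        #       one.cc \
        #       two.cc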
makefile.write('all: \\\n')
for concrete_output_index in \
xrange(0, len(concrete_outputs_by_rule_source)):
          # Only list the first (index [0]) concrete output of each input
          # in the "all" target. Otherwise, a parallel make (-j > 1) would
          # attempt to process each input multiple times simultaneously.
          # If it weren't for that, "all" could simply contain the entire
          # list of concrete_outputs_all.
concrete_output = \
concrete_outputs_by_rule_source[concrete_output_index][0]
if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (concrete_output, eol))
for (rule_source, concrete_outputs, message, action) in \
zip(rule['rule_sources'], concrete_outputs_by_rule_source,
messages, actions):
makefile.write('\n')
# Add a rule that declares it can build each concrete output of a
# rule source. Collect the names of the directories that are
# required.
concrete_output_dirs = []
for concrete_output_index in xrange(0, len(concrete_outputs)):
concrete_output = concrete_outputs[concrete_output_index]
if concrete_output_index == 0:
bol = ''
else:
bol = ' '
makefile.write('%s%s \\\n' % (bol, concrete_output))
concrete_output_dir = posixpath.dirname(concrete_output)
if (concrete_output_dir and
concrete_output_dir not in concrete_output_dirs):
concrete_output_dirs.append(concrete_output_dir)
makefile.write(' : \\\n')
# The prerequisites for this rule are the rule source itself and
# the set of additional rule inputs, if any.
prerequisites = [rule_source]
prerequisites.extend(rule.get('inputs', []))
for prerequisite_index in xrange(0, len(prerequisites)):
prerequisite = prerequisites[prerequisite_index]
if prerequisite_index == len(prerequisites) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (prerequisite, eol))
# Make sure that output directories exist before executing the rule
# action.
# TODO(mark): quote the list of concrete_output_dirs.
if len(concrete_output_dirs) > 0:
makefile.write('\tmkdir -p %s\n' % ' '.join(concrete_output_dirs))
# The rule message and action have already had the necessary variable
# substitutions performed.
if message:
makefile.write('\t%s\n' % message)
makefile.write('\t%s\n' % action)
makefile.close()
# It might be nice to ensure that needed output directories exist
# here rather than in each target in the Makefile, but that wouldn't
# work if there ever was a concrete output that had an input-dependent
# variable anywhere other than in the leaf position.
# Don't declare any inputPaths or outputPaths. If they're present,
# Xcode will provide a slight optimization by only running the script
# phase if any output is missing or outdated relative to any input.
# Unfortunately, it will also assume that all outputs are touched by
# the script, and if the outputs serve as files in a compilation
# phase, they will be unconditionally rebuilt. Since make might not
# rebuild everything that could be declared here as an output, this
# extra compilation activity is unnecessary. With inputPaths and
# outputPaths not supplied, make will always be called, but it knows
# enough to not do anything when everything is up-to-date.
# To help speed things up, pass -j COUNT to make so it does some work
# in parallel. Don't use ncpus because Xcode will build ncpus targets
# in parallel and if each target happens to have a rules step, there
# would be ncpus^2 things going. With a machine that has 2 quad-core
# Xeons, a build can quickly run out of processes based on
# scheduling/other tasks, and randomly failing builds are no good.
script = \
"""JOB_COUNT="$(sysctl -n hw.ncpu)"
if [ "${JOB_COUNT}" -gt 4 ]; then
JOB_COUNT=4
fi
exec "${DEVELOPER_BIN_DIR}/make" -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
exit 1
""" % makefile_name
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Rule "' + rule['rule_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# Extra rule inputs also go into the project file. Concrete outputs were
# already added when they were computed.
for group in ['inputs', 'inputs_excluded']:
for item in rule.get(group, []):
pbxp.AddOrGetFileInRootGroup(item)
# Add "sources".
for source in spec.get('sources', []):
(source_root, source_extension) = posixpath.splitext(source)
if source_extension[1:] not in rules_by_ext:
# AddSourceToTarget will add the file to a root group if it's not
# already there.
AddSourceToTarget(source, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(source)
# Add "mac_bundle_resources" if it's a bundle of any type.
if is_bundle:
for resource in tgt_mac_bundle_resources:
(resource_root, resource_extension) = posixpath.splitext(resource)
if resource_extension[1:] not in rules_by_ext:
AddResourceToTarget(resource, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(resource)
# Add "copies".
for copy_group in spec.get('copies', []):
pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
'name': 'Copy to ' + copy_group['destination']
},
parent=xct)
dest = copy_group['destination']
if dest[0] not in ('/', '$'):
# Relative paths are relative to $(SRCROOT).
dest = '$(SRCROOT)/' + dest
pbxcp.SetDestination(dest)
# TODO(mark): The usual comment about this knowing too much about
# gyp.xcodeproj_file internals applies.
xct._properties['buildPhases'].insert(prebuild_index, pbxcp)
for file in copy_group['files']:
pbxcp.AddFile(file)
# Excluded files can also go into the project file.
for key in ['sources', 'mac_bundle_resources']:
excluded_key = key + '_excluded'
for item in spec.get(excluded_key, []):
pbxp.AddOrGetFileInRootGroup(item)
# So can "inputs" and "outputs" sections of "actions" groups.
for action in spec.get('actions', []):
groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
for group in groups:
for item in action.get(group, []):
# Exclude anything in BUILT_PRODUCTS_DIR. They're products, not
# sources.
if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
pbxp.AddOrGetFileInRootGroup(item)
for postbuild in spec.get('postbuilds', []):
action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
script = 'exec ' + action_string_sh + '\nexit 1\n'
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
xct.AppendProperty('buildPhases', ssbp)
# Add dependencies before libraries, because adding a dependency may imply
# adding a library. It's preferable to keep dependencies listed first
# during a link phase so that they can override symbols that would
# otherwise be provided by libraries, which will usually include system
# libraries. On some systems, ld is finicky and even requires the
# libraries to be ordered in such a way that unresolved symbols in
# earlier-listed libraries may only be resolved by later-listed libraries.
# The Mac linker doesn't work that way, but other platforms do, and so
# their linker invocations need to be constructed in this way. There's
# no compelling reason for Xcode's linker invocations to differ.
if 'dependencies' in spec:
for dependency in spec['dependencies']:
xct.AddDependency(xcode_targets[dependency])
# The support project also gets the dependencies (in case they are
# needed for the actions/rules to work).
if support_xct:
support_xct.AddDependency(xcode_targets[dependency])
if 'libraries' in spec:
for library in spec['libraries']:
xct.FrameworksPhase().AddFile(library)
# Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
# I wish Xcode handled this automatically.
# TODO(mark): this logic isn't right. There are certain directories
# that are always searched, we should check to see if the library is
# in one of those directories, and if not, we should do the
# AppendBuildSetting thing.
if not posixpath.isabs(library) and not library.startswith('$'):
# TODO(mark): Need to check to see if library_dir is already in
# LIBRARY_SEARCH_PATHS.
library_dir = posixpath.dirname(library)
xct.AppendBuildSetting('LIBRARY_SEARCH_PATHS', library_dir)
for configuration_name in configuration_names:
configuration = spec['configurations'][configuration_name]
xcbc = xct.ConfigurationNamed(configuration_name)
for include_dir in configuration.get('mac_framework_dirs', []):
xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
for include_dir in configuration.get('include_dirs', []):
xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
if 'defines' in configuration:
for define in configuration['defines']:
# If the define is of the form A="B", escape the quotes
# yielding A=\"\\\"B\\\"\". The extra set of quotes tell
# Xcode NOT to split on spaces, and still define a string
# literal (with quotes).
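          # For example (illustrative): the define 'NAME="some value"'
          # becomes 'NAME="\"some value\""'.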
set_define = re.sub(r'^([^=]*=)"([^"]*)"$',
r'\1"\"\2\""', define)
xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
if 'xcode_settings' in configuration:
for xck, xcv in configuration['xcode_settings'].iteritems():
xcbc.SetBuildSetting(xck, xcv)
build_files = []
for build_file, build_file_dict in data.iteritems():
if build_file.endswith('.gyp'):
build_files.append(build_file)
for build_file in build_files:
xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)
for build_file in build_files:
xcode_projects[build_file].Finalize2(xcode_targets,
xcode_target_to_target_dict)
for build_file in build_files:
xcode_projects[build_file].Write()
| bsd-3-clause |
prds21/barrial-movie | barrial-movie/channels/mitube.py | 2 | 6850 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# pelisalacarta - XBMC Plugin
# Channel for searching and watching YouTube videos (by gasmuro1)
#
#------------------------------------------------------------
import urlparse, urllib2, urllib, re, pafy
import os, sys, json, time
if sys.version_info[:2] >= (3, 0):
# pylint: disable=E0611,F0401
uni, byt, xinput = str, bytes, input
from urllib.parse import urlencode
else:
    uni, byt, xinput = unicode, str, raw_input
from urllib import urlencode
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
DEBUG = config.get_setting("debug")
__category__ = "A"
__type__ = "generic"
__title__ = "mitube"
__channel__ = "mitube"
__language__ = "ES"
__creationdate__ = "20111014"
ANIMEFLV_REQUEST_HEADERS = []
ANIMEFLV_REQUEST_HEADERS.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10.8; rv:22.0) Gecko/20100101 Firefox/22.0"])
ANIMEFLV_REQUEST_HEADERS.append(["Accept-Encoding","gzip, deflate"])
ANIMEFLV_REQUEST_HEADERS.append(["Cache-Control","max-age=0"])
ANIMEFLV_REQUEST_HEADERS.append(["Connection","keep-alive"])
ANIMEFLV_REQUEST_HEADERS.append(["Accept","text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"])
ANIMEFLV_REQUEST_HEADERS.append(["Accept-Language","es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3"])
def isGeneric():
return True
def mainlist(item):
logger.info("[mitube.py] mainlist")
itemlist = []
itemlist.append( Item(channel=__channel__, action="search" , title="Buscar" , url="https://gdata.youtube.com/feeds/api/videos" ))
return itemlist
def utf8_encode(x):
""" Encode Unicode. """
return x.encode("utf8") if type(x) == uni else x
def generate_search_qs(term, page, result_count=None):
""" Return query string. """
aliases = dict(relevance="relevance", date="published", rating="rating",
views="viewCount")
term = utf8_encode(term)
qs = {
'q': term,
'v': 2,
'alt': 'jsonc',
'start-index': ((page - 1) * 50 + 1) or 1,
'safeSearch': "none",
'max-results': 50,
'paid-content': "false",
'orderby': "relevance"
}
return qs
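# Illustrative example (assumed): generate_search_qs('django', 2) sets
# 'start-index' to 51, so page 2 covers results 51-100 once the dict is
# urlencoded into the gdata request URL built in search() below.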
def search(item, texto):
logger.info("[mitube.py] search")
term=texto
original_term = term
print ("search for %s", original_term)
url = "https://gdata.youtube.com/feeds/api/videos"
query = generate_search_qs(urllib2.unquote(term), 1)
##have_results = _search(url, original_term, query)
item.url = url + "?" + urlencode(query) if query else url
# use cached value if exists
try:
return series(item)
    # Catch the exception so a failing channel does not interrupt the global search
except:
import sys
for line in sys.exc_info():
print line
logger.error( "%s" % line )
return []
def get_tracks_from_json(jsons):
""" Get search results from web page. """
try:
items = jsons['data']['items']
except KeyError:
items = []
songs = []
for item in items:
ytid = item['id']
cursong = Video(ytid=ytid, title=item['title'].strip(),
length=int(item['duration']))
likes = item.get('likeCount', "0")
likes = int(re.sub(r"\D", "", likes))
total = item.get('ratingCount', 0)
dislikes = total - likes
g.meta[ytid] = dict(
rating=uni(item.get('rating', "0."))
[:4].ljust(4, "0"),
uploader=item['uploader'],
category=item['category'],
aspect=item.get('aspectRatio', "custom"),
uploaded=yt_datetime(item['uploaded'])[1],
likes=uni(num_repr(likes)),
dislikes=uni(num_repr(dislikes)),
commentCount=uni(num_repr(item.get('commentCount', 0))),
viewCount=uni(num_repr(item.get("viewCount", 0))),
title=item['title'],
length=uni(fmt_time(cursong.length)))
songs.append(cursong)
if not items:
dbg("got unexpected data or no search results")
return False
return songs
def fmt_time(seconds):
""" Format number of seconds to %H:%M:%S. """
hms = time.strftime('%H:%M:%S', time.gmtime(int(seconds)))
H, M, S = hms.split(":")
if H == "00":
hms = M + ":" + S
elif H == "01" and int(M) < 40:
hms = uni(int(M) + 60) + ":" + S
elif H.startswith("0"):
hms = ":".join([H[1], M, S])
return hms
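# Illustrative behavior of fmt_time (derived from the logic above):
#   fmt_time(75) -> '01:15' (hours dropped when zero)
#   fmt_time(3661) -> '61:01' (1h01m folded into minutes while under 100)
#   fmt_time(7261) -> '2:01:01' (leading zero stripped from the hour)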
def series(item):
logger.info("[mitube.py] series")
    # Download the page
# print item.url
data = scrapertools.cache_page(item.url) #, headers = ANIMEFLV_REQUEST_HEADERS)
# print data
wdata = json.loads(data)
# print wdata
## songs = get_tracks_from_json(wdata)
try:
items = wdata['data']['items']
except KeyError:
items = []
itemlist = []
##print items
for item in items:
print "hola"
ytid = item['id']
        title = item['title'].encode('ascii', 'ignore')
url="https://www.youtube.com/watch?v="+ytid
thumbnail=item['thumbnail']['hqDefault'].replace('https','http')
show=""
plot=fmt_time(item['duration'])
## print "title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"], plot[="+plot+"]"
if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"], plot[="+plot+"]")
##itemlist.append( Item(channel=__channel__, action="play", server="youtube",title=title, url=url , thumbnail=thumbnail , fanart=thumbnail, folder=False) )
itemlist.append( Item(channel=__channel__, action="ver",title=title+" "+plot, url=url , thumbnail=thumbnail ,plot=plot, viewmode="movie_with_plot") )
return itemlist
def descargabg(item):
logger.info("[mitube.py] get_video_url(page_url='%s')" % item.url)
os.system('echo 2|mpsyt dlurl '+item.url+' &')
itemlist = []
itemlist.append( Item(title='Bajando'))
itemlist.append( Item(title='Para ver el video ve a la carpeta youtube y busca el archivo'))
return itemlist
def ver(item):
logger.info("[mitube.py] get_video_url(page_url='%s')" % item.url)
video = pafy.new(item.url)
itemlist = []
streams = video.streams
for s in streams:
itemlist.append( Item(channel=__channel__, action="play_video", server="directo", title=s.resolution+" "+s.extension, url=s.url , thumbnail=item.thumbnail , fanart=item.thumbnail, folder=False))
return itemlist
# Automatic channel check: this function must return "True" if everything in the channel is OK.
def test():
bien = True
return bien
| gpl-3.0 |
mbareta/edx-platform-ft | cms/djangoapps/contentstore/views/tests/test_container_page.py | 158 | 9232 | """
Unit tests for the container page.
"""
import re
import datetime
from pytz import UTC
from mock import patch, Mock
from django.http import Http404
from django.test.client import RequestFactory
from django.utils import http
import contentstore.views.component as views
from contentstore.views.tests.utils import StudioPageTestCase
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.tests.factories import ItemFactory
class ContainerPageTestCase(StudioPageTestCase):
"""
Unit tests for the container page.
"""
container_view = 'container_preview'
reorderable_child_view = 'reorderable_container_child_preview'
def setUp(self):
super(ContainerPageTestCase, self).setUp()
self.vertical = self._create_item(self.sequential.location, 'vertical', 'Unit')
self.html = self._create_item(self.vertical.location, "html", "HTML")
self.child_container = self._create_item(self.vertical.location, 'split_test', 'Split Test')
self.child_vertical = self._create_item(self.child_container.location, 'vertical', 'Child Vertical')
self.video = self._create_item(self.child_vertical.location, "video", "My Video")
self.store = modulestore()
past = datetime.datetime(1970, 1, 1, tzinfo=UTC)
future = datetime.datetime.now(UTC) + datetime.timedelta(days=1)
self.released_private_vertical = self._create_item(
parent_location=self.sequential.location, category='vertical', display_name='Released Private Unit',
start=past)
self.unreleased_private_vertical = self._create_item(
parent_location=self.sequential.location, category='vertical', display_name='Unreleased Private Unit',
start=future)
self.released_public_vertical = self._create_item(
parent_location=self.sequential.location, category='vertical', display_name='Released Public Unit',
start=past)
self.unreleased_public_vertical = self._create_item(
parent_location=self.sequential.location, category='vertical', display_name='Unreleased Public Unit',
start=future)
self.store.publish(self.unreleased_public_vertical.location, self.user.id)
self.store.publish(self.released_public_vertical.location, self.user.id)
def test_container_html(self):
self._test_html_content(
self.child_container,
expected_section_tag=(
'<section class="wrapper-xblock level-page is-hidden studio-xblock-wrapper" '
'data-locator="{0}" data-course-key="{0.course_key}">'.format(self.child_container.location)
),
expected_breadcrumbs=(
r'<a href="/course/{course}{section_parameters}" class="{classes}">\s*Week 1\s*</a>\s*'
r'<a href="/course/{course}{subsection_parameters}" class="{classes}">\s*Lesson 1\s*</a>\s*'
r'<a href="/container/{unit}" class="{classes}">\s*Unit\s*</a>'
).format(
course=re.escape(unicode(self.course.id)),
unit=re.escape(unicode(self.vertical.location)),
classes='navigation-item navigation-link navigation-parent',
section_parameters=re.escape(u'?show={}'.format(http.urlquote(self.chapter.location))),
subsection_parameters=re.escape(u'?show={}'.format(http.urlquote(self.sequential.location))),
),
)
def test_container_on_container_html(self):
"""
Create the scenario of an xblock with children (non-vertical) on the container page.
This should create a container page that is a child of another container page.
"""
draft_container = self._create_item(self.child_container.location, "wrapper", "Wrapper")
self._create_item(draft_container.location, "html", "Child HTML")
def test_container_html(xblock):
self._test_html_content(
xblock,
expected_section_tag=(
'<section class="wrapper-xblock level-page is-hidden studio-xblock-wrapper" '
'data-locator="{0}" data-course-key="{0.course_key}">'.format(draft_container.location)
),
expected_breadcrumbs=(
r'<a href="/course/{course}{section_parameters}" class="{classes}">\s*Week 1\s*</a>\s*'
r'<a href="/course/{course}{subsection_parameters}" class="{classes}">\s*Lesson 1\s*</a>\s*'
r'<a href="/container/{unit}" class="{classes}">\s*Unit\s*</a>\s*'
r'<a href="/container/{split_test}" class="{classes}">\s*Split Test\s*</a>'
).format(
course=re.escape(unicode(self.course.id)),
unit=re.escape(unicode(self.vertical.location)),
split_test=re.escape(unicode(self.child_container.location)),
classes='navigation-item navigation-link navigation-parent',
section_parameters=re.escape(u'?show={}'.format(http.urlquote(self.chapter.location))),
subsection_parameters=re.escape(u'?show={}'.format(http.urlquote(self.sequential.location))),
),
)
# Test the draft version of the container
test_container_html(draft_container)
# Now publish the unit and validate again
self.store.publish(self.vertical.location, self.user.id)
draft_container = self.store.get_item(draft_container.location)
test_container_html(draft_container)
def _test_html_content(self, xblock, expected_section_tag, expected_breadcrumbs):
"""
Get the HTML for a container page and verify the section tag is correct
and the breadcrumbs trail is correct.
"""
html = self.get_page_html(xblock)
self.assertIn(expected_section_tag, html)
self.assertRegexpMatches(html, expected_breadcrumbs)
def test_public_container_preview_html(self):
"""
Verify that a public xblock's container preview returns the expected HTML.
"""
published_unit = self.store.publish(self.vertical.location, self.user.id)
published_child_container = self.store.get_item(self.child_container.location)
published_child_vertical = self.store.get_item(self.child_vertical.location)
self.validate_preview_html(published_unit, self.container_view)
self.validate_preview_html(published_child_container, self.container_view)
self.validate_preview_html(published_child_vertical, self.reorderable_child_view)
def test_draft_container_preview_html(self):
"""
Verify that a draft xblock's container preview returns the expected HTML.
"""
self.validate_preview_html(self.vertical, self.container_view)
self.validate_preview_html(self.child_container, self.container_view)
self.validate_preview_html(self.child_vertical, self.reorderable_child_view)
def _create_item(self, parent_location, category, display_name, **kwargs):
"""
        Creates an item in the module store without publishing it.
"""
return ItemFactory.create(
parent_location=parent_location,
category=category,
display_name=display_name,
publish_item=False,
user_id=self.user.id,
**kwargs
)
def test_public_child_container_preview_html(self):
"""
Verify that a public container rendered as a child of the container page returns the expected HTML.
"""
empty_child_container = self._create_item(self.vertical.location, 'split_test', 'Split Test')
published_empty_child_container = self.store.publish(empty_child_container.location, self.user.id)
self.validate_preview_html(published_empty_child_container, self.reorderable_child_view, can_add=False)
def test_draft_child_container_preview_html(self):
"""
Verify that a draft container rendered as a child of the container page returns the expected HTML.
"""
empty_child_container = self._create_item(self.vertical.location, 'split_test', 'Split Test')
self.validate_preview_html(empty_child_container, self.reorderable_child_view, can_add=False)
@patch('contentstore.views.component.render_to_response', Mock(return_value=Mock(status_code=200, content='')))
def test_container_page_with_valid_and_invalid_usage_key_string(self):
"""
Check that invalid 'usage_key_string' raises Http404.
"""
request = RequestFactory().get('foo')
request.user = self.user
# Check for invalid 'usage_key_strings'
self.assertRaises(
Http404, views.container_handler,
request,
usage_key_string='i4x://InvalidOrg/InvalidCourse/vertical/static/InvalidContent',
)
# Check 200 response if 'usage_key_string' is correct
response = views.container_handler(
request=request,
usage_key_string=unicode(self.vertical.location)
)
self.assertEqual(response.status_code, 200)
| agpl-3.0 |
aonotas/chainer | chainer/functions/math/det.py | 3 | 3637 | import numpy
import chainer
from chainer.backends import cuda
from chainer import function_node
import chainer.functions
from chainer.functions.math import matmul
from chainer import utils
from chainer.utils import type_check
def _det_gpu(b):
    # We do a batched LU decomposition on the GPU and compute the
    # determinant by multiplying the diagonal entries of the factorization.
    # Change the shape of the array to be a size=1 minibatch if necessary.
    # Also copy the matrix, as the elements will be modified in-place.
a = matmul._as_batch_mat(b).copy()
n = a.shape[1]
n_matrices = len(a)
# Pivot array
p = cuda.cupy.zeros((n_matrices, n), dtype='int32')
    # Output array: holds information on execution success and on whether
    # each matrix was singular.
info = cuda.cupy.zeros(n_matrices, dtype=numpy.intp)
ap = matmul._mat_ptrs(a)
_, lda = matmul._get_ld(a)
cuda.cublas.sgetrfBatched(cuda.Device().cublas_handle, n, ap.data.ptr, lda,
p.data.ptr, info.data.ptr, n_matrices)
det = cuda.cupy.prod(a.diagonal(axis1=1, axis2=2), axis=1)
    # The determinant equals the product of the diagonal entries of the LU
    # factorization, with the sign flipped according to the parity of the
    # row interchanges. cuBLAS pivot indices are 1-based, so a swap is
    # recorded wherever a pivot entry differs from its own (1-based) index.
rng = cuda.cupy.arange(1, n + 1, dtype='int32')
parity = cuda.cupy.sum(p != rng, axis=1) % 2
sign = 1. - 2. * parity.astype('float32')
return det * sign, info
class BatchDet(function_node.FunctionNode):
@property
def label(self):
return 'det'
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
a_type, = in_types
type_check.expect(a_type.dtype.kind == 'f')
# Only a minibatch of 2D array shapes allowed.
type_check.expect(a_type.ndim == 3)
# Matrix inversion only allowed for square matrices
# so assert the last two dimensions are equal.
type_check.expect(a_type.shape[-1] == a_type.shape[-2])
def forward_cpu(self, x):
self.retain_inputs((0,))
self.retain_outputs((0,))
detx = utils.force_array(numpy.linalg.det(x[0]))
return detx,
def forward_gpu(self, x):
self.retain_inputs((0,))
self.retain_outputs((0,))
detx, _ = _det_gpu(x[0])
return detx,
def backward(self, indexes, gy):
x, = self.get_retained_inputs()
detx, = self.get_retained_outputs()
gy, = gy
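        # Gradient via Jacobi's formula: d det(X) / dX = det(X) * X^{-T}.
        # The per-matrix scalar factors gy and det(x) are broadcast over
        # the transposed inverse computed below.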
inv_x = chainer.functions.batch_inv(
chainer.functions.transpose(x, (0, 2, 1)))
gy = chainer.functions.broadcast_to(gy[:, None, None], inv_x.shape)
detx = chainer.functions.broadcast_to(detx[:, None, None], inv_x.shape)
grad = gy * detx * inv_x
return grad,
def batch_det(a):
"""Computes the determinant of a batch of square matrices.
Args:
a (Variable): Input array to compute the determinant for.
            The first dimension indexes the matrices in the batch, so its
            size is the batch size.
Returns:
        ~chainer.Variable: A vector of determinants, one for every matrix
            in the batch.
"""
return BatchDet().apply((a,))[0]
def det(a):
"""Computes the determinant of a single square matrix.
Args:
a (Variable): Input array to compute the determinant for.
Returns:
~chainer.Variable: Scalar determinant of the matrix a.
"""
shape = (1, a.shape[0], a.shape[1])
batched_a = chainer.functions.reshape(a, shape)
batched_det = BatchDet().apply((batched_a,))[0]
return chainer.functions.reshape(batched_det, ())
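# Hedged usage sketch (an editorial addition; assumes chainer and numpy are
# importable and uses float32 data):
#
#   >>> import numpy as np
#   >>> x = np.array([[1., 2.], [3., 4.]], dtype=np.float32)
#   >>> det(x).data                     # approximately -2.0
#   >>> xs = np.stack([x, np.eye(2, dtype=np.float32)])
#   >>> batch_det(xs).data              # approximately [-2., 1.]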
| mit |
jvazquez/job_seeker | job_seeker/scrape/tasks.py | 1 | 4823 | from __future__ import absolute_import
import datetime
import logging
import urllib2
import sys
from bs4 import BeautifulSoup
from celery import Celery
from scrape.models import ClSite, ClOffer, ClOfferText
from utils.dbutils import session_scope
QS = "/search/eng?is_telecommuting=1"
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
log_format = '[%(name)s] - %(levelname)s [# %(lineno)d] - %(message)s'
formatter = logging.Formatter(log_format)
# File
file_log = logging.FileHandler("{myself}.log".format(myself=__name__))
file_log.setFormatter(formatter)
logger.addHandler(file_log)
# Stdout
shell = logging.StreamHandler(sys.stdout)
shell.setLevel(logging.DEBUG)
shell.setFormatter(formatter)
logger.addHandler(shell)
# TODO This should be configurable
TIMEOUT_IN_SECONDS = 4
CRAIGLIST_LIST = 'http://www.craigslist.org/about/sites'
app = Celery()
app.config_from_object('celeryconfig')
@app.task
def scan_craiglist_list_of_sites():
try:
response = urllib2.urlopen(CRAIGLIST_LIST, timeout=TIMEOUT_IN_SECONDS)
html = response.read()
soup = BeautifulSoup(html)
anchors = soup.find_all('a')
with session_scope() as session:
for site in anchors:
msg = "Site is this:{this}".format(this=site)
logger.debug(msg)
if 'href' in site.attrs.keys():
mirror_link = ClSite(url=site.attrs['href'],
name=site.text,
created=datetime.datetime.now())
session.add(mirror_link)
session.commit()
except Exception as e:
msg = "Got error {err}".format(err=e)
logger.error(msg)
@app.task
def scan_craiglist():
logger.info("Starting scan_craiglist")
with session_scope() as session:
for site in session.query(ClSite).all():
scrape_site.apply_async((site.url, site.id,),
queue='craiglist_scrape')
logger.info("Launching a scan against {sit}".format(sit=site.url))
@app.task
def scrape_site(url, site_id):
try:
site = "{site}{query}".format(site=url, query=QS)
logger.info("Trying to open {site}".format(site=site))
response = urllib2.urlopen(site, timeout=TIMEOUT_IN_SECONDS)
html = response.read()
soup = BeautifulSoup(html)
anchors = soup.find_all('a', {'class': 'hdrlnk'})
logger.info("I have a total of {sites}".format(sites=len(anchors)))
with session_scope() as session:
for offer in anchors:
msg = "Offer is this:{this}".format(this=offer)
logger.info(msg)
link = ClOffer(site_id=site_id,
url=offer.attrs['href'],
title=offer.text,
created=datetime.datetime.now())
session.add(link)
                # TODO: too many reads, but I need the id for the link;
                # otherwise, redo this.
session.commit()
session.flush()
scrape_offer_text.apply_async((site_id, offer.attrs['href'],
link.id))
except Exception as e:
msg = "Got error {err}".format(err=e)
logger.error(msg)
raise scrape_site.retry((url,), max_retries=5, countdown=60)
@app.task
def scrape_offer_text(site_id, offer_url, offer_id):
try:
        if not offer_url.startswith('http'):
site = None
with session_scope() as session:
site = session.query(ClSite).filter_by(id=site_id).first()
target_url = site.url
else:
target_url = offer_url
response = urllib2.urlopen(target_url, timeout=TIMEOUT_IN_SECONDS)
html = response.read()
soup = BeautifulSoup(html)
body = soup.find_all('section', {'class': 'userbody'})
if len(body) == 1:
offer = ClOfferText(offer_text=body[0].text, offer_id=offer_id)
else:
offer = ClOfferText(offer_text="Failed", offer_id=offer_id)
with session_scope() as session:
session.add(offer)
session.commit()
except Exception as e:
logger.error("Got error {err}".format(err=e.message))
@app.task
def monitor_cl_sites():
with session_scope() as session:
affected = session.query(ClSite.id).filter(~ClSite.url.like('http://%'))\
.count()
session.query(ClSite).filter(~ClSite.url.like('http://%'))\
.delete(synchronize_session=False)
logger.info("Finished the task.Deleted {rows} rows".format(rows=affected))
return affected
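# Hedged sketch of how scan_craiglist might be scheduled periodically; the
# beat schedule below is an assumption for illustration and would normally
# live in the ``celeryconfig`` module loaded above:
#
#   from datetime import timedelta
#   CELERYBEAT_SCHEDULE = {
#       'scan-craigslist-hourly': {
#           'task': 'scrape.tasks.scan_craiglist',
#           'schedule': timedelta(hours=1),
#       },
#   }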
if __name__ == '__main__':
app.start()
| gpl-2.0 |
dwightgunning/django | tests/view_tests/models.py | 281 | 1329 | """
Regression tests for Django built-in views.
"""
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Author(models.Model):
name = models.CharField(max_length=100)
def __str__(self):
return self.name
def get_absolute_url(self):
return '/authors/%s/' % self.id
@python_2_unicode_compatible
class BaseArticle(models.Model):
"""
An abstract article Model so that we can create article models with and
without a get_absolute_url method (for create_update generic views tests).
"""
title = models.CharField(max_length=100)
slug = models.SlugField()
author = models.ForeignKey(Author, models.CASCADE)
class Meta:
abstract = True
def __str__(self):
return self.title
class Article(BaseArticle):
date_created = models.DateTimeField()
class UrlArticle(BaseArticle):
"""
An Article class with a get_absolute_url defined.
"""
date_created = models.DateTimeField()
def get_absolute_url(self):
return '/urlarticles/%s/' % self.slug
get_absolute_url.purge = True
class DateArticle(BaseArticle):
"""
An article Model with a DateField instead of DateTimeField,
for testing #7602
"""
date_created = models.DateField()
| bsd-3-clause |
SiccarPoint/landlab | landlab/grid/raster_mappers.py | 1 | 24611 | #! /usr/bin/env python
"""Grid element mappers that are specific to raster grids.
Mapping functions unique to raster grids
++++++++++++++++++++++++++++++++++++++++
.. autosummary::
:toctree: generated/
~landlab.grid.raster_mappers.map_sum_of_inlinks_to_node
~landlab.grid.raster_mappers.map_mean_of_inlinks_to_node
~landlab.grid.raster_mappers.map_max_of_inlinks_to_node
~landlab.grid.raster_mappers.map_min_of_inlinks_to_node
~landlab.grid.raster_mappers.map_sum_of_outlinks_to_node
~landlab.grid.raster_mappers.map_mean_of_outlinks_to_node
~landlab.grid.raster_mappers.map_max_of_outlinks_to_node
~landlab.grid.raster_mappers.map_min_of_outlinks_to_node
~landlab.grid.raster_mappers.map_mean_of_links_to_node
~landlab.grid.raster_mappers.map_mean_of_horizontal_links_to_node
~landlab.grid.raster_mappers.map_mean_of_horizontal_active_links_to_node
~landlab.grid.raster_mappers.map_mean_of_vertical_links_to_node
~landlab.grid.raster_mappers.map_mean_of_vertical_active_links_to_node
"""
from __future__ import division
import numpy as np
from landlab.grid.structured_quad import links
def map_sum_of_inlinks_to_node(grid, var_name, out=None):
"""Map the sum of links entering a node to the node.
map_sum_of_inlinks_to_node takes an array *at the links* and finds the
    inlink values for each node in the grid. It sums the inlinks and returns
values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Construction::
map_sum_of_inlinks_to_node(grid, var_name, out=None)
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_sum_of_inlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field('link', 'z', np.arange(17.))
>>> map_sum_of_inlinks_to_node(rmg, 'z')
array([ 0., 0., 1., 2., 3., 11., 13., 15., 10., 25., 27.,
29.])
"""
if out is None:
out = grid.empty(centering='node')
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
south, west = links._node_in_link_ids(grid.shape)
south, west = south.reshape(south.size), west.reshape(west.size)
out[:] = values_at_links[south] + values_at_links[west]
return out
def map_mean_of_inlinks_to_node(grid, var_name, out=None):
"""Map the mean of links entering a node to the node.
map_mean_of_inlinks_to_node takes an array *at the links* and finds the
inlink values for each node in the grid. It finds the average of
the inlinks and returns values at the nodes.
This considers all inactive links to have a value of 0.
Construction::
map_mean_of_inlinks_to_node(grid, var_name, out=None)
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_mean_of_inlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field('link', 'z', np.arange(17.))
>>> map_mean_of_inlinks_to_node(rmg, 'z')
array([ 0. , 0. , 0.5, 1. , 1.5, 5.5, 6.5, 7.5, 5. ,
12.5, 13.5, 14.5])
"""
if out is None:
out = grid.empty(centering='node')
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
south, west = links._node_in_link_ids(grid.shape)
south, west = south.reshape(south.size), west.reshape(west.size)
out[:] = 0.5 * (values_at_links[south] + values_at_links[west])
return out
def map_max_of_inlinks_to_node(grid, var_name, out=None):
"""Map the maximum of links entering a node to the node.
map_max_of_inlinks_to_node takes an array *at the links* and finds the
    inlink values for each node in the grid. It finds the maximum value of
    the inlinks and returns values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Construction::
map_max_of_inlinks_to_node(grid, var_name, out=None)
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_max_of_inlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field('link', 'z', np.arange(17.))
>>> map_max_of_inlinks_to_node(rmg, 'z')
array([ 0., 0., 1., 2.,
3., 7., 8., 9.,
10., 14., 15., 16.])
"""
if out is None:
out = grid.empty(centering='node')
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
south, west = links._node_in_link_ids(grid.shape)
south, west = south.reshape(south.size), west.reshape(west.size)
out[:] = np.maximum(values_at_links[south], values_at_links[west])
return out
def map_min_of_inlinks_to_node(grid, var_name, out=None):
"""Map the minimum of links entering a node to the node.
map_min_of_inlinks_to_node takes an array *at the links* and finds the
    inlink values for each node in the grid. It finds the minimum value of
    the inlinks and returns values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Construction::
map_min_of_inlinks_to_node(grid, var_name, out=None)
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_min_of_inlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field('link', 'z', np.arange(17.))
>>> map_min_of_inlinks_to_node(rmg, 'z')
array([ 0., 0., 0., 0., 0., 4., 5., 6., 0., 11., 12.,
           13.])
    """
if out is None:
out = grid.empty(centering='node')
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
south, west = links._node_in_link_ids(grid.shape)
south, west = south.reshape(south.size), west.reshape(west.size)
out[:] = np.minimum(values_at_links[south], values_at_links[west])
return out
def map_sum_of_outlinks_to_node(grid, var_name, out=None):
"""Map the sum of links leaving a node to the node.
map_sum_of_outlinks_to_node takes an array *at the links* and finds the
    outlink values for each node in the grid. It sums the outlinks and returns
values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Construction::
map_sum_of_outlinks_to_node(grid, var_name, out=None)
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_sum_of_outlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field('link', 'z', np.arange(17.))
>>> map_sum_of_outlinks_to_node(rmg, 'z')
array([ 3., 5., 7., 6., 17., 19., 21., 13., 14., 15., 16.,
0.])
"""
if out is None:
out = grid.empty(centering='node')
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
north, east = links._node_out_link_ids(grid.shape)
north, east = north.reshape(north.size), east.reshape(east.size)
out[:] = values_at_links[north] + values_at_links[east]
return out
def map_mean_of_outlinks_to_node(grid, var_name, out=None):
"""Map the mean of links leaving a node to the node.
map_mean_of_outlinks_to_node takes an array *at the links* and finds the
    outlink values for each node in the grid. It finds the average of
the outlinks and returns values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Construction::
map_mean_of_outlinks_to_node(grid, var_name, out=None)
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_mean_of_outlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field('link', 'z', np.arange(17.))
>>> map_mean_of_outlinks_to_node(rmg, 'z')
array([ 1.5, 2.5, 3.5, 3. , 8.5, 9.5, 10.5, 6.5, 7. ,
7.5, 8. , 0. ])
"""
if out is None:
out = grid.empty(centering='node')
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
north, east = links._node_out_link_ids(grid.shape)
north, east = north.reshape(north.size), east.reshape(east.size)
out[:] = 0.5 * (values_at_links[north] + values_at_links[east])
return out
def map_max_of_outlinks_to_node(grid, var_name, out=None):
"""Map the max of links leaving a node to the node.
map_max_of_outlinks_to_node takes an array *at the links* and finds the
    outlink values for each node in the grid. It finds the maximum value of
    the outlinks and returns values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Construction::
map_max_of_outlinks_to_node(grid, var_name, out=None)
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_max_of_outlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field('link', 'z', np.arange(17.))
>>> map_max_of_outlinks_to_node(rmg, 'z')
array([ 3., 4., 5., 6., 10., 11., 12., 13., 14., 15., 16.,
0.])
"""
if out is None:
out = grid.empty(centering='node')
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
north, east = links._node_out_link_ids(grid.shape)
north, east = north.reshape(north.size), east.reshape(east.size)
np.maximum(values_at_links[north], values_at_links[east], out=out)
return out
def map_min_of_outlinks_to_node(grid, var_name, out=None):
"""Map the min of links leaving a node to the node.
map_min_of_outlinks_to_node takes an array *at the links* and finds the
    outlink values for each node in the grid. It finds the minimum value of
    the outlinks and returns values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Construction::
map_min_of_outlinks_to_node(grid, var_name, out=None)
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_min_of_outlinks_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field('link', 'z', np.arange(17.))
>>> map_min_of_outlinks_to_node(rmg, 'z')
array([ 0., 1., 2., 0., 7., 8., 9., 0., 0., 0., 0., 0.])
"""
if out is None:
out = grid.empty(centering='node')
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
north, east = links._node_out_link_ids(grid.shape)
north, east = north.reshape(north.size), east.reshape(east.size)
np.minimum(values_at_links[north], values_at_links[east], out=out)
return out
def map_mean_of_links_to_node(grid, var_name, out=None):
"""Map the mean of links touching a node to the node.
    map_mean_of_links_to_node takes an array *at the links* and finds the
    average of all *existing* link neighbor values for each node in the grid.
    It returns values at the nodes.
.. note::
This considers all inactive links to have a value of 0.
Construction::
map_mean_of_links_to_node(grid, var_name, out=None)
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_mean_of_links_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field('link', 'z', np.arange(17.))
>>> map_mean_of_links_to_node(rmg, 'z')
array([ 1.5 , 1.66666667, 2.66666667, 4. ,
6.66666667, 7.5 , 8.5 , 9.33333333,
12. , 13.33333333, 14.33333333, 14.5 ])
"""
if out is None:
out = grid.empty(centering='node')
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
values_at_links = np.append(values_at_links, 0)
north, east = links._node_out_link_ids(grid.shape)
north, east = north.reshape(north.size), east.reshape(east.size)
south, west = links._node_in_link_ids(grid.shape)
south, west = south.reshape(south.size), west.reshape(west.size)
number_of_links = links.number_of_links_per_node(grid.shape)
number_of_links = number_of_links.reshape(number_of_links.size)
    number_of_links = number_of_links.astype(float, copy=False)
out[:] = (values_at_links[north] + values_at_links[east] +
values_at_links[south] + values_at_links[west]) / number_of_links
return out
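# Editorial illustration (mirrors the doctests above): since ``var_name`` may
# be a raw array rather than a field name, the mappers also work without
# registering a field on the grid:
#
#   >>> import numpy as np
#   >>> from landlab import RasterModelGrid
#   >>> rmg = RasterModelGrid((3, 4))
#   >>> map_mean_of_links_to_node(rmg, np.arange(17.))  # same as the 'z' field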
def map_mean_of_horizontal_links_to_node(grid, var_name, out=None):
"""
Map the mean of links in the x direction touching a node to the node.
map_mean_of_horizontal_links_to_node takes an array *at the links* and
finds the average of all horizontal (x-direction) link neighbor values
for each node in the grid.
It returns an array at the nodes of the mean of these values. If a link
is absent, it is ignored.
Note that here a positive returned value means flux to the east, and
a negative to the west.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_mean_of_horizontal_links_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field('link', 'z', np.arange(17.))
>>> map_mean_of_horizontal_links_to_node(rmg, 'z')
array([ 0. , 0.5, 1.5, 2. , 7. , 7.5, 8.5, 9. , 14. ,
14.5, 15.5, 16. ])
"""
if out is None:
out = grid.empty(centering='node')
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
hoz_links = grid.links_at_node[:, [0, 2]]
hoz_link_dirs = np.fabs(grid.link_dirs_at_node[:, [0, 2]])
# ^retain "true" directions of links
valid_links = values_at_links[hoz_links]*hoz_link_dirs # invalids = 0
num_valid_links = hoz_link_dirs.sum(axis=1)
np.divide(valid_links.sum(axis=1), num_valid_links, out=out)
return out
def map_mean_of_horizontal_active_links_to_node(grid, var_name, out=None):
"""
Map the mean of active links in the x direction touching node to the node.
map_mean_of_horizontal_active_links_to_node takes an array *at the links*
and finds the average of all horizontal (x-direction) link neighbor values
for each node in the grid.
It returns an array at the nodes of the mean of these values. If a link
is absent, it is ignored. If a node has no active links, it receives 0.
Note that here a positive returned value means flux to the east, and
a negative to the west.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_mean_of_horizontal_active_links_to_node
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field('link', 'z', -np.arange(17, dtype=float))
>>> rmg.status_at_node[rmg.nodes_at_left_edge] = CLOSED_BOUNDARY
>>> map_mean_of_horizontal_active_links_to_node(rmg, 'z')
array([ 0. , 0. , 0. , 0. , 0. , -8. , -8.5, -9. , 0. , 0. , 0. ,
0. ])
"""
if out is None:
out = grid.zeros(centering='node', dtype=float)
else:
out.fill(0.)
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
hoz_links = grid.links_at_node[:, [0, 2]]
hoz_link_dirs = np.fabs(grid.active_link_dirs_at_node[:, [0, 2]])
# ^retain "true" directions of links; no inactives now
valid_links = values_at_links[hoz_links]*hoz_link_dirs # invalids = 0
num_valid_links = hoz_link_dirs.sum(axis=1)
good_nodes = num_valid_links != 0
out[good_nodes] = (valid_links.sum(axis=1)[good_nodes] /
num_valid_links[good_nodes])
return out
def map_mean_of_vertical_links_to_node(grid, var_name, out=None):
"""
Map the mean of links in the y direction touching a node to the node.
map_mean_of_vertical_links_to_node takes an array *at the links* and
finds the average of all vertical (y-direction) link neighbor values
for each node in the grid.
It returns an array at the nodes of the mean of these values. If a link
is absent, it is ignored.
Note that here a positive returned value means flux to the north, and
a negative to the south.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_mean_of_vertical_links_to_node
>>> from landlab import RasterModelGrid
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field('link', 'z', np.arange(17.))
>>> map_mean_of_vertical_links_to_node(rmg, 'z')
array([ 3. , 4. , 5. , 6. , 6.5, 7.5, 8.5, 9.5, 10. ,
11. , 12. , 13. ])
"""
if out is None:
out = grid.empty(centering='node')
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
vert_links = grid.links_at_node[:, [1, 3]]
vert_link_dirs = np.fabs(grid.link_dirs_at_node[:, [1, 3]])
# ^retain "true" directions of links
valid_links = values_at_links[vert_links]*vert_link_dirs # invalids = 0
num_valid_links = vert_link_dirs.sum(axis=1)
np.divide(valid_links.sum(axis=1), num_valid_links, out=out)
return out
def map_mean_of_vertical_active_links_to_node(grid, var_name, out=None):
"""
Map the mean of active links in the y direction touching node to the node.
map_mean_of_vertical_active_links_to_node takes an array *at the links*
and finds the average of all vertical (y-direction) link neighbor values
for each node in the grid.
It returns an array at the nodes of the mean of these values. If a link
is absent, it is ignored. If a node has no active links, it receives 0.
Note that here a positive returned value means flux to the north, and
a negative to the south.
Parameters
----------
grid : ModelGrid
A landlab ModelGrid.
var_name : array or field name
Values defined at links.
out : ndarray, optional
Buffer to place mapped values into or `None` to create a new array.
Returns
-------
ndarray
Mapped values at nodes.
Examples
--------
>>> import numpy as np
>>> from landlab.grid.raster_mappers import map_mean_of_vertical_active_links_to_node
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> rmg = RasterModelGrid((3, 4))
>>> _ = rmg.add_field('link', 'z', -np.arange(17, dtype=float))
>>> rmg.status_at_node[rmg.nodes_at_bottom_edge] = CLOSED_BOUNDARY
>>> map_mean_of_vertical_active_links_to_node(rmg, 'z')
array([ 0., 0., 0., 0., 0., -11., -12., 0., 0., -11., -12.,
0.])
"""
if out is None:
out = grid.zeros(centering='node', dtype=float)
else:
out.fill(0.)
if type(var_name) is str:
values_at_links = grid.at_link[var_name]
else:
values_at_links = var_name
vert_links = grid.links_at_node[:, [1, 3]]
vert_link_dirs = np.fabs(grid.active_link_dirs_at_node[:, [1, 3]])
# ^retain "true" directions of links; no inactives now
valid_links = values_at_links[vert_links]*vert_link_dirs # invalids = 0
num_valid_links = vert_link_dirs.sum(axis=1)
good_nodes = num_valid_links != 0
out[good_nodes] = (valid_links.sum(axis=1)[good_nodes] /
num_valid_links[good_nodes])
return out
| mit |
mydongistiny/external_chromium_org | third_party/re2/re2/make_unicode_groups.py | 219 | 2849 | #!/usr/bin/python
# Copyright 2008 The RE2 Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Generate C++ tables for Unicode Script and Category groups."""
import sys
import unicode
_header = """
// GENERATED BY make_unicode_groups.py; DO NOT EDIT.
// make_unicode_groups.py >unicode_groups.cc
#include "re2/unicode_groups.h"
namespace re2 {
"""
_trailer = """
} // namespace re2
"""
n16 = 0
n32 = 0
def MakeRanges(codes):
"""Turn a list like [1,2,3,7,8,9] into a range list [[1,3], [7,9]]"""
ranges = []
last = -100
for c in codes:
if c == last+1:
ranges[-1][1] = c
else:
ranges.append([c, c])
last = c
return ranges
def PrintRanges(type, name, ranges):
"""Print the ranges as an array of type named name."""
print "static %s %s[] = {" % (type, name,)
for lo, hi in ranges:
print "\t{ %d, %d }," % (lo, hi)
print "};"
# def PrintCodes(type, name, codes):
# """Print the codes as an array of type named name."""
# print "static %s %s[] = {" % (type, name,)
# for c in codes:
# print "\t%d," % (c,)
# print "};"
def PrintGroup(name, codes):
"""Print the data structures for the group of codes.
Return a UGroup literal for the group."""
# See unicode_groups.h for a description of the data structure.
# Split codes into 16-bit ranges and 32-bit ranges.
range16 = MakeRanges([c for c in codes if c < 65536])
range32 = MakeRanges([c for c in codes if c >= 65536])
# Pull singleton ranges out of range16.
# code16 = [lo for lo, hi in range16 if lo == hi]
# range16 = [[lo, hi] for lo, hi in range16 if lo != hi]
global n16
global n32
n16 += len(range16)
n32 += len(range32)
ugroup = "{ \"%s\", +1" % (name,)
# if len(code16) > 0:
# PrintCodes("uint16", name+"_code16", code16)
# ugroup += ", %s_code16, %d" % (name, len(code16))
# else:
# ugroup += ", 0, 0"
if len(range16) > 0:
PrintRanges("URange16", name+"_range16", range16)
ugroup += ", %s_range16, %d" % (name, len(range16))
else:
ugroup += ", 0, 0"
if len(range32) > 0:
PrintRanges("URange32", name+"_range32", range32)
ugroup += ", %s_range32, %d" % (name, len(range32))
else:
ugroup += ", 0, 0"
ugroup += " }"
return ugroup
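# Hedged sketch of the emitted C++ for a tiny hypothetical group "Xx" with
# codes {65, 66, 0x10400} (real output depends on the Unicode tables):
#
#   static URange16 Xx_range16[] = {
#       { 65, 66 },
#   };
#   static URange32 Xx_range32[] = {
#       { 66560, 66560 },
#   };
#
# and PrintGroup would return the UGroup literal
#   { "Xx", +1, Xx_range16, 1, Xx_range32, 1 }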
def main():
print _header
ugroups = []
for name, codes in unicode.Categories().iteritems():
ugroups.append(PrintGroup(name, codes))
for name, codes in unicode.Scripts().iteritems():
ugroups.append(PrintGroup(name, codes))
print "// %d 16-bit ranges, %d 32-bit ranges" % (n16, n32)
print "UGroup unicode_groups[] = {";
ugroups.sort()
for ug in ugroups:
print "\t%s," % (ug,)
print "};"
print "int num_unicode_groups = %d;" % (len(ugroups),)
print _trailer
if __name__ == '__main__':
main()
| bsd-3-clause |
fluidinfo/Tickery | bin/list-friends.py | 2 | 1256 | #!/usr/bin/python
# Copyright 2010 Fluidinfo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import pprint
if __name__ == '__main__':
import sys
from twisted.internet import reactor
from tickery.twitter import FriendsFetcher, FollowersFetcher
def ok(names):
pprint.pprint(names)
def nok(failure):
print 'Failed:', failure
def stop(_):
reactor.stop()
if len(sys.argv) != 2:
raise Exception('I need a single username argument.')
if sys.argv[0].find('friends') == -1:
fetchClass = FollowersFetcher
else:
fetchClass = FriendsFetcher
ff = fetchClass(sys.argv[1])
d = ff.fetch()
d.addCallback(ok)
d.addErrback(nok)
d.addBoth(stop)
reactor.run()
| apache-2.0 |
repotvsupertuga/tvsupertuga.repository | script.module.TVsupertuga/lib/js2py/host/jseval.py | 33 | 1522 | from js2py.base import *
import inspect
try:
from js2py.translators.translator import translate_js
except:
pass
@Js
def Eval(code):
local_scope = inspect.stack()[3][0].f_locals['var']
global_scope = this.GlobalObject
# todo fix scope - we have to behave differently if called through variable other than eval
# we will use local scope (default)
globals()['var'] = local_scope
try:
py_code = translate_js(code.to_string().value, '')
except SyntaxError as syn_err:
raise MakeError('SyntaxError', str(syn_err))
lines = py_code.split('\n')
    # A simple way to return a value from eval. Will not work in complex cases.
has_return = False
for n in xrange(len(lines)):
line = lines[len(lines)-n-1]
if line.strip():
if line.startswith(' '):
break
elif line.strip()=='pass':
continue
elif any(line.startswith(e) for e in ['return ', 'continue ', 'break', 'raise ']):
break
else:
has_return = True
cand = 'EVAL_RESULT = (%s)\n'%line
try:
compile(cand, '', 'exec')
except SyntaxError:
break
lines[len(lines)-n-1] = cand
py_code = '\n'.join(lines)
break
#print py_code
executor(py_code)
if has_return:
return globals()['EVAL_RESULT']
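# Editorial illustration of the "simple return value" heuristic above: given
# hypothetical translated code whose last top-level statement is an
# expression, e.g.
#
#   var.put('x', Js(1))
#   (var.get('x')+Js(2))
#
# the loop rewrites the final line to
#
#   EVAL_RESULT = ((var.get('x')+Js(2)))
#
# so the value can be handed back to the caller. The translated lines shown
# are an assumption for illustration; real translate_js output differs.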
def executor(code):
exec code in globals()
| gpl-2.0 |
pdebuyl/lammps | tools/i-pi/ipi/inputs/normalmodes.py | 41 | 3951 | """Deals with creating the normal mode representation arrays.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Classes:
InputNormalModes: Deals with creating the normal mode objects.
"""
import numpy as np
from copy import copy
from ipi.engine.normalmodes import *
from ipi.utils.inputvalue import *
from ipi.utils.units import *
__all__ = ['InputNormalModes']
class InputNormalModes(InputArray):
""" Storage class for NormalModes engine.
Describes how normal-modes transformation and integration should be
performed.
Attributes:
mode: Specifies the method by which the dynamical masses are created.
transform: Specifies whether the normal mode calculation will be
done using a FFT transform or a matrix multiplication.
"""
attribs = copy(InputArray.attribs)
attribs["mode"] = (InputAttribute, {"dtype" : str,
"default" : "rpmd",
"help" : "Specifies the technique to be used to calculate the dynamical masses. 'rpmd' simply assigns the bead masses the physical mass. 'manual' sets all the normal mode frequencies except the centroid normal mode manually. 'pa-cmd' takes an argument giving the frequency to set all the non-centroid normal modes to. 'wmax-cmd' is similar to 'pa-cmd', except instead of taking one argument it takes two ([wmax,wtarget]). The lowest-lying normal mode will be set to wtarget for a free particle, and all the normal modes will coincide at frequency wmax. ",
"options" : ['pa-cmd', 'wmax-cmd', 'manual', 'rpmd']})
attribs["transform"] = (InputValue,{"dtype" : str,
"default" : "fft",
"help" : "Specifies whether to calculate the normal mode transform using a fast Fourier transform or a matrix multiplication. For small numbers of beads the matrix multiplication may be faster.",
"options" : ['fft', 'matrix']})
default_help = "Deals with the normal mode transformations, including the adjustment of bead masses to give the desired ring polymer normal mode frequencies if appropriate. Takes as arguments frequencies, of which different numbers must be specified and which are used to scale the normal mode frequencies in different ways depending on which 'mode' is specified."
default_label = "NORMALMODES"
def __init__(self, help=None, dimension=None, default=None, dtype=None):
""" Initializes InputNormalModes.
Just calls the parent initialization function with appropriate arguments.
"""
super(InputNormalModes,self).__init__(help=help, default=default, dtype=float, dimension="frequency")
def store(self, nm):
"""Takes a normal modes instance and stores a minimal representation
of it.
Args:
nm: A normal modes object.
"""
super(InputNormalModes,self).store(nm.nm_freqs)
self.mode.store(nm.mode)
self.transform.store(nm.transform_method)
def fetch(self):
"""Creates a normal modes object.
Returns:
A normal modes object.
"""
super(InputNormalModes,self).check()
return NormalModes(self.mode.fetch(), self.transform.fetch(), super(InputNormalModes,self).fetch() )
| gpl-2.0 |
wakatime/komodo-wakatime | components/wakatime/packages/urllib3/util/response.py | 316 | 2343 | from __future__ import absolute_import
from ..packages.six.moves import http_client as httplib
from ..exceptions import HeaderParsingError
def is_fp_closed(obj):
"""
Checks whether a given file-like object is closed.
:param obj:
The file-like object to check.
"""
try:
# Check `isclosed()` first, in case Python3 doesn't set `closed`.
# GH Issue #928
return obj.isclosed()
except AttributeError:
pass
try:
# Check via the official file-like-object way.
return obj.closed
except AttributeError:
pass
try:
# Check if the object is a container for another file-like object that
# gets released on exhaustion (e.g. HTTPResponse).
return obj.fp is None
except AttributeError:
pass
raise ValueError("Unable to determine whether fp is closed.")
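# Hedged usage sketch (an editorial addition, not part of urllib3):
#
#   >>> from io import BytesIO
#   >>> fp = BytesIO(b"data")
#   >>> is_fp_closed(fp)
#   False
#   >>> fp.close()
#   >>> is_fp_closed(fp)
#   True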
def assert_header_parsing(headers):
"""
Asserts whether all headers have been successfully parsed.
Extracts encountered errors from the result of parsing headers.
Only works on Python 3.
:param headers: Headers to verify.
:type headers: `httplib.HTTPMessage`.
:raises urllib3.exceptions.HeaderParsingError:
If parsing errors are found.
"""
# This will fail silently if we pass in the wrong kind of parameter.
# To make debugging easier add an explicit check.
if not isinstance(headers, httplib.HTTPMessage):
raise TypeError('expected httplib.Message, got {0}.'.format(
type(headers)))
defects = getattr(headers, 'defects', None)
get_payload = getattr(headers, 'get_payload', None)
unparsed_data = None
if get_payload: # Platform-specific: Python 3.
unparsed_data = get_payload()
if defects or unparsed_data:
raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)
def is_response_to_head(response):
"""
Checks whether the request of a response has been a HEAD-request.
Handles the quirks of AppEngine.
    :param response: Response whose originating request is checked.
    :type response: :class:`httplib.HTTPResponse`
"""
# FIXME: Can we do this somehow without accessing private httplib _method?
method = response._method
if isinstance(method, int): # Platform-specific: Appengine
return method == 3
return method.upper() == 'HEAD'
| bsd-3-clause |
anish/buildbot | master/buildbot/steps/source/darcs.py | 1 | 8652 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
"""
Source step code for darcs
"""
from twisted.internet import defer
from twisted.internet import reactor
from twisted.python import log
from buildbot.config import ConfigErrors
from buildbot.interfaces import WorkerTooOldError
from buildbot.process import buildstep
from buildbot.process import remotecommand
from buildbot.process.results import SUCCESS
from buildbot.steps.source.base import Source
class Darcs(Source):
""" Class for Darcs with all smarts """
name = 'darcs'
renderables = ['repourl']
possible_methods = ('clobber', 'copy')
def __init__(self, repourl=None, mode='incremental',
method=None, **kwargs):
self.repourl = repourl
self.method = method
self.mode = mode
super().__init__(**kwargs)
errors = []
if not self._hasAttrGroupMember('mode', self.mode):
errors.append("mode %s is not one of %s" %
(self.mode, self._listAttrGroupMembers('mode')))
if self.mode == 'incremental' and self.method:
errors.append("Incremental mode does not require method")
if self.mode == 'full':
if self.method is None:
self.method = 'copy'
elif self.method not in self.possible_methods:
errors.append("Invalid method for mode == %s" % (self.mode))
if repourl is None:
errors.append("you must provide repourl")
if errors:
raise ConfigErrors(errors)
def startVC(self, branch, revision, patch):
self.revision = revision
self.stdio_log = self.addLogForRemoteCommands("stdio")
d = self.checkDarcs()
@d.addCallback
def checkInstall(darcsInstalled):
if not darcsInstalled:
raise WorkerTooOldError("Darcs is not installed on worker")
return 0
d.addCallback(lambda _: self.sourcedirIsPatched())
@d.addCallback
def checkPatched(patched):
if patched:
return self.copy()
return 0
d.addCallback(self._getAttrGroupMember('mode', self.mode))
if patch:
d.addCallback(self.patch, patch)
d.addCallback(self.parseGotRevision)
d.addCallback(self.finish)
d.addErrback(self.failed)
return d
def checkDarcs(self):
cmd = remotecommand.RemoteShellCommand(self.workdir, ['darcs', '--version'],
env=self.env,
logEnviron=self.logEnviron,
timeout=self.timeout)
cmd.useLog(self.stdio_log, False)
d = self.runCommand(cmd)
@d.addCallback
def evaluate(_):
return cmd.rc == 0
return d
@defer.inlineCallbacks
def mode_full(self, _):
if self.method == 'clobber':
yield self.clobber()
return
elif self.method == 'copy':
yield self.copy()
return
@defer.inlineCallbacks
def mode_incremental(self, _):
updatable = yield self._sourcedirIsUpdatable()
if not updatable:
yield self._checkout()
else:
command = ['darcs', 'pull', '--all', '--verbose']
yield self._dovccmd(command)
def copy(self):
cmd = remotecommand.RemoteCommand('rmdir', {'dir': self.workdir,
'logEnviron': self.logEnviron,
'timeout': self.timeout, })
cmd.useLog(self.stdio_log, False)
d = self.runCommand(cmd)
self.workdir = 'source'
d.addCallback(self.mode_incremental)
@d.addCallback
def copy(_):
cmd = remotecommand.RemoteCommand('cpdir',
{'fromdir': 'source',
'todir': 'build',
'logEnviron': self.logEnviron,
'timeout': self.timeout, })
cmd.useLog(self.stdio_log, False)
d = self.runCommand(cmd)
return d
@d.addCallback
def resetWorkdir(_):
self.workdir = 'build'
return 0
return d
def clobber(self):
d = self.runRmdir(self.workdir)
d.addCallback(lambda _: self._checkout())
return d
def _clone(self, abandonOnFailure=False):
command = ['darcs', 'get', '--verbose',
'--lazy', '--repo-name', self.workdir]
d = defer.succeed(0)
if self.revision:
d.addCallback(
lambda _: self.downloadFileContentToWorker('.darcs-context', self.revision))
command.append('--context')
command.append('.darcs-context')
command.append(self.repourl)
d.addCallback(lambda _: self._dovccmd(command, abandonOnFailure=abandonOnFailure,
wkdir='.'))
return d
def _checkout(self):
if self.retry:
abandonOnFailure = (self.retry[1] <= 0)
else:
abandonOnFailure = True
d = self._clone(abandonOnFailure)
def _retry(res):
if self.stopped or res == 0:
return res
delay, repeats = self.retry
if repeats > 0:
log.msg("Checkout failed, trying %d more times after %d seconds"
% (repeats, delay))
self.retry = (delay, repeats - 1)
df = defer.Deferred()
df.addCallback(lambda _: self.runRmdir(self.workdir))
df.addCallback(lambda _: self._checkout())
reactor.callLater(delay, df.callback, None)
return df
return res
if self.retry:
d.addCallback(_retry)
return d
def finish(self, res):
d = defer.succeed(res)
@d.addCallback
def _gotResults(results):
self.setStatus(self.cmd, results)
log.msg("Closing log, sending result of the command %s " %
(self.cmd))
return results
d.addCallback(self.finished)
return d
@defer.inlineCallbacks
def parseGotRevision(self, _):
revision = yield self._dovccmd(['darcs', 'changes', '--max-count=1'], collectStdout=True)
self.updateSourceProperty('got_revision', revision)
return 0
def _dovccmd(self, command, collectStdout=False, initialStdin=None, decodeRC=None,
abandonOnFailure=True, wkdir=None):
if not command:
raise ValueError("No command specified")
if decodeRC is None:
decodeRC = {0: SUCCESS}
workdir = wkdir or self.workdir
cmd = remotecommand.RemoteShellCommand(workdir, command,
env=self.env,
logEnviron=self.logEnviron,
timeout=self.timeout,
collectStdout=collectStdout,
initialStdin=initialStdin,
decodeRC=decodeRC)
cmd.useLog(self.stdio_log, False)
d = self.runCommand(cmd)
@d.addCallback
def evaluateCommand(_):
if abandonOnFailure and cmd.didFail():
log.msg("Source step failed while running command %s" % cmd)
raise buildstep.BuildStepFailed()
if collectStdout:
return cmd.stdout
return cmd.rc
return d
def _sourcedirIsUpdatable(self):
return self.pathExists(self.build.path_module.join(self.workdir, '_darcs'))
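# Hedged master.cfg sketch (an editorial addition; keyword names follow this
# class's constructor, and the repourl is a placeholder):
#
#   from buildbot.plugins import steps, util
#   factory = util.BuildFactory()
#   factory.addStep(steps.Darcs(repourl='http://example.org/repo',
#                               mode='full', method='clobber'))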
| gpl-2.0 |
GeoscienceAustralia/geodesy-domain-model | aws/amazonia/test/sys_tests/test_sys_lambda_leaf.py | 3 | 1195 | #!/usr/bin/python3
from amazonia.classes.lambda_config import LambdaConfig
from amazonia.classes.amz_lambda import LambdaLeaf
from troposphere import Template
def main():
template = Template()
lambda_config = LambdaConfig(
lambda_s3_bucket='smallest-bucket-in-history',
lambda_s3_key='test_lambda.zip',
lambda_description='test function',
lambda_function_name='test_lambda',
lambda_handler='test_lambda.lambda_handler',
lambda_memory_size=128,
lambda_role_arn='arn:aws:iam::123456789:role/lambda_basic_vpc_execution_with_s3',
lambda_runtime='python2.7',
lambda_timeout=1,
lambda_schedule='rate(5 minutes)'
)
# Test Lambda
LambdaLeaf(leaf_title='MyLambda',
template=template,
dependencies=['app1:80'],
lambda_config=lambda_config,
availability_zones=['ap-southeast-2a', 'ap-southeast-2b', 'ap-southeast-2c'],
public_cidr={'name': 'PublicIp', 'cidr': '0.0.0.0/0'},
tree_name='tree'
)
print(template.to_json(indent=2, separators=(',', ': ')))
if __name__ == '__main__':
main()
| bsd-3-clause |
RichardLitt/wyrd-django-dev | django/template/loaders/cached.py | 110 | 2533 | """
Wrapper class that takes a list of template loaders as an argument and attempts
to load templates from them in order, caching the result.
"""
import hashlib
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader, get_template_from_string, find_template_loader, make_origin
class Loader(BaseLoader):
is_usable = True
def __init__(self, loaders):
self.template_cache = {}
self._loaders = loaders
self._cached_loaders = []
@property
def loaders(self):
# Resolve loaders on demand to avoid circular imports
if not self._cached_loaders:
# Set self._cached_loaders atomically. Otherwise, another thread
# could see an incomplete list. See #17303.
cached_loaders = []
for loader in self._loaders:
cached_loaders.append(find_template_loader(loader))
self._cached_loaders = cached_loaders
return self._cached_loaders
def find_template(self, name, dirs=None):
for loader in self.loaders:
try:
template, display_name = loader(name, dirs)
return (template, make_origin(display_name, loader, name, dirs))
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(name)
def load_template(self, template_name, template_dirs=None):
key = template_name
if template_dirs:
# If template directories were specified, use a hash to differentiate
key = '-'.join([template_name, hashlib.sha1('|'.join(template_dirs)).hexdigest()])
if key not in self.template_cache:
template, origin = self.find_template(template_name, template_dirs)
if not hasattr(template, 'render'):
try:
template = get_template_from_string(template, origin, template_name)
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist,
# back off to returning the source and display name for the template
# we were asked to load. This allows for correct identification (later)
# of the actual template that does not exist.
return template, origin
self.template_cache[key] = template
return self.template_cache[key], None
def reset(self):
"Empty the template cache."
self.template_cache.clear()
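# Hedged configuration sketch (Django's documented settings.py usage for this
# era of Django; the paths are the stock loader names):
#
#   TEMPLATE_LOADERS = (
#       ('django.template.loaders.cached.Loader', (
#           'django.template.loaders.filesystem.Loader',
#           'django.template.loaders.app_directories.Loader',
#       )),
#   )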
| bsd-3-clause |
team-ferret/pip-in-toto | pip/baseparser.py | 339 | 10465 | """Base option parser setup"""
from __future__ import absolute_import
import sys
import optparse
import os
import re
import textwrap
from distutils.util import strtobool
from pip._vendor.six import string_types
from pip._vendor.six.moves import configparser
from pip.locations import (
legacy_config_file, config_basename, running_under_virtualenv,
site_config_files
)
from pip.utils import appdirs, get_terminal_size
_environ_prefix_re = re.compile(r"^PIP_", re.I)
class PrettyHelpFormatter(optparse.IndentedHelpFormatter):
"""A prettier/less verbose help formatter for optparse."""
def __init__(self, *args, **kwargs):
# help position must be aligned with __init__.parseopts.description
kwargs['max_help_position'] = 30
kwargs['indent_increment'] = 1
kwargs['width'] = get_terminal_size()[0] - 2
optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)
def format_option_strings(self, option):
return self._format_option_strings(option, ' <%s>', ', ')
def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '):
"""
Return a comma-separated list of option strings and metavars.
:param option: tuple of (short opt, long opt), e.g: ('-f', '--format')
:param mvarfmt: metavar format string - evaluated as mvarfmt % metavar
:param optsep: separator
"""
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, optsep)
if option.takes_value():
metavar = option.metavar or option.dest.lower()
opts.append(mvarfmt % metavar.lower())
return ''.join(opts)
def format_heading(self, heading):
if heading == 'Options':
return ''
return heading + ':\n'
def format_usage(self, usage):
"""
Ensure there is only one newline between usage and the first heading
if there is no description.
"""
msg = '\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), " ")
return msg
def format_description(self, description):
# leave full control over description to us
if description:
if hasattr(self.parser, 'main'):
label = 'Commands'
else:
label = 'Description'
# some doc strings have initial newlines, some don't
description = description.lstrip('\n')
# some doc strings have final newlines and spaces, some don't
description = description.rstrip()
# dedent, then reindent
description = self.indent_lines(textwrap.dedent(description), " ")
description = '%s:\n%s\n' % (label, description)
return description
else:
return ''
def format_epilog(self, epilog):
# leave full control over epilog to us
if epilog:
return epilog
else:
return ''
def indent_lines(self, text, indent):
new_lines = [indent + line for line in text.split('\n')]
return "\n".join(new_lines)
class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):
"""Custom help formatter for use in ConfigOptionParser.
    This updates the defaults before expanding them, allowing
them to show up correctly in the help listing.
"""
def expand_default(self, option):
if self.parser is not None:
self.parser._update_defaults(self.parser.defaults)
return optparse.IndentedHelpFormatter.expand_default(self, option)
class CustomOptionParser(optparse.OptionParser):
def insert_option_group(self, idx, *args, **kwargs):
"""Insert an OptionGroup at a given position."""
group = self.add_option_group(*args, **kwargs)
self.option_groups.pop()
self.option_groups.insert(idx, group)
return group
@property
def option_list_all(self):
"""Get a list of all options, including those in option groups."""
res = self.option_list[:]
for i in self.option_groups:
res.extend(i.option_list)
return res
class ConfigOptionParser(CustomOptionParser):
"""Custom option parser which updates its defaults by checking the
    configuration files and environment variables"""
isolated = False
def __init__(self, *args, **kwargs):
self.config = configparser.RawConfigParser()
self.name = kwargs.pop('name')
self.isolated = kwargs.pop("isolated", False)
self.files = self.get_config_files()
if self.files:
self.config.read(self.files)
assert self.name
optparse.OptionParser.__init__(self, *args, **kwargs)
def get_config_files(self):
# the files returned by this method will be parsed in order with the
# first files listed being overridden by later files in standard
# ConfigParser fashion
config_file = os.environ.get('PIP_CONFIG_FILE', False)
if config_file == os.devnull:
return []
# at the base we have any site-wide configuration
files = list(site_config_files)
# per-user configuration next
if not self.isolated:
if config_file and os.path.exists(config_file):
files.append(config_file)
else:
# This is the legacy config file, we consider it to be a lower
# priority than the new file location.
files.append(legacy_config_file)
# This is the new config file, we consider it to be a higher
# priority than the legacy file.
files.append(
os.path.join(
appdirs.user_config_dir("pip"),
config_basename,
)
)
        # finally, the virtualenv configuration, which trumps all the others
if running_under_virtualenv():
venv_config_file = os.path.join(
sys.prefix,
config_basename,
)
if os.path.exists(venv_config_file):
files.append(venv_config_file)
return files
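    # Editorial illustration of the resulting precedence (later files override
    # earlier ones when read by RawConfigParser). On a typical Linux install
    # the ordering resembles:
    #
    #   /etc/pip.conf                 # site-wide, lowest priority
    #   ~/.pip/pip.conf               # legacy per-user location
    #   ~/.config/pip/pip.conf        # new per-user location
    #   $VIRTUAL_ENV/pip.conf         # virtualenv, highest priority
    #
    # Actual paths vary by platform and the appdirs implementation.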
def check_default(self, option, key, val):
try:
return option.check_value(key, val)
except optparse.OptionValueError as exc:
print("An error occurred during configuration: %s" % exc)
sys.exit(3)
def _update_defaults(self, defaults):
"""Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists)."""
# Then go and look for the other sources of configuration:
config = {}
# 1. config files
for section in ('global', self.name):
config.update(
self.normalize_keys(self.get_config_section(section))
)
# 2. environmental variables
if not self.isolated:
config.update(self.normalize_keys(self.get_environ_vars()))
# Accumulate complex default state.
self.values = optparse.Values(self.defaults)
late_eval = set()
# Then set the options with those values
for key, val in config.items():
# ignore empty values
if not val:
continue
option = self.get_option(key)
# Ignore options not present in this parser. E.g. non-globals put
# in [global] by users that want them to apply to all applicable
# commands.
if option is None:
continue
if option.action in ('store_true', 'store_false', 'count'):
val = strtobool(val)
elif option.action == 'append':
val = val.split()
val = [self.check_default(option, key, v) for v in val]
elif option.action == 'callback':
late_eval.add(option.dest)
opt_str = option.get_opt_string()
val = option.convert_value(opt_str, val)
# From take_action
args = option.callback_args or ()
kwargs = option.callback_kwargs or {}
option.callback(option, opt_str, val, self, *args, **kwargs)
else:
val = self.check_default(option, key, val)
defaults[option.dest] = val
for key in late_eval:
defaults[key] = getattr(self.values, key)
self.values = None
return defaults
def normalize_keys(self, items):
"""Return a config dictionary with normalized keys regardless of
whether the keys were specified in environment variables or in config
files"""
normalized = {}
for key, val in items:
key = key.replace('_', '-')
if not key.startswith('--'):
key = '--%s' % key # only prefer long opts
normalized[key] = val
return normalized
def get_config_section(self, name):
"""Get a section of a configuration"""
if self.config.has_section(name):
return self.config.items(name)
return []
def get_environ_vars(self):
"""Returns a generator with all environmental vars with prefix PIP_"""
for key, val in os.environ.items():
if _environ_prefix_re.search(key):
yield (_environ_prefix_re.sub("", key).lower(), val)
def get_default_values(self):
"""Overriding to make updating the defaults after instantiation of
the option parser possible, _update_defaults() does the dirty work."""
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return optparse.Values(self.defaults)
defaults = self._update_defaults(self.defaults.copy()) # ours
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, string_types):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return optparse.Values(defaults)
def error(self, msg):
self.print_usage(sys.stderr)
self.exit(2, "%s\n" % msg)
| mit |
huonw/servo | tests/wpt/web-platform-tests/html/tools/update_html5lib_tests.py | 125 | 5358 | import sys
import os
import hashlib
import urllib
import itertools
import re
import json
import glob
import shutil
try:
import genshi
from genshi.template import MarkupTemplate
from html5lib.tests import support
except ImportError:
print """This script requires the Genshi templating library and html5lib source
It is recommended that these are installed in a virtualenv:
virtualenv venv
source venv/bin/activate
pip install genshi
cd venv
git clone [email protected]:html5lib/html5lib-python.git html5lib
cd html5lib
git submodule init
git submodule update
pip install -e ./
Then run this script again, with the virtual environment still active.
When you are done, type "deactivate" to deactivate the virtual environment.
"""
TESTS_PATH = "html/syntax/parsing/"
def get_paths():
script_path = os.path.split(os.path.abspath(__file__))[0]
repo_base = get_repo_base(script_path)
tests_path = os.path.join(repo_base, TESTS_PATH)
return script_path, tests_path
def get_repo_base(path):
while path:
if os.path.exists(os.path.join(path, ".git")):
return path
else:
path = os.path.split(path)[0]
def get_expected(data):
data = "#document\n" + data
return data
def get_hash(data, container=None):
    if container is None:
container = ""
return hashlib.sha1("#container%s#data%s"%(container.encode("utf8"),
data.encode("utf8"))).hexdigest()
def make_tests(script_dir, out_dir, input_file_name, test_data):
tests = []
innerHTML_tests = []
ids_seen = {}
print input_file_name
for test in test_data:
if "script-off" in test:
continue
is_innerHTML = "document-fragment" in test
data = test["data"]
container = test["document-fragment"] if is_innerHTML else None
assert test["document"], test
expected = get_expected(test["document"])
test_list = innerHTML_tests if is_innerHTML else tests
test_id = get_hash(data, container)
if test_id in ids_seen:
print "WARNING: id %s seen multiple times in file %s this time for test (%s, %s) before for test %s, skipping"%(test_id, input_file_name, container, data, ids_seen[test_id])
continue
ids_seen[test_id] = (container, data)
test_list.append({'string_uri_encoded_input':"\"%s\""%urllib.quote(data.encode("utf8")),
'input':data,
'expected':expected,
'string_escaped_expected':json.dumps(urllib.quote(expected.encode("utf8"))),
'id':test_id,
'container':container
})
path_normal = None
if tests:
path_normal = write_test_file(script_dir, out_dir,
tests, "html5lib_%s"%input_file_name,
"html5lib_test.xml")
path_innerHTML = None
if innerHTML_tests:
path_innerHTML = write_test_file(script_dir, out_dir,
innerHTML_tests, "html5lib_innerHTML_%s"%input_file_name,
"html5lib_test_fragment.xml")
return path_normal, path_innerHTML
def write_test_file(script_dir, out_dir, tests, file_name, template_file_name):
file_name = os.path.join(out_dir, file_name + ".html")
short_name = os.path.split(file_name)[1]
with open(os.path.join(script_dir, template_file_name)) as f:
template = MarkupTemplate(f)
stream = template.generate(file_name=short_name, tests=tests)
with open(file_name, "w") as f:
f.write(stream.render('html', doctype='html5',
encoding="utf8"))
return file_name
def escape_js_string(in_data):
return in_data.encode("utf8").encode("string-escape")
def serialize_filenames(test_filenames):
return "[" + ",\n".join("\"%s\""%item for item in test_filenames) + "]"
def main():
script_dir, out_dir = get_paths()
test_files = []
inner_html_files = []
if len(sys.argv) > 2:
test_iterator = itertools.izip(
itertools.repeat(False),
sorted(os.path.abspath(item) for item in
glob.glob(os.path.join(sys.argv[2], "*.dat"))))
else:
test_iterator = itertools.chain(
itertools.izip(itertools.repeat(False),
sorted(support.get_data_files("tree-construction"))),
itertools.izip(itertools.repeat(True),
sorted(support.get_data_files(
os.path.join("tree-construction", "scripted")))))
for (scripted, test_file) in test_iterator:
input_file_name = os.path.splitext(os.path.split(test_file)[1])[0]
if scripted:
input_file_name = "scripted_" + input_file_name
test_data = support.TestData(test_file)
test_filename, inner_html_file_name = make_tests(script_dir, out_dir,
input_file_name, test_data)
if test_filename is not None:
test_files.append(test_filename)
if inner_html_file_name is not None:
inner_html_files.append(inner_html_file_name)
if __name__ == "__main__":
main()
| mpl-2.0 |
ritchyteam/odoo | addons/multi_company/__openerp__.py | 52 | 1754 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Multi-Company',
'version': '1.0',
'category': 'Tools',
'description': """
This module is for managing a multicompany environment.
=======================================================
This module is the base module for other multi-company modules.
""",
'author': 'OpenERP SA,SYLEAM',
'website': 'https://www.odoo.com',
'depends': [
'base',
'sale_stock',
'project',
],
'data': ['res_company_view.xml'],
'demo': ['multi_company_demo.xml'],
'installable': True,
'auto_install': False,
'images': ['images/companies.jpeg','images/default_company_per_object_form.jpeg', 'images/default_company_per_object_list.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
larsbutler/swift | test/unit/account/test_reaper.py | 1 | 33365 | # Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import random
import shutil
import tempfile
import unittest
from logging import DEBUG
from mock import patch, call, DEFAULT
import six
import eventlet
from swift.account import reaper
from swift.account.backend import DATADIR
from swift.common.exceptions import ClientException
from swift.common.utils import normalize_timestamp
from test import unit
from swift.common.storage_policy import StoragePolicy, POLICIES
class FakeLogger(object):
def __init__(self, *args, **kwargs):
self.inc = {'return_codes.4': 0,
'return_codes.2': 0,
'objects_failures': 0,
'objects_deleted': 0,
'objects_remaining': 0,
'objects_possibly_remaining': 0,
'containers_failures': 0,
'containers_deleted': 0,
'containers_remaining': 0,
'containers_possibly_remaining': 0}
self.exp = []
def info(self, msg, *args):
self.msg = msg
def error(self, msg, *args):
self.msg = msg
def timing_since(*args, **kwargs):
pass
def getEffectiveLevel(self):
return DEBUG
def exception(self, *args):
self.exp.append(args)
def increment(self, key):
self.inc[key] += 1
class FakeBroker(object):
def __init__(self):
self.info = {}
def get_info(self):
return self.info
class FakeAccountBroker(object):
def __init__(self, containers):
self.containers = containers
self.containers_yielded = []
def get_info(self):
info = {'account': 'a',
'delete_timestamp': time.time() - 10}
return info
def list_containers_iter(self, *args):
for cont in self.containers:
yield cont, None, None, None
def is_status_deleted(self):
return True
def empty(self):
return False
class FakeRing(object):
def __init__(self):
self.nodes = [{'id': '1',
'ip': '10.10.10.1',
'port': 6202,
'device': 'sda1'},
{'id': '2',
'ip': '10.10.10.2',
'port': 6202,
'device': 'sda1'},
{'id': '3',
'ip': '10.10.10.3',
'port': 6202,
'device': None},
{'id': '4',
'ip': '10.10.10.1',
'port': 6202,
'device': 'sda2'},
{'id': '5',
'ip': '10.10.10.1',
'port': 6202,
'device': 'sda3'},
]
def get_nodes(self, *args, **kwargs):
return ('partition', self.nodes)
def get_part_nodes(self, *args, **kwargs):
return self.nodes
acc_nodes = [{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''}]
cont_nodes = [{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''},
{'device': 'sda1',
'ip': '',
'port': ''}]
@unit.patch_policies([StoragePolicy(0, 'zero', False,
object_ring=unit.FakeRing()),
StoragePolicy(1, 'one', True,
object_ring=unit.FakeRing(replicas=4))])
class TestReaper(unittest.TestCase):
def setUp(self):
self.to_delete = []
self.myexp = ClientException("", http_host=None,
http_port=None,
http_device=None,
http_status=404,
http_reason=None
)
def tearDown(self):
for todel in self.to_delete:
shutil.rmtree(todel)
def fake_direct_delete_object(self, *args, **kwargs):
if self.amount_fail < self.max_fail:
self.amount_fail += 1
raise self.myexp
if self.reap_obj_timeout:
raise eventlet.Timeout()
def fake_direct_delete_container(self, *args, **kwargs):
if self.amount_delete_fail < self.max_delete_fail:
self.amount_delete_fail += 1
raise self.myexp
def fake_direct_get_container(self, *args, **kwargs):
if self.get_fail:
raise self.myexp
if self.timeout:
raise eventlet.Timeout()
objects = [{'name': 'o1'},
{'name': 'o2'},
{'name': six.text_type('o3')},
{'name': ''}]
return None, objects
def fake_container_ring(self):
return FakeRing()
def fake_reap_object(self, *args, **kwargs):
if self.reap_obj_fail:
raise Exception
def prepare_data_dir(self, ts=False, device='sda1'):
devices_path = tempfile.mkdtemp()
# will be deleted by teardown
self.to_delete.append(devices_path)
path = os.path.join(devices_path, device, DATADIR)
os.makedirs(path)
path = os.path.join(path, '100',
'a86', 'a8c682d2472e1720f2d81ff8993aba6')
os.makedirs(path)
suffix = 'db'
if ts:
suffix = 'ts'
with open(os.path.join(path, 'a8c682203aba6.%s' % suffix), 'w') as fd:
fd.write('')
return devices_path
def init_reaper(self, conf=None, myips=None, fakelogger=False):
if conf is None:
conf = {}
if myips is None:
myips = ['10.10.10.1']
r = reaper.AccountReaper(conf)
r.stats_return_codes = {}
r.stats_containers_deleted = 0
r.stats_containers_remaining = 0
r.stats_containers_possibly_remaining = 0
r.stats_objects_deleted = 0
r.stats_objects_remaining = 0
r.stats_objects_possibly_remaining = 0
r.myips = myips
if fakelogger:
r.logger = unit.debug_logger('test-reaper')
return r
def fake_reap_account(self, *args, **kwargs):
self.called_amount += 1
def fake_account_ring(self):
return FakeRing()
def test_creation(self):
# later config should be extended to assert more config options
r = reaper.AccountReaper({'node_timeout': '3.5'})
self.assertEqual(r.node_timeout, 3.5)
def test_delay_reaping_conf_default(self):
r = reaper.AccountReaper({})
self.assertEqual(r.delay_reaping, 0)
r = reaper.AccountReaper({'delay_reaping': ''})
self.assertEqual(r.delay_reaping, 0)
def test_delay_reaping_conf_set(self):
r = reaper.AccountReaper({'delay_reaping': '123'})
self.assertEqual(r.delay_reaping, 123)
def test_delay_reaping_conf_bad_value(self):
self.assertRaises(ValueError, reaper.AccountReaper,
{'delay_reaping': 'abc'})
def test_reap_warn_after_conf_set(self):
conf = {'delay_reaping': '2', 'reap_warn_after': '3'}
r = reaper.AccountReaper(conf)
self.assertEqual(r.reap_not_done_after, 5)
def test_reap_warn_after_conf_bad_value(self):
self.assertRaises(ValueError, reaper.AccountReaper,
{'reap_warn_after': 'abc'})
def test_reap_delay(self):
time_value = [100]
def _time():
return time_value[0]
time_orig = reaper.time
try:
reaper.time = _time
r = reaper.AccountReaper({'delay_reaping': '10'})
b = FakeBroker()
b.info['delete_timestamp'] = normalize_timestamp(110)
self.assertFalse(r.reap_account(b, 0, None))
b.info['delete_timestamp'] = normalize_timestamp(100)
self.assertFalse(r.reap_account(b, 0, None))
b.info['delete_timestamp'] = normalize_timestamp(90)
self.assertFalse(r.reap_account(b, 0, None))
# KeyError raised immediately as reap_account tries to get the
# account's name to do the reaping.
b.info['delete_timestamp'] = normalize_timestamp(89)
self.assertRaises(KeyError, r.reap_account, b, 0, None)
b.info['delete_timestamp'] = normalize_timestamp(1)
self.assertRaises(KeyError, r.reap_account, b, 0, None)
finally:
reaper.time = time_orig
def test_reap_object(self):
conf = {
'mount_check': 'false',
}
r = reaper.AccountReaper(conf, logger=unit.debug_logger())
mock_path = 'swift.account.reaper.direct_delete_object'
for policy in POLICIES:
r.reset_stats()
with patch(mock_path) as fake_direct_delete:
with patch('swift.account.reaper.time') as mock_time:
mock_time.return_value = 1429117638.86767
r.reap_object('a', 'c', 'partition', cont_nodes, 'o',
policy.idx)
mock_time.assert_called_once_with()
for i, call_args in enumerate(
fake_direct_delete.call_args_list):
cnode = cont_nodes[i % len(cont_nodes)]
host = '%(ip)s:%(port)s' % cnode
device = cnode['device']
headers = {
'X-Container-Host': host,
'X-Container-Partition': 'partition',
'X-Container-Device': device,
'X-Backend-Storage-Policy-Index': policy.idx,
'X-Timestamp': '1429117638.86767'
}
ring = r.get_object_ring(policy.idx)
expected = call(dict(ring.devs[i], index=i), 0,
'a', 'c', 'o',
headers=headers, conn_timeout=0.5,
response_timeout=10)
self.assertEqual(call_args, expected)
self.assertEqual(policy.object_ring.replicas - 1, i)
self.assertEqual(r.stats_objects_deleted,
policy.object_ring.replicas)
def test_reap_object_fail(self):
r = self.init_reaper({}, fakelogger=True)
self.amount_fail = 0
self.max_fail = 1
self.reap_obj_timeout = False
policy = random.choice(list(POLICIES))
with patch('swift.account.reaper.direct_delete_object',
self.fake_direct_delete_object):
r.reap_object('a', 'c', 'partition', cont_nodes, 'o',
policy.idx)
# IMHO, the stat handling in the node loop of reap object is
# over indented, but no one has complained, so I'm not inclined
# to move it. However it's worth noting we're currently keeping
# stats on deletes per *replica* - which is rather obvious from
        # these tests, but the results are surprising because of some
# funny logic to *skip* increments on successful deletes of
# replicas until we have more successful responses than
# failures. This means that while the first replica doesn't
# increment deleted because of the failure, the second one
# *does* get successfully deleted, but *also does not* increment
# the counter (!?).
#
# In the three replica case this leaves only the last deleted
# object incrementing the counter - in the four replica case
# this leaves the last two.
#
# Basically this test will always result in:
# deleted == num_replicas - 2
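        #
        # Concrete walk-through (added commentary, not from the original
        # test): with 3 replicas and max_fail = 1, replica 1 raises (no
        # increment), replica 2 succeeds but is skipped because successes
        # do not yet outnumber failures, and only replica 3 increments -
        # so deleted == 1 == 3 - 2, matching the assertion below.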
self.assertEqual(r.stats_objects_deleted,
policy.object_ring.replicas - 2)
self.assertEqual(r.stats_objects_remaining, 1)
self.assertEqual(r.stats_objects_possibly_remaining, 1)
def test_reap_object_timeout(self):
r = self.init_reaper({}, fakelogger=True)
self.amount_fail = 1
self.max_fail = 0
self.reap_obj_timeout = True
with patch('swift.account.reaper.direct_delete_object',
self.fake_direct_delete_object):
r.reap_object('a', 'c', 'partition', cont_nodes, 'o', 1)
self.assertEqual(r.stats_objects_remaining, 4)
self.assertTrue(r.logger.get_lines_for_level(
'error')[-1].startswith('Timeout Exception'))
def test_reap_object_non_exist_policy_index(self):
r = self.init_reaper({}, fakelogger=True)
r.reap_object('a', 'c', 'partition', cont_nodes, 'o', 2)
self.assertEqual(r.stats_objects_deleted, 0)
self.assertEqual(r.stats_objects_remaining, 1)
self.assertEqual(r.stats_objects_possibly_remaining, 0)
@patch('swift.account.reaper.Ring',
lambda *args, **kwargs: unit.FakeRing())
def test_reap_container(self):
policy = random.choice(list(POLICIES))
r = self.init_reaper({}, fakelogger=True)
with patch.multiple('swift.account.reaper',
direct_get_container=DEFAULT,
direct_delete_object=DEFAULT,
direct_delete_container=DEFAULT) as mocks:
headers = {'X-Backend-Storage-Policy-Index': policy.idx}
obj_listing = [{'name': 'o'}]
def fake_get_container(*args, **kwargs):
try:
obj = obj_listing.pop(0)
except IndexError:
obj_list = []
else:
obj_list = [obj]
return headers, obj_list
mocks['direct_get_container'].side_effect = fake_get_container
with patch('swift.account.reaper.time') as mock_time:
mock_time.side_effect = [1429117638.86767, 1429117639.67676]
r.reap_container('a', 'partition', acc_nodes, 'c')
# verify calls to direct_delete_object
mock_calls = mocks['direct_delete_object'].call_args_list
self.assertEqual(policy.object_ring.replicas, len(mock_calls))
for call_args in mock_calls:
_args, kwargs = call_args
self.assertEqual(kwargs['headers']
['X-Backend-Storage-Policy-Index'],
policy.idx)
self.assertEqual(kwargs['headers']
['X-Timestamp'],
'1429117638.86767')
# verify calls to direct_delete_container
self.assertEqual(mocks['direct_delete_container'].call_count, 3)
for i, call_args in enumerate(
mocks['direct_delete_container'].call_args_list):
anode = acc_nodes[i % len(acc_nodes)]
host = '%(ip)s:%(port)s' % anode
device = anode['device']
headers = {
'X-Account-Host': host,
'X-Account-Partition': 'partition',
'X-Account-Device': device,
'X-Account-Override-Deleted': 'yes',
'X-Timestamp': '1429117639.67676'
}
ring = r.get_object_ring(policy.idx)
expected = call(dict(ring.devs[i], index=i), 0, 'a', 'c',
headers=headers, conn_timeout=0.5,
response_timeout=10)
self.assertEqual(call_args, expected)
self.assertEqual(r.stats_objects_deleted, policy.object_ring.replicas)
def test_reap_container_get_object_fail(self):
r = self.init_reaper({}, fakelogger=True)
self.get_fail = True
self.reap_obj_fail = False
self.amount_delete_fail = 0
self.max_delete_fail = 0
with patch('swift.account.reaper.direct_get_container',
self.fake_direct_get_container), \
patch('swift.account.reaper.direct_delete_container',
self.fake_direct_delete_container), \
patch('swift.account.reaper.AccountReaper.get_container_ring',
self.fake_container_ring), \
patch('swift.account.reaper.AccountReaper.reap_object',
self.fake_reap_object):
r.reap_container('a', 'partition', acc_nodes, 'c')
self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 1)
self.assertEqual(r.stats_containers_deleted, 1)
def test_reap_container_partial_fail(self):
r = self.init_reaper({}, fakelogger=True)
self.get_fail = False
self.timeout = False
self.reap_obj_fail = False
self.amount_delete_fail = 0
self.max_delete_fail = 4
with patch('swift.account.reaper.direct_get_container',
self.fake_direct_get_container), \
patch('swift.account.reaper.direct_delete_container',
self.fake_direct_delete_container), \
patch('swift.account.reaper.AccountReaper.get_container_ring',
self.fake_container_ring), \
patch('swift.account.reaper.AccountReaper.reap_object',
self.fake_reap_object):
r.reap_container('a', 'partition', acc_nodes, 'c')
self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 4)
self.assertEqual(r.stats_containers_possibly_remaining, 1)
def test_reap_container_full_fail(self):
r = self.init_reaper({}, fakelogger=True)
self.get_fail = False
self.timeout = False
self.reap_obj_fail = False
self.amount_delete_fail = 0
self.max_delete_fail = 5
with patch('swift.account.reaper.direct_get_container',
self.fake_direct_get_container), \
patch('swift.account.reaper.direct_delete_container',
self.fake_direct_delete_container), \
patch('swift.account.reaper.AccountReaper.get_container_ring',
self.fake_container_ring), \
patch('swift.account.reaper.AccountReaper.reap_object',
self.fake_reap_object):
r.reap_container('a', 'partition', acc_nodes, 'c')
self.assertEqual(r.logger.get_increment_counts()['return_codes.4'], 5)
self.assertEqual(r.stats_containers_remaining, 1)
def test_reap_container_get_object_timeout(self):
r = self.init_reaper({}, fakelogger=True)
self.get_fail = False
self.timeout = True
self.reap_obj_fail = False
self.amount_delete_fail = 0
self.max_delete_fail = 0
with patch('swift.account.reaper.direct_get_container',
self.fake_direct_get_container), \
patch('swift.account.reaper.direct_delete_container',
self.fake_direct_delete_container), \
patch('swift.account.reaper.AccountReaper.get_container_ring',
self.fake_container_ring), \
patch('swift.account.reaper.AccountReaper.reap_object',
self.fake_reap_object):
r.reap_container('a', 'partition', acc_nodes, 'c')
self.assertTrue(r.logger.get_lines_for_level(
'error')[-1].startswith('Timeout Exception'))
@patch('swift.account.reaper.Ring',
lambda *args, **kwargs: unit.FakeRing())
def test_reap_container_non_exist_policy_index(self):
r = self.init_reaper({}, fakelogger=True)
with patch.multiple('swift.account.reaper',
direct_get_container=DEFAULT,
direct_delete_object=DEFAULT,
direct_delete_container=DEFAULT) as mocks:
headers = {'X-Backend-Storage-Policy-Index': 2}
obj_listing = [{'name': 'o'}]
def fake_get_container(*args, **kwargs):
try:
obj = obj_listing.pop(0)
except IndexError:
obj_list = []
else:
obj_list = [obj]
return headers, obj_list
mocks['direct_get_container'].side_effect = fake_get_container
r.reap_container('a', 'partition', acc_nodes, 'c')
self.assertEqual(r.logger.get_lines_for_level('error'), [
'ERROR: invalid storage policy index: 2'])
def fake_reap_container(self, *args, **kwargs):
self.called_amount += 1
self.r.stats_containers_deleted = 1
self.r.stats_objects_deleted = 1
self.r.stats_containers_remaining = 1
self.r.stats_objects_remaining = 1
self.r.stats_containers_possibly_remaining = 1
self.r.stats_objects_possibly_remaining = 1
self.r.stats_return_codes[2] = \
self.r.stats_return_codes.get(2, 0) + 1
def test_reap_account(self):
containers = ('c1', 'c2', 'c3', '')
broker = FakeAccountBroker(containers)
self.called_amount = 0
self.r = r = self.init_reaper({}, fakelogger=True)
r.start_time = time.time()
with patch('swift.account.reaper.AccountReaper.reap_container',
self.fake_reap_container), \
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring):
nodes = r.get_account_ring().get_part_nodes()
for container_shard, node in enumerate(nodes):
self.assertTrue(
r.reap_account(broker, 'partition', nodes,
container_shard=container_shard))
self.assertEqual(self.called_amount, 4)
info_lines = r.logger.get_lines_for_level('info')
self.assertEqual(len(info_lines), 10)
        # info_lines alternate (start message, stats message); consume pairwise
        for start_line, stat_line in zip(*[iter(info_lines)] * 2):
self.assertEqual(start_line, 'Beginning pass on account a')
self.assertTrue(stat_line.find('1 containers deleted'))
self.assertTrue(stat_line.find('1 objects deleted'))
self.assertTrue(stat_line.find('1 containers remaining'))
self.assertTrue(stat_line.find('1 objects remaining'))
self.assertTrue(stat_line.find('1 containers possibly remaining'))
self.assertTrue(stat_line.find('1 objects possibly remaining'))
self.assertTrue(stat_line.find('return codes: 2 2xxs'))
@patch('swift.account.reaper.Ring',
lambda *args, **kwargs: unit.FakeRing())
def test_basic_reap_account(self):
self.r = reaper.AccountReaper({})
self.r.account_ring = None
self.r.get_account_ring()
self.assertEqual(self.r.account_ring.replica_count, 3)
self.assertEqual(len(self.r.account_ring.devs), 3)
def test_reap_account_no_container(self):
broker = FakeAccountBroker(tuple())
self.r = r = self.init_reaper({}, fakelogger=True)
self.called_amount = 0
r.start_time = time.time()
with patch('swift.account.reaper.AccountReaper.reap_container',
self.fake_reap_container), \
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring):
nodes = r.get_account_ring().get_part_nodes()
self.assertTrue(r.reap_account(broker, 'partition', nodes))
self.assertTrue(r.logger.get_lines_for_level(
'info')[-1].startswith('Completed pass'))
self.assertEqual(self.called_amount, 0)
def test_reap_device(self):
devices = self.prepare_data_dir()
self.called_amount = 0
conf = {'devices': devices}
r = self.init_reaper(conf)
with patch('swift.account.reaper.AccountBroker',
FakeAccountBroker), \
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring), \
patch('swift.account.reaper.AccountReaper.reap_account',
self.fake_reap_account):
r.reap_device('sda1')
self.assertEqual(self.called_amount, 1)
def test_reap_device_with_ts(self):
devices = self.prepare_data_dir(ts=True)
self.called_amount = 0
conf = {'devices': devices}
r = self.init_reaper(conf=conf)
with patch('swift.account.reaper.AccountBroker',
FakeAccountBroker), \
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring), \
patch('swift.account.reaper.AccountReaper.reap_account',
self.fake_reap_account):
r.reap_device('sda1')
self.assertEqual(self.called_amount, 0)
def test_reap_device_with_not_my_ip(self):
devices = self.prepare_data_dir()
self.called_amount = 0
conf = {'devices': devices}
r = self.init_reaper(conf, myips=['10.10.1.2'])
with patch('swift.account.reaper.AccountBroker',
FakeAccountBroker), \
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring), \
patch('swift.account.reaper.AccountReaper.reap_account',
self.fake_reap_account):
r.reap_device('sda1')
self.assertEqual(self.called_amount, 0)
def test_reap_device_with_sharding(self):
devices = self.prepare_data_dir()
conf = {'devices': devices}
r = self.init_reaper(conf, myips=['10.10.10.2'])
container_shard_used = [-1]
def fake_reap_account(*args, **kwargs):
container_shard_used[0] = kwargs.get('container_shard')
with patch('swift.account.reaper.AccountBroker',
FakeAccountBroker), \
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring), \
patch('swift.account.reaper.AccountReaper.reap_account',
fake_reap_account):
r.reap_device('sda1')
# 10.10.10.2 is second node from ring
self.assertEqual(container_shard_used[0], 1)
def test_reap_device_with_sharding_and_various_devices(self):
devices = self.prepare_data_dir(device='sda2')
conf = {'devices': devices}
r = self.init_reaper(conf)
container_shard_used = [-1]
def fake_reap_account(*args, **kwargs):
container_shard_used[0] = kwargs.get('container_shard')
with patch('swift.account.reaper.AccountBroker',
FakeAccountBroker), \
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring), \
patch('swift.account.reaper.AccountReaper.reap_account',
fake_reap_account):
r.reap_device('sda2')
        # sda2 belongs to 10.10.10.1 and is the fourth node in the ring
self.assertEqual(container_shard_used[0], 3)
devices = self.prepare_data_dir(device='sda3')
conf = {'devices': devices}
r = self.init_reaper(conf)
container_shard_used = [-1]
with patch('swift.account.reaper.AccountBroker',
FakeAccountBroker), \
patch('swift.account.reaper.AccountReaper.get_account_ring',
self.fake_account_ring), \
patch('swift.account.reaper.AccountReaper.reap_account',
fake_reap_account):
r.reap_device('sda3')
        # sda3 belongs to 10.10.10.1 and is the fifth node in the ring
self.assertEqual(container_shard_used[0], 4)
def test_reap_account_with_sharding(self):
devices = self.prepare_data_dir()
self.called_amount = 0
conf = {'devices': devices}
r = self.init_reaper(conf, myips=['10.10.10.2'])
container_reaped = [0]
def fake_list_containers_iter(self, *args):
for container in self.containers:
if container in self.containers_yielded:
continue
yield container, None, None, None
self.containers_yielded.append(container)
def fake_reap_container(self, account, account_partition,
account_nodes, container):
container_reaped[0] += 1
fake_ring = FakeRing()
with patch('swift.account.reaper.AccountBroker',
FakeAccountBroker), \
patch(
'swift.account.reaper.AccountBroker.list_containers_iter',
fake_list_containers_iter), \
patch('swift.account.reaper.AccountReaper.reap_container',
fake_reap_container):
fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g'])
r.reap_account(fake_broker, 10, fake_ring.nodes, 0)
self.assertEqual(container_reaped[0], 0)
fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g'])
container_reaped[0] = 0
r.reap_account(fake_broker, 10, fake_ring.nodes, 1)
self.assertEqual(container_reaped[0], 1)
container_reaped[0] = 0
fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g'])
r.reap_account(fake_broker, 10, fake_ring.nodes, 2)
self.assertEqual(container_reaped[0], 0)
container_reaped[0] = 0
fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g'])
r.reap_account(fake_broker, 10, fake_ring.nodes, 3)
self.assertEqual(container_reaped[0], 3)
container_reaped[0] = 0
fake_broker = FakeAccountBroker(['c', 'd', 'e', 'f', 'g'])
r.reap_account(fake_broker, 10, fake_ring.nodes, 4)
self.assertEqual(container_reaped[0], 1)
def test_run_once(self):
def prepare_data_dir():
devices_path = tempfile.mkdtemp()
# will be deleted by teardown
self.to_delete.append(devices_path)
path = os.path.join(devices_path, 'sda1', DATADIR)
os.makedirs(path)
return devices_path
def init_reaper(devices):
r = reaper.AccountReaper({'devices': devices})
return r
devices = prepare_data_dir()
r = init_reaper(devices)
with patch('swift.account.reaper.ismount', lambda x: True):
with patch(
'swift.account.reaper.AccountReaper.reap_device') as foo:
r.run_once()
self.assertEqual(foo.called, 1)
with patch('swift.account.reaper.ismount', lambda x: False):
with patch(
'swift.account.reaper.AccountReaper.reap_device') as foo:
r.run_once()
self.assertFalse(foo.called)
with patch('swift.account.reaper.AccountReaper.reap_device') as foo:
r.logger = unit.debug_logger('test-reaper')
r.devices = 'thisdeviceisbad'
r.run_once()
self.assertTrue(r.logger.get_lines_for_level(
'error')[-1].startswith('Exception in top-level account reaper'))
def test_run_forever(self):
def fake_sleep(val):
self.val = val
def fake_random():
return 1
def fake_run_once():
raise Exception('exit')
def init_reaper():
r = reaper.AccountReaper({'interval': 1})
r.run_once = fake_run_once
return r
r = init_reaper()
with patch('swift.account.reaper.sleep', fake_sleep):
with patch('swift.account.reaper.random.random', fake_random):
try:
r.run_forever()
except Exception as err:
pass
self.assertEqual(self.val, 1)
self.assertEqual(str(err), 'exit')
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
vrieni/orange | Orange/orng/updateOrange.py | 6 | 24544 | #import orngOrangeFoldersQt4
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import os, re, urllib, sys
import md5, cPickle
# This is Orange Update program. It can check on the web if there are any updates available and download them.
# User can select a list of folders that he wants to update and a list of folders that he wants to ignore.
# In case that a file was locally changed and a new version of the same file is available, the program offers the user
# to update to the new file or to keep the old one.
#
defaultIcon = ['16 13 5 1', '. c #040404', '# c #808304', 'a c None', 'b c #f3f704', 'c c #f3f7f3', 'aaaaaaaaa...aaaa', 'aaaaaaaa.aaa.a.a', 'aaaaaaaaaaaaa..a',
'a...aaaaaaaa...a', '.bcb.......aaaaa', '.cbcbcbcbc.aaaaa', '.bcbcbcbcb.aaaaa', '.cbcb...........', '.bcb.#########.a', '.cb.#########.aa', '.b.#########.aaa', '..#########.aaaa', '...........aaaaa']
CONFLICT_ASK = 0
CONFLICT_OVERWRITE = 1
CONFLICT_KEEP = 2
def splitDirs(path):
dirs, filename = os.path.split(path)
listOfDirs = []
while dirs != "":
dirs, dir = os.path.split(dirs)
listOfDirs.insert(0, dir)
return listOfDirs
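# Illustrative example (added commentary, not in the original source):
#   splitDirs("OrangeWidgets/Data/OWFile.py") => ["OrangeWidgets", "Data"]
# the trailing file name is split off and never added to the list.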
class OptionsDlg(QDialog):
def __init__(self, settings):
QDialog.__init__(self, None)
self.setWindowTitle("Update Options")
self.setLayout(QVBoxLayout())
self.groupBox = QGroupBox("Updating Options", self)
self.layout().addWidget(self.groupBox)
self.check1 = QCheckBox("Update scripts", self.groupBox)
self.check2 = QCheckBox("Update binary files", self.groupBox)
self.check3 = QCheckBox("Download new files", self.groupBox)
self.groupBox.setLayout(QVBoxLayout())
for c in [self.check1, self.check2, self.check3]:
self.groupBox.layout().addWidget(c)
self.groupBox2 = QGroupBox("Solving Conflicts", self)
self.groupBox2.setLayout(QVBoxLayout())
self.layout().addWidget(self.groupBox2)
label = QLabel("When your local file was edited\nand a newer version is available...", self.groupBox2)
self.groupBox2.layout().addWidget(label)
self.combo = QComboBox(self.groupBox2)
for s in ["Ask what to do", "Overwrite your local copy with new file", "Keep your local file"]:
self.combo.addItem(s)
self.groupBox2.layout().addWidget(self.combo)
self.check1.setChecked(settings["scripts"])
self.check2.setChecked(settings["binary"])
self.check3.setChecked(settings["new"])
self.combo.setCurrentIndex(settings["conflicts"])
widget = QWidget(self)
self.layout().addWidget(widget)
widget.setLayout(QHBoxLayout())
widget.layout().addStretch(1)
okButton = QPushButton('OK', widget)
widget.layout().addWidget(okButton)
## self.topLayout.addWidget(okButton)
self.connect(okButton, SIGNAL('clicked()'),self,SLOT('accept()'))
cancelButton = QPushButton('Cancel', widget)
widget.layout().addWidget(cancelButton)
## self.topLayout.addWidget(cancelButton)
self.connect(cancelButton, SIGNAL('clicked()'),self,SLOT('reject()'))
def accept(self):
self.settings = {"scripts": self.check1.isChecked(), "binary": self.check2.isChecked(), "new": self.check3.isChecked(), "conflicts": self.combo.currentIndex()}
QDialog.accept(self)
class FoldersDlg(QDialog):
def __init__(self, caption):
QDialog.__init__(self, None)
self.setLayout(QVBoxLayout())
self.groupBox = QGroupBox(self)
self.layout().addWidget(self.groupBox)
self.groupBox.setTitle(" " + caption.strip() + " ")
self.groupBox.setLayout(QVBoxLayout())
        self.groupBox.layout().setMargin(20)
        self.setWindowTitle("Select Folders")
self.resize(300,100)
self.folders = []
self.checkBoxes = []
def addCategory(self, text, checked = 1, indent = 0):
widget = QWidget(self.groupBox)
self.groupBox.layout().addWidget(widget)
hboxLayout = QHBoxLayout()
widget.setLayout(hboxLayout)
if indent:
sep = QWidget(widget)
sep.setFixedSize(19, 8)
hboxLayout.addWidget(sep)
check = QCheckBox(text, widget)
hboxLayout.addWidget(check)
check.setChecked(checked)
self.checkBoxes.append(check)
self.folders.append(text)
def addLabel(self, text):
label = QLabel(text, self.groupBox)
self.groupBox.layout().addWidget(label)
def finishedAdding(self, ok = 1, cancel = 1):
widget = QWidget(self)
self.layout().addWidget(widget)
widgetLayout = QHBoxLayout(widget)
widget.setLayout(widgetLayout)
widgetLayout.addStretch(1)
if ok:
okButton = QPushButton('OK', widget)
widgetLayout.addWidget(okButton)
self.connect(okButton, SIGNAL('clicked()'),self,SLOT('accept()'))
if cancel:
cancelButton = QPushButton('Cancel', widget)
widgetLayout.addWidget(cancelButton)
self.connect(cancelButton, SIGNAL('clicked()'),self,SLOT('reject()'))
class updateOrangeDlg(QMainWindow):
def __init__(self,*args):
QMainWindow.__init__(self, *args)
self.resize(600,600)
self.setWindowTitle("Orange Update")
self.toolbar = self.addToolBar("Toolbar")
self.text = QTextEdit(self)
self.text.setReadOnly(1)
self.text.zoomIn(2)
self.setCentralWidget(self.text)
self.statusBar = QStatusBar(self)
self.setStatusBar(self.statusBar)
self.statusBar.showMessage('Ready')
import updateOrange
self.orangeDir = os.path.split(os.path.abspath(updateOrange.__file__))[0]
os.chdir(self.orangeDir) # we have to set the current dir to orange dir since we can call update also from orange canvas
self.settings = {"scripts":1, "binary":1, "new":1, "conflicts":0}
if os.path.exists("updateOrange.set"):
file = open("updateOrange.set", "r")
self.settings = cPickle.load(file)
file.close()
self.re_vLocalLine = re.compile(r'(?P<fname>.*)=(?P<version>[.0-9]*)(:?)(?P<md5>.*)')
self.re_vInternetLine = re.compile(r'(?P<fname>.*)=(?P<version>[.0-9]*)(:?)(?P<location>.*)')
self.re_widget = re.compile(r'(?P<category>.*)[/,\\].*')
self.re_documentation = re.compile(r'doc[/,\\].*')
self.downfile = os.path.join(self.orangeDir, "whatsdown.txt")
self.updateUrl = "http://orange.biolab.si/download/update/"
self.binaryUrl = "http://orange.biolab.si/download/binaries/%i%i/" % sys.version_info[:2]
self.whatsupUrl = "http://orange.biolab.si/download/whatsup.txt"
self.updateGroups = []
self.dontUpdateGroups = []
self.newGroups = []
self.downstuff = {}
# read updateGroups and dontUpdateGroups
self.addText("Welcome to the Orange update.")
try:
vf = open(self.downfile)
self.downstuff, self.updateGroups, self.dontUpdateGroups = self.readLocalVersionFile(vf.readlines(), updateGroups = 1)
vf.close()
except:
pass
self.addText("To download latest versions of files click the 'Update' button.", nobr = 0)
# create buttons
iconsDir = os.path.join(self.orangeDir, "OrangeCanvas/icons")
self.updateIcon = os.path.join(iconsDir, "update.png")
self.foldersIcon = os.path.join(iconsDir, "folders.png")
self.optionsIcon = os.path.join(iconsDir, "options.png")
if not os.path.exists(self.updateIcon): self.updateIcon = defaultIcon
if not os.path.exists(self.foldersIcon): self.foldersIcon = defaultIcon
if not os.path.exists(self.optionsIcon): self.optionsIcon = defaultIcon
def createButton(text, icon, callback, tooltip):
b = QToolButton(self.toolbar)
self.toolbar.layout().addWidget(b)
b.setIcon(icon)
b.setText(text)
self.connect(b, SIGNAL("clicked()"), callback)
b.setToolTip(tooltip)
self.toolUpdate = self.toolbar.addAction(QIcon(self.updateIcon), "Update" , self.executeUpdate)
self.toolbar.addSeparator()
self.toolFolders = self.toolbar.addAction(QIcon(self.foldersIcon), "Folders" , self.showFolders)
self.toolOptions = self.toolbar.addAction(QIcon(self.optionsIcon), "Options" , self.showOptions)
self.setWindowIcon(QIcon(self.updateIcon))
self.move((qApp.desktop().width()-self.width())/2, (qApp.desktop().height()-self.height())/2) # center the window
self.show()
# ####################################
# show the list of possible folders
def showFolders(self):
self.updateGroups = []
self.dontUpdateGroups = []
try:
vf = open(self.downfile)
self.downstuff, self.updateGroups, self.dontUpdateGroups = self.readLocalVersionFile(vf.readlines(), updateGroups = 1)
vf.close()
except:
self.addText("Failed to locate file 'whatsdown.txt'. There is no information on current versions of Orange files. By clicking 'Update files' you will download the latest versions of files.", nobr = 0)
return
groups = [(name, 1) for name in self.updateGroups] + [(name, 0) for name in self.dontUpdateGroups]
groups.sort()
groupDict = dict(groups)
dlg = FoldersDlg("Select Orange folders that you wish to update")
dlg.setWindowIcon(QIcon(self.foldersIcon))
dlg.addCategory("Orange Canvas", groupDict.get("Orange Canvas", 1))
dlg.addCategory("Documentation", groupDict.get("Documentation", 1))
dlg.addCategory("Orange Root", groupDict.get("Orange Root", 1))
dlg.addLabel("Orange Widgets:")
for (group, sel) in groups:
if group in ["Orange Canvas", "Documentation", "Orange Root"]: continue
dlg.addCategory(group, sel, indent = 1)
dlg.finishedAdding(cancel = 1)
dlg.move((qApp.desktop().width()-dlg.width())/2, (qApp.desktop().height()-400)/2) # center dlg window
res = dlg.exec_()
if res == QDialog.Accepted:
self.updateGroups = []
self.dontUpdateGroups = []
for i in range(len(dlg.checkBoxes)):
if dlg.checkBoxes[i].isChecked(): self.updateGroups.append(dlg.folders[i])
else: self.dontUpdateGroups.append(dlg.folders[i])
self.writeVersionFile()
return
def showOptions(self):
dlg = OptionsDlg(self.settings)
dlg.setWindowIcon(QIcon(self.optionsIcon))
res = dlg.exec_()
if res == QDialog.Accepted:
self.settings = dlg.settings
def readLocalVersionFile(self, data, updateGroups = 1):
versions = {}
updateGroups = []; dontUpdateGroups = []
for line in data:
if not line: continue
line = line.replace("\r", "") # replace \r in case of linux files
line = line.replace("\n", "")
if not line: continue
if line[0] == "+":
updateGroups.append(line[1:])
elif line[0] == "-":
dontUpdateGroups.append(line[1:])
else:
fnd = self.re_vLocalLine.match(line)
if fnd:
fname, version, md = fnd.group("fname", "version", "md5")
fname = fname.replace("\\", "/")
versions[fname] = ([int(x) for x in version.split(".")], md)
# add widget category if not already in updateGroups
dirs = splitDirs(fname)
if len(dirs) >= 2 and dirs[0].lower() == "orangewidgets" and dirs[1] not in updateGroups + dontUpdateGroups and dirs[1].lower() != "icons":
updateGroups.append(dirs[1])
if len(dirs) >= 1 and dirs[0].lower() == "doc" and "Documentation" not in updateGroups + dontUpdateGroups: updateGroups.append("Documentation")
if len(dirs) >= 1 and dirs[0].lower() == "orangecanvas" and "Orange Canvas" not in updateGroups + dontUpdateGroups: updateGroups.append("Orange Canvas")
if len(dirs) == 1 and "Orange Root" not in updateGroups + dontUpdateGroups: updateGroups.append("Orange Root")
return versions, updateGroups, dontUpdateGroups
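    # Sketch of the whatsdown.txt line formats parsed above (illustrative,
    # reconstructed from the regular expressions, not quoted from a real file):
    #   +Orange Canvas                           -> category to update
    #   -Documentation                           -> category to skip
    #   OrangeCanvas/orngCanvas.pyw=1.2:0f3a...  -> file=version:md5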
def readInternetVersionFile(self, updateGroups = 1):
try:
f = urllib.urlopen(self.whatsupUrl)
except IOError:
self.addText('Unable to download current status file. Check your internet connection.')
return {}, [], []
data = f.read().splitlines()
versions = {}
updateGroups = []; dontUpdateGroups = []
for line in data:
if not line: continue
line = line.replace("\r", "") # replace \r in case of linux files
line = line.replace("\n", "")
if not line: continue
if line[0] == "+":
updateGroups.append(line[1:])
elif line[0] == "-":
dontUpdateGroups.append(line[1:])
else:
fnd = self.re_vInternetLine.match(line)
if fnd:
fname, version, location = fnd.group("fname", "version", "location")
fname = fname.replace("\\", "/")
versions[fname] = ([int(x) for x in version.split(".")], location)
# add widget category if not already in updateGroups
dirs = splitDirs(fname)
if len(dirs) >= 2 and dirs[0].lower() == "orangewidgets" and dirs[1] not in updateGroups and dirs[1].lower() != "icons":
updateGroups.append(dirs[1])
return versions, updateGroups, dontUpdateGroups
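    # The server-side whatsup.txt uses the same layout, except that each file
    # line carries a download location instead of an md5 digest
    # (file=version:location), as captured by re_vInternetLine above.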
def writeVersionFile(self):
vf = open(self.downfile, "wt")
itms = self.downstuff.items()
itms.sort(lambda x,y:cmp(x[0], y[0]))
for g in self.dontUpdateGroups:
vf.write("-%s\n" % g)
for fname, (version, md) in itms:
vf.write("%s=%s:%s\n" % (fname, reduce(lambda x,y:x+"."+y, [`x` for x in version]), md))
vf.close()
def executeUpdate(self):
updatedFiles = 0
newFiles = 0
if self.settings["scripts"]:
self.addText("Reading file status from web server")
self.updateGroups = []; self.dontUpdateGroups = []; self.newGroups = []
self.downstuff = {}
upstuff, upUpdateGroups, upDontUpdateGroups = self.readInternetVersionFile(updateGroups = 0)
if upstuff == {}: return
try:
vf = open(self.downfile)
self.addText("Reading local file status")
self.downstuff, self.updateGroups, self.dontUpdateGroups = self.readLocalVersionFile(vf.readlines(), updateGroups = 1)
vf.close()
except:
            res = QMessageBox.information(self, 'Update Orange', "The 'whatsdown.txt' file is missing (most likely because you downloaded Orange from CVS).\nThis file contains information about the versions of your local Orange files.\n\nIf you press 'Replace Local Files' you will not just replace updated files, but will \noverwrite all your local Orange files with the latest versions from the web.\n", 'Replace Local Files', "Cancel", "", 0, 1)
if res != 0: return
itms = upstuff.items()
itms.sort(lambda x,y:cmp(x[0], y[0]))
for category in upUpdateGroups: #+ upDontUpdateGroups:
if category not in self.updateGroups + self.dontUpdateGroups:
self.newGroups.append(category)
# show dialog with new groups
if self.newGroups != []:
dlg = FoldersDlg("Select new categories you wish to download")
dlg.setWindowIcon(QIcon(self.foldersIcon))
for group in self.newGroups: dlg.addCategory(group)
dlg.finishedAdding(cancel = 0)
res = dlg.exec_()
for i in range(len(dlg.checkBoxes)):
if dlg.checkBoxes[i].isChecked():
self.updateGroups.append(dlg.folders[i])
else:
self.dontUpdateGroups.append(dlg.folders[i])
self.newGroups = []
# update new files
self.addText("Updating scripts...")
self.statusBar.showMessage("Updating scripts")
for fname, (version, location) in itms:
qApp.processEvents()
# check if it is a widget directory that we don't want to update
dirs = splitDirs(fname)
if len(dirs) >= 2 and dirs[0].lower() == "orangewidgets" and dirs[1] in self.dontUpdateGroups: continue
if len(dirs) >= 1 and dirs[0].lower() == "doc" and "Documentation" in self.dontUpdateGroups: continue
if len(dirs) >= 1 and dirs[0].lower() == "orangecanvas" and "Orange Canvas" in self.dontUpdateGroups: continue
if len(dirs) == 1 and "Orange Root" in self.dontUpdateGroups: continue
if os.path.exists(fname) and self.downstuff.has_key(fname) and self.downstuff[fname][0] < upstuff[fname][0]: # there is a newer version
updatedFiles += self.updatefile(self.updateUrl + fname, location, version, self.downstuff[fname][1], "Updating")
elif not os.path.exists(fname) or not self.downstuff.has_key(fname):
if self.settings["new"]:
                        newFiles += self.updatefile(self.updateUrl + fname, location, version, "", "Downloading new file")
else:
self.addText("Skipping new file %s" % (fname))
self.writeVersionFile()
else:
self.addText("Skipping updating scripts...")
if self.settings["binary"]:
self.addText("Updating binaries...")
updatedFiles += self.updatePyd()
else:
self.addText("Skipping updateing binaries...")
self.addText("Update finished. New files: <b>%d</b>. Updated files: <b>%d</b>\n" %(newFiles, updatedFiles))
self.statusBar.showMessage("Update finished.")
# update binary files
def updatePyd(self):
files = "orange", "corn", "statc", "orangeom", "orangene", "_orngCRS"
baseurl = "http://orange.biolab.si/download/binaries/%i%i/" % sys.version_info[:2]
repository_stamps = dict([tuple(x.split()) for x in urllib.urlopen(baseurl + "stamps_pyd.txt") if x.strip()])
updated = 0
for fle in files:
if not os.path.exists(fle+".pyd") or repository_stamps[fle+".pyd"] != md5.md5(file(fle+".pyd", "rb").read()).hexdigest().upper():
updated += self.updatefile(baseurl + fle + ".pyd", fle + ".pyd", "", "", "Updating")
return updated
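    # Note (added commentary): judging from the parsing above, stamps_pyd.txt
    # is assumed to hold one "<filename> <MD5HEX>" pair per line; a local .pyd
    # is re-downloaded whenever its digest differs from the repository stamp.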
# #########################################################
# get new file from the internet and overwrite the old file
# webName = complete path to the file on the web
# localName = path and name of the file on the local disk
# version = the newest file version
# md = hash value of the local file when it was downloaded from the internet - needed to compare if the user has changed the local version of the file
def updatefile(self, webName, localName, version, md, type = "Downloading"):
self.addText(type + " %s ... " % localName, addBreak = 0)
qApp.processEvents()
try:
urllib.urlretrieve(webName, localName + ".temp", self.updateDownloadStatus)
except IOError, inst:
self.addText('<font color="#FF0000">Failed</font> (%s)' % (inst[1]))
return 0
self.statusBar.showMessage("")
dname = os.path.dirname(localName)
if dname and not os.path.exists(dname):
os.makedirs(dname)
isBinaryFile = localName[-3:].lower() in ["pyd"]
if not isBinaryFile:
# read existing file
if md != "" and os.path.exists(localName):
currmd = self.computeFileMd(localName)
if currmd.hexdigest() != md: # the local file has changed
if self.settings["conflicts"] == CONFLICT_OVERWRITE:
res = 0
elif self.settings["conflicts"] == CONFLICT_KEEP:
res = 1
elif self.settings["conflicts"] == CONFLICT_ASK:
res = QMessageBox.information(self,'Update Orange',"Your local file '%s' was edited, but a newer version of this file is available on the web.\nDo you wish to overwrite local copy with newest version (a backup of current file will be created) or keep your current file?" % (os.path.split(localName)[1]), 'Overwrite with newest', 'Keep current file')
if res == 0: # overwrite
currmd = self.computeFileMd(localName+".temp")
try:
ext = ".bak"
if os.path.exists(localName + ext):
i = 1
while os.path.exists(localName + ext + str(i)): i += 1
ext = ext+str(i)
os.rename(localName, localName + ext) # create backup
except OSError, inst:
self.addText('<font color="#FF0000">Failed</font> (%s)' % (inst[1]))
self.addText('Unable to update file <font color="#FF0000">%s</font>. Please close all programs that are using it.' % (os.path.split(localName)[1]))
return 0
elif res == 1: # keep local
self.addText('<font color="#0000FF">Skipping</font>')
return 0
else:
currmd = self.computeFileMd(localName + ".temp")
try:
if os.path.exists(localName):
os.remove(localName)
os.rename(localName + ".temp", localName)
if not isBinaryFile:
self.downstuff[localName[2:]] = (version, currmd.hexdigest()) # remove "./" from localName
self.addText('<font color="#0000FF">OK</font>')
return 1
except OSError, inst:
self.addText('<font color="#FF0000">Failed</font> (%s)' % (inst[1]))
self.addText('Unable to update file <font color="#FF0000">%s</font>. Please close all programs that are using it.' % (os.path.split(localName)[1]))
return 0
# show percent of finished download
def updateDownloadStatus(self, blk_cnt, blk_size, tot_size):
self.statusBar.showMessage("Downloaded %.1f%%" % (100*min(tot_size, blk_cnt*blk_size) / (tot_size or 1)))
def computeFileMd(self, fname):
f = open(fname, "rb")
md = md5.new()
md.update(f.read())
f.close()
return md
def addText(self, text, nobr = 1, addBreak = 1):
cursor = QTextCursor(self.text.textCursor()) # clear the current text selection so that
cursor.movePosition(QTextCursor.End, QTextCursor.MoveAnchor) # the text will be appended to the end of the
self.text.setTextCursor(cursor) # existing text
if nobr: self.text.insertHtml('<nobr>' + text + '</nobr>')
else: self.text.insertHtml(text)
cursor.movePosition(QTextCursor.End, QTextCursor.MoveAnchor) # and then scroll down to the end of the text
self.text.setTextCursor(cursor)
if addBreak: self.text.insertHtml("<br>")
self.text.verticalScrollBar().setValue(self.text.verticalScrollBar().maximum())
def keyPressEvent(self, e):
if e.key() == Qt.Key_Escape:
self.close()
else: QMainWindow.keyPressEvent(self, e)
def closeEvent(self, e):
f = open("updateOrange.set", "wt")
cPickle.dump(self.settings, f)
f.close()
QMainWindow.closeEvent(self, e)
# show application dlg
if __name__ == "__main__":
app = QApplication(sys.argv)
dlg = updateOrangeDlg()
dlg.show()
app.exec_()
| gpl-3.0 |
juped/your-urbit | outside/commonmark/test/spec_tests.py | 23 | 5234 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
from difflib import unified_diff
import argparse
import re
import json
from cmark import CMark
from normalize import normalize_html
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Run cmark tests.')
parser.add_argument('--program', dest='program', nargs='?', default=None,
help='program to test')
parser.add_argument('--spec', dest='spec', nargs='?', default='spec.txt',
help='path to spec')
parser.add_argument('--pattern', dest='pattern', nargs='?',
default=None, help='limit to sections matching regex pattern')
parser.add_argument('--library-dir', dest='library_dir', nargs='?',
default=None, help='directory containing dynamic library')
parser.add_argument('--no-normalize', dest='normalize',
action='store_const', const=False, default=True,
help='do not normalize HTML')
parser.add_argument('--dump-tests', dest='dump_tests',
action='store_const', const=True, default=False,
help='dump tests in JSON format')
parser.add_argument('--debug-normalization', dest='debug_normalization',
action='store_const', const=True,
default=False, help='filter stdin through normalizer for testing')
args = parser.parse_args(sys.argv[1:])
def print_test_header(headertext, example_number, start_line, end_line):
print "Example %d (lines %d-%d) %s" % (example_number,start_line,end_line,headertext)
def do_test(test, normalize):
[retcode, actual_html, err] = cmark.to_html(test['markdown'])
if retcode == 0:
expected_html = test['html']
if normalize:
passed = normalize_html(actual_html) == normalize_html(expected_html)
else:
passed = actual_html == expected_html
if passed:
return 'pass'
else:
print_test_header(test['section'], test['example'], test['start_line'], test['end_line'])
sys.stdout.write(test['markdown'])
expected_html_lines = expected_html.splitlines(True)
actual_html_lines = actual_html.splitlines(True)
for diffline in unified_diff(expected_html_lines, actual_html_lines,
"expected HTML", "actual HTML"):
sys.stdout.write(diffline)
sys.stdout.write('\n')
return 'fail'
else:
print_test_header(test['section'], test['example'], test['start_line'], test['end_line'])
print "program returned error code %d" % retcode
print(err)
return 'error'
def get_tests(specfile):
line_number = 0
start_line = 0
end_line = 0
example_number = 0
markdown_lines = []
html_lines = []
state = 0 # 0 regular text, 1 markdown example, 2 html output
headertext = ''
tests = []
header_re = re.compile('#+ ')
with open(specfile, 'r') as specf:
for line in specf:
line_number = line_number + 1
if state == 0 and re.match(header_re, line):
headertext = header_re.sub('', line).strip()
if line.strip() == ".":
state = (state + 1) % 3
if state == 0:
example_number = example_number + 1
end_line = line_number
tests.append({
"markdown":''.join(markdown_lines).replace('→',"\t"),
"html":''.join(html_lines),
"example": example_number,
"start_line": start_line,
"end_line": end_line,
"section": headertext})
start_line = 0
markdown_lines = []
html_lines = []
elif state == 1:
if start_line == 0:
start_line = line_number - 1
markdown_lines.append(line)
elif state == 2:
html_lines.append(line)
return tests
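# Sketch of the spec.txt layout the state machine above expects (illustrative,
# inferred from this parser rather than quoted from the real spec):
#   ## Section heading
#   .
#   *markdown input*
#   .
#   <p><em>html output</em></p>
#   .
# Each "." line advances state 0 -> 1 -> 2 -> 0 and closes one test case.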
def do_tests(cmark, tests, pattern, normalize):
passed = 0
errored = 0
failed = 0
skipped = 0
if pattern:
pattern_re = re.compile(pattern, re.IGNORECASE)
else:
pattern_re = re.compile('.')
for test in tests:
if re.search(pattern_re, test['section']):
result = do_test(test, normalize)
if result == 'pass':
passed += 1
elif result == 'fail':
failed += 1
else:
errored += 1
else:
skipped += 1
print "%d passed, %d failed, %d errored, %d skipped" % (passed, failed, errored, skipped)
return (failed == 0 and errored == 0)
if __name__ == "__main__":
if args.debug_normalization:
print normalize_html(sys.stdin.read())
exit(0)
tests = get_tests(args.spec)
if args.dump_tests:
print json.dumps(tests, ensure_ascii=False, indent=2)
exit(0)
else:
cmark = CMark(prog=args.program, library_dir=args.library_dir)
if do_tests(cmark, tests, args.pattern, args.normalize):
exit(0)
else:
exit(1)
| mit |
joeythesaint/yocto-autobuilder | lib/python2.7/site-packages/buildbot-0.8.8-py2.7.egg/buildbot/test/unit/test_changes_base.py | 8 | 2873 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from twisted.trial import unittest
from twisted.internet import defer, reactor, task
from buildbot.test.util import changesource, compat
from buildbot.changes import base
class TestPollingChangeSource(changesource.ChangeSourceMixin, unittest.TestCase):
class Subclass(base.PollingChangeSource):
pass
def setUp(self):
# patch in a Clock so we can manipulate the reactor's time
self.clock = task.Clock()
self.patch(reactor, 'callLater', self.clock.callLater)
self.patch(reactor, 'seconds', self.clock.seconds)
d = self.setUpChangeSource()
def create_changesource(_):
self.attachChangeSource(self.Subclass())
d.addCallback(create_changesource)
return d
def tearDown(self):
return self.tearDownChangeSource()
def runClockFor(self, _, secs):
self.clock.pump([1.0] * secs)
def test_loop_loops(self):
# track when poll() gets called
loops = []
self.changesource.poll = \
lambda : loops.append(self.clock.seconds())
self.changesource.pollInterval = 5
self.startChangeSource()
d = defer.Deferred()
d.addCallback(self.runClockFor, 12)
def check(_):
# note that it does *not* poll at time 0
self.assertEqual(loops, [5.0, 10.0])
d.addCallback(check)
reactor.callWhenRunning(d.callback, None)
return d
@compat.usesFlushLoggedErrors
def test_loop_exception(self):
# track when poll() gets called
loops = []
def poll():
loops.append(self.clock.seconds())
raise RuntimeError("oh noes")
self.changesource.poll = poll
self.changesource.pollInterval = 5
self.startChangeSource()
d = defer.Deferred()
d.addCallback(self.runClockFor, 12)
def check(_):
# note that it keeps looping after error
self.assertEqual(loops, [5.0, 10.0])
self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 2)
d.addCallback(check)
reactor.callWhenRunning(d.callback, None)
return d
| gpl-2.0 |
xpansa/odoomrp-wip | purchase_homologation/__init__.py | 61 | 1629 | # -*- encoding: utf-8 -*-
##############################################################################
# #
# OpenERP, Open Source Management Solution. #
# #
# @author Carlos Sánchez Cifuentes <[email protected]> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
##############################################################################
from . import models
| agpl-3.0 |
tojonmz/treeherder | treeherder/etl/push.py | 3 | 1993 | import logging
from datetime import datetime
from django.db import transaction
from treeherder.model.models import (Commit,
Push)
logger = logging.getLogger(__name__)
def store_push(repository, push_dict):
push_revision = push_dict.get('revision')
    if not push_revision:
raise ValueError("Push must have a revision "
"associated with it!")
with transaction.atomic():
push, _ = Push.objects.update_or_create(
repository=repository,
revision=push_revision,
defaults={
'revision_hash': push_dict.get('revision_hash', push_revision),
'author': push_dict['author'],
'time': datetime.utcfromtimestamp(
push_dict['push_timestamp'])
})
for revision in push_dict['revisions']:
commit, _ = Commit.objects.update_or_create(
push=push,
revision=revision['revision'],
defaults={
'author': revision['author'],
'comments': revision['comment']
})
def store_push_data(repository, pushes):
"""
Stores push data in the treeherder database
pushes = [
{
"revision": "8afdb7debc82a8b6e0d56449dfdf916c77a7bf80",
"push_timestamp": 1378293517,
"author": "[email protected]",
"revisions": [
{
"comment": "Bug 911954 - Add forward declaration of JSScript to TraceLogging.h, r=h4writer",
"author": "John Doe <[email protected]>",
"revision": "8afdb7debc82a8b6e0d56449dfdf916c77a7bf80"
},
...
]
},
...
]
returns = {
}
"""
if not pushes:
logger.info("No new pushes to store")
return
for push in pushes:
store_push(repository, push)
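# Editor's sketch (illustrative; not part of the original module): the minimal
# payload shape ``store_push_data`` expects. In real use ``repository`` is a
# ``treeherder.model.models.Repository`` instance; all values below are
# placeholders.
def _example_store_push_data(repository):
    pushes = [{
        "revision": "8afdb7debc82a8b6e0d56449dfdf916c77a7bf80",
        "push_timestamp": 1378293517,
        "author": "jdoe@example.com",
        "revisions": [{
            "comment": "Bug 000000 - example commit message",
            "author": "John Doe <jdoe@example.com>",
            "revision": "8afdb7debc82a8b6e0d56449dfdf916c77a7bf80",
        }],
    }]
    store_push_data(repository, pushes)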
| mpl-2.0 |
romain-li/edx-platform | cms/envs/dev.py | 21 | 5916 | """
This config file runs the simplest dev environment"""
# We intentionally define lots of variables that aren't used, and
# want to import all variables from base settings files
# pylint: disable=wildcard-import, unused-wildcard-import
from .common import *
from openedx.core.lib.logsettings import get_logger_config
# import settings from LMS for consistent behavior with CMS
from lms.envs.dev import (WIKI_ENABLED)
DEBUG = True
TEMPLATE_DEBUG = DEBUG
HTTPS = 'off'
LOGGING = get_logger_config(ENV_ROOT / "log",
logging_env="dev",
tracking_filename="tracking.log",
dev_env=True,
debug=True)
update_module_store_settings(
MODULESTORE,
module_store_options={
'default_class': 'xmodule.raw_module.RawDescriptor',
'fs_root': GITHUB_REPO_ROOT,
}
)
DJFS = {
'type': 'osfs',
'directory_root': 'cms/static/djpyfs',
'url_root': '/static/djpyfs'
}
# cdodge: This is the specifier for the MongoDB (using GridFS) backed static content store
# This is for static content for courseware, not system static content (e.g. javascript, css, edX branding, etc)
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': 'localhost',
'db': 'xcontent',
},
# allow for additional options that can be keyed on a name, e.g. 'trashcan'
'ADDITIONAL_OPTIONS': {
'trashcan': {
'bucket': 'trash_fs'
}
}
}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ENV_ROOT / "db" / "edx.db",
'ATOMIC_REQUESTS': True,
}
}
LMS_BASE = "localhost:8000"
LMS_ROOT_URL = "http://{}".format(LMS_BASE)
FEATURES['PREVIEW_LMS_BASE'] = "localhost:8000"
REPOS = {
'edx4edx': {
'branch': 'master',
'origin': '[email protected]:MITx/edx4edx.git',
},
'content-mit-6002x': {
'branch': 'master',
# 'origin': '[email protected]:MITx/6002x-fall-2012.git',
'origin': '[email protected]:MITx/content-mit-6002x.git',
},
'6.00x': {
'branch': 'master',
'origin': '[email protected]:MITx/6.00x.git',
},
'7.00x': {
'branch': 'master',
'origin': '[email protected]:MITx/7.00x.git',
},
'3.091x': {
'branch': 'master',
'origin': '[email protected]:MITx/3.091x.git',
},
}
CACHES = {
# This is the cache used for most things. Askbot will not work without a
# functioning cache -- it relies on caching to load its settings in places.
# In staging/prod envs, the sessions also live here.
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_loc_mem_cache',
'KEY_FUNCTION': 'util.memcache.safe_key',
},
# The general cache is what you get if you use our util.cache. It's used for
# things like caching the course.xml file for different A/B test groups.
# We set it to be a DummyCache to force reloading of course.xml in dev.
# In staging environments, we would grab VERSION from data uploaded by the
# push process.
'general': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
'KEY_PREFIX': 'general',
'VERSION': 4,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'mongo_metadata_inheritance': {
'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache',
'LOCATION': '/var/tmp/mongo_metadata_inheritance',
'TIMEOUT': 300,
'KEY_FUNCTION': 'util.memcache.safe_key',
},
'loc_cache': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_location_mem_cache',
},
'course_structure_cache': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'edx_course_structure_mem_cache',
},
}
# Make the keyedcache startup warnings go away
CACHE_TIMEOUT = 0
# Dummy secret key for dev
SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd'
################################ PIPELINE #################################
PIPELINE_SASS_ARGUMENTS = '--debug-info'
################################# CELERY ######################################
# By default don't use a worker, execute tasks as if they were local functions
CELERY_ALWAYS_EAGER = True
################################ DEBUG TOOLBAR #################################
INSTALLED_APPS += ('debug_toolbar', 'debug_toolbar_mongo', 'djpyfs')
MIDDLEWARE_CLASSES += ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_PANELS = (
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.profiling.ProfilingPanel',
)
# To see stacktraces for MongoDB queries, set this to True.
# Stacktraces slow down page loads drastically (for pages with lots of queries).
DEBUG_TOOLBAR_MONGO_STACKTRACES = False
# Enable URL that shows information about the status of various services
FEATURES['ENABLE_SERVICE_STATUS'] = True
############################# SEGMENT-IO ##################################
# If there's an environment variable set, grab it to turn on Segment
# Note that this is the Studio key. There is a separate key for the LMS.
import os
CMS_SEGMENT_KEY = os.environ.get('SEGMENT_KEY')
#####################################################################
# Lastly, see if the developer has any local overrides.
try:
from .private import * # pylint: disable=import-error
except ImportError:
pass
| agpl-3.0 |
tomchristie/django-rest-framework | rest_framework/renderers.py | 1 | 40037 | """
Renderers are used to serialize a response into specific media types.
They give us a generic way of being able to handle various media types
on the response, such as JSON encoded data or HTML output.
REST framework also provides an HTML renderer that renders the browsable API.
"""
import base64
from collections import OrderedDict
from urllib import parse
from django import forms
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.paginator import Page
from django.http.multipartparser import parse_header
from django.template import engines, loader
from django.urls import NoReverseMatch
from django.utils.html import mark_safe
from rest_framework import VERSION, exceptions, serializers, status
from rest_framework.compat import (
INDENT_SEPARATORS, LONG_SEPARATORS, SHORT_SEPARATORS, coreapi, coreschema,
pygments_css, yaml
)
from rest_framework.exceptions import ParseError
from rest_framework.request import is_form_media_type, override_method
from rest_framework.settings import api_settings
from rest_framework.utils import encoders, json
from rest_framework.utils.breadcrumbs import get_breadcrumbs
from rest_framework.utils.field_mapping import ClassLookupDict
def zero_as_none(value):
return None if value == 0 else value
class BaseRenderer:
"""
All renderers should extend this class, setting the `media_type`
and `format` attributes, and override the `.render()` method.
"""
media_type = None
format = None
charset = 'utf-8'
render_style = 'text'
def render(self, data, accepted_media_type=None, renderer_context=None):
raise NotImplementedError('Renderer class requires .render() to be implemented')
class JSONRenderer(BaseRenderer):
"""
Renderer which serializes to JSON.
"""
media_type = 'application/json'
format = 'json'
encoder_class = encoders.JSONEncoder
ensure_ascii = not api_settings.UNICODE_JSON
compact = api_settings.COMPACT_JSON
strict = api_settings.STRICT_JSON
# We don't set a charset because JSON is a binary encoding,
# that can be encoded as utf-8, utf-16 or utf-32.
# See: https://www.ietf.org/rfc/rfc4627.txt
# Also: http://lucumr.pocoo.org/2013/7/19/application-mimetypes-and-encodings/
charset = None
def get_indent(self, accepted_media_type, renderer_context):
if accepted_media_type:
# If the media type looks like 'application/json; indent=4',
# then pretty print the result.
# Note that we coerce `indent=0` into `indent=None`.
base_media_type, params = parse_header(accepted_media_type.encode('ascii'))
try:
return zero_as_none(max(min(int(params['indent']), 8), 0))
except (KeyError, ValueError, TypeError):
pass
# If 'indent' is provided in the context, then pretty print the result.
# E.g. If we're being called by the BrowsableAPIRenderer.
return renderer_context.get('indent', None)
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render `data` into JSON, returning a bytestring.
"""
if data is None:
return b''
renderer_context = renderer_context or {}
indent = self.get_indent(accepted_media_type, renderer_context)
if indent is None:
separators = SHORT_SEPARATORS if self.compact else LONG_SEPARATORS
else:
separators = INDENT_SEPARATORS
ret = json.dumps(
data, cls=self.encoder_class,
indent=indent, ensure_ascii=self.ensure_ascii,
allow_nan=not self.strict, separators=separators
)
# We always fully escape \u2028 and \u2029 to ensure we output JSON
# that is a strict javascript subset.
# See: http://timelessrepo.com/json-isnt-a-javascript-subset
ret = ret.replace('\u2028', '\\u2028').replace('\u2029', '\\u2029')
return ret.encode()
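# Editor's sketch (illustrative; not part of the original module): direct use
# of JSONRenderer and its indent handling, assuming the default COMPACT_JSON
# setting.
def _demo_json_renderer_indent():
    renderer = JSONRenderer()
    data = {'a': 1}
    compact = renderer.render(data)                       # b'{"a":1}'
    pretty = renderer.render(data, None, {'indent': 4})   # pretty-printed
    assert len(pretty) > len(compact)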
class TemplateHTMLRenderer(BaseRenderer):
"""
An HTML renderer for use with templates.
The data supplied to the Response object should be a dictionary that will
be used as context for the template.
The template name is determined by (in order of preference):
1. An explicit `.template_name` attribute set on the response.
2. An explicit `.template_name` attribute set on this class.
3. The return result of calling `view.get_template_names()`.
For example:
data = {'users': User.objects.all()}
return Response(data, template_name='users.html')
For pre-rendered HTML, see StaticHTMLRenderer.
"""
media_type = 'text/html'
format = 'html'
template_name = None
exception_template_names = [
'%(status_code)s.html',
'api_exception.html'
]
charset = 'utf-8'
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Renders data to HTML, using Django's standard template rendering.
The template name is determined by (in order of preference):
1. An explicit .template_name set on the response.
2. An explicit .template_name set on this class.
3. The return result of calling view.get_template_names().
"""
renderer_context = renderer_context or {}
view = renderer_context['view']
request = renderer_context['request']
response = renderer_context['response']
if response.exception:
template = self.get_exception_template(response)
else:
template_names = self.get_template_names(response, view)
template = self.resolve_template(template_names)
if hasattr(self, 'resolve_context'):
# Fallback for older versions.
context = self.resolve_context(data, request, response)
else:
context = self.get_template_context(data, renderer_context)
return template.render(context, request=request)
def resolve_template(self, template_names):
return loader.select_template(template_names)
def get_template_context(self, data, renderer_context):
response = renderer_context['response']
if response.exception:
data['status_code'] = response.status_code
return data
def get_template_names(self, response, view):
if response.template_name:
return [response.template_name]
elif self.template_name:
return [self.template_name]
elif hasattr(view, 'get_template_names'):
return view.get_template_names()
elif hasattr(view, 'template_name'):
return [view.template_name]
raise ImproperlyConfigured(
'Returned a template response with no `template_name` attribute set on either the view or response'
)
def get_exception_template(self, response):
template_names = [name % {'status_code': response.status_code}
for name in self.exception_template_names]
try:
# Try to find an appropriate error template
return self.resolve_template(template_names)
except Exception:
# Fall back to using eg '404 Not Found'
body = '%d %s' % (response.status_code, response.status_text.title())
template = engines['django'].from_string(body)
return template
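# Editor's sketch (illustrative; not part of the original module): the
# template-name resolution order documented above, driven from a hypothetical
# view. Kept as a comment to avoid importing generics/Response in this module.
#
#     class UserListView(generics.ListAPIView):      # hypothetical view
#         renderer_classes = [TemplateHTMLRenderer]
#         template_name = 'users.html'               # preference 2
#
#         def list(self, request, *args, **kwargs):
#             data = {'users': self.get_queryset()}
#             # template_name on the Response takes precedence (preference 1)
#             return Response(data, template_name='users.html')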
# Note, subclass TemplateHTMLRenderer simply for the exception behavior
class StaticHTMLRenderer(TemplateHTMLRenderer):
"""
An HTML renderer class that simply returns pre-rendered HTML.
The data supplied to the Response object should be a string representing
the pre-rendered HTML content.
For example:
data = '<html><body>example</body></html>'
return Response(data)
For template rendered HTML, see TemplateHTMLRenderer.
"""
media_type = 'text/html'
format = 'html'
charset = 'utf-8'
def render(self, data, accepted_media_type=None, renderer_context=None):
renderer_context = renderer_context or {}
response = renderer_context.get('response')
if response and response.exception:
request = renderer_context['request']
template = self.get_exception_template(response)
if hasattr(self, 'resolve_context'):
context = self.resolve_context(data, request, response)
else:
context = self.get_template_context(data, renderer_context)
return template.render(context, request=request)
return data
class HTMLFormRenderer(BaseRenderer):
"""
    Renders serializer data into an HTML form.
If the serializer was instantiated without an object then this will
return an HTML form not bound to any object,
otherwise it will return an HTML form with the appropriate initial data
populated from the object.
Note that rendering of field and form errors is not currently supported.
"""
media_type = 'text/html'
format = 'form'
charset = 'utf-8'
template_pack = 'rest_framework/vertical/'
base_template = 'form.html'
default_style = ClassLookupDict({
serializers.Field: {
'base_template': 'input.html',
'input_type': 'text'
},
serializers.EmailField: {
'base_template': 'input.html',
'input_type': 'email'
},
serializers.URLField: {
'base_template': 'input.html',
'input_type': 'url'
},
serializers.IntegerField: {
'base_template': 'input.html',
'input_type': 'number'
},
serializers.FloatField: {
'base_template': 'input.html',
'input_type': 'number'
},
serializers.DateTimeField: {
'base_template': 'input.html',
'input_type': 'datetime-local'
},
serializers.DateField: {
'base_template': 'input.html',
'input_type': 'date'
},
serializers.TimeField: {
'base_template': 'input.html',
'input_type': 'time'
},
serializers.FileField: {
'base_template': 'input.html',
'input_type': 'file'
},
serializers.BooleanField: {
'base_template': 'checkbox.html'
},
serializers.ChoiceField: {
'base_template': 'select.html', # Also valid: 'radio.html'
},
serializers.MultipleChoiceField: {
'base_template': 'select_multiple.html', # Also valid: 'checkbox_multiple.html'
},
serializers.RelatedField: {
'base_template': 'select.html', # Also valid: 'radio.html'
},
serializers.ManyRelatedField: {
'base_template': 'select_multiple.html', # Also valid: 'checkbox_multiple.html'
},
serializers.Serializer: {
'base_template': 'fieldset.html'
},
serializers.ListSerializer: {
'base_template': 'list_fieldset.html'
},
serializers.ListField: {
'base_template': 'list_field.html'
},
serializers.DictField: {
'base_template': 'dict_field.html'
},
serializers.FilePathField: {
'base_template': 'select.html',
},
serializers.JSONField: {
'base_template': 'textarea.html',
},
})
def render_field(self, field, parent_style):
if isinstance(field._field, serializers.HiddenField):
return ''
style = self.default_style[field].copy()
style.update(field.style)
if 'template_pack' not in style:
style['template_pack'] = parent_style.get('template_pack', self.template_pack)
style['renderer'] = self
# Get a clone of the field with text-only value representation.
field = field.as_form_field()
if style.get('input_type') == 'datetime-local' and isinstance(field.value, str):
field.value = field.value.rstrip('Z')
if 'template' in style:
template_name = style['template']
else:
template_name = style['template_pack'].strip('/') + '/' + style['base_template']
template = loader.get_template(template_name)
context = {'field': field, 'style': style}
return template.render(context)
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render serializer data and return an HTML form, as a string.
"""
renderer_context = renderer_context or {}
form = data.serializer
style = renderer_context.get('style', {})
if 'template_pack' not in style:
style['template_pack'] = self.template_pack
style['renderer'] = self
template_pack = style['template_pack'].strip('/')
template_name = template_pack + '/' + self.base_template
template = loader.get_template(template_name)
context = {
'form': form,
'style': style
}
return template.render(context)
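# Editor's sketch (illustrative; not part of the original module): typical use
# of HTMLFormRenderer with a bound serializer. ``CommentSerializer`` is a
# hypothetical serializer class; ``.data`` carries the ``serializer`` attribute
# that ``render()`` reads above.
#
#     serializer = CommentSerializer(data={'text': 'hello'})
#     serializer.is_valid()
#     html = HTMLFormRenderer().render(serializer.data)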
class BrowsableAPIRenderer(BaseRenderer):
"""
HTML renderer used to self-document the API.
"""
media_type = 'text/html'
format = 'api'
template = 'rest_framework/api.html'
filter_template = 'rest_framework/filters/base.html'
code_style = 'emacs'
charset = 'utf-8'
form_renderer_class = HTMLFormRenderer
def get_default_renderer(self, view):
"""
Return an instance of the first valid renderer.
(Don't use another documenting renderer.)
"""
renderers = [renderer for renderer in view.renderer_classes
if not issubclass(renderer, BrowsableAPIRenderer)]
non_template_renderers = [renderer for renderer in renderers
if not hasattr(renderer, 'get_template_names')]
if not renderers:
return None
elif non_template_renderers:
return non_template_renderers[0]()
return renderers[0]()
def get_content(self, renderer, data,
accepted_media_type, renderer_context):
"""
Get the content as if it had been rendered by the default
non-documenting renderer.
"""
if not renderer:
return '[No renderers were found]'
renderer_context['indent'] = 4
content = renderer.render(data, accepted_media_type, renderer_context)
render_style = getattr(renderer, 'render_style', 'text')
assert render_style in ['text', 'binary'], 'Expected .render_style ' \
'"text" or "binary", but got "%s"' % render_style
if render_style == 'binary':
return '[%d bytes of binary content]' % len(content)
return content.decode('utf-8') if isinstance(content, bytes) else content
def show_form_for_method(self, view, method, request, obj):
"""
Returns True if a form should be shown for this method.
"""
if method not in view.allowed_methods:
return # Not a valid method
try:
view.check_permissions(request)
if obj is not None:
view.check_object_permissions(request, obj)
except exceptions.APIException:
return False # Doesn't have permissions
return True
def _get_serializer(self, serializer_class, view_instance, request, *args, **kwargs):
kwargs['context'] = {
'request': request,
'format': self.format,
'view': view_instance
}
return serializer_class(*args, **kwargs)
def get_rendered_html_form(self, data, view, method, request):
"""
Return a string representing a rendered HTML form, possibly bound to
either the input or output data.
In the absence of the View having an associated form then return None.
"""
# See issue #2089 for refactoring this.
serializer = getattr(data, 'serializer', None)
if serializer and not getattr(serializer, 'many', False):
instance = getattr(serializer, 'instance', None)
if isinstance(instance, Page):
instance = None
else:
instance = None
# If this is valid serializer data, and the form is for the same
# HTTP method as was used in the request then use the existing
# serializer instance, rather than dynamically creating a new one.
if request.method == method and serializer is not None:
try:
kwargs = {'data': request.data}
except ParseError:
kwargs = {}
existing_serializer = serializer
else:
kwargs = {}
existing_serializer = None
with override_method(view, request, method) as request:
if not self.show_form_for_method(view, method, request, instance):
return
if method in ('DELETE', 'OPTIONS'):
return True # Don't actually need to return a form
has_serializer = getattr(view, 'get_serializer', None)
has_serializer_class = getattr(view, 'serializer_class', None)
if (
(not has_serializer and not has_serializer_class) or
not any(is_form_media_type(parser.media_type) for parser in view.parser_classes)
):
return
if existing_serializer is not None:
try:
return self.render_form_for_serializer(existing_serializer)
except TypeError:
pass
if has_serializer:
if method in ('PUT', 'PATCH'):
serializer = view.get_serializer(instance=instance, **kwargs)
else:
serializer = view.get_serializer(**kwargs)
else:
# at this point we must have a serializer_class
if method in ('PUT', 'PATCH'):
serializer = self._get_serializer(view.serializer_class, view,
request, instance=instance, **kwargs)
else:
serializer = self._get_serializer(view.serializer_class, view,
request, **kwargs)
return self.render_form_for_serializer(serializer)
def render_form_for_serializer(self, serializer):
if hasattr(serializer, 'initial_data'):
serializer.is_valid()
form_renderer = self.form_renderer_class()
return form_renderer.render(
serializer.data,
self.accepted_media_type,
{'style': {'template_pack': 'rest_framework/horizontal'}}
)
def get_raw_data_form(self, data, view, method, request):
"""
Returns a form that allows for arbitrary content types to be tunneled
via standard HTML forms.
(Which are typically application/x-www-form-urlencoded)
"""
# See issue #2089 for refactoring this.
serializer = getattr(data, 'serializer', None)
if serializer and not getattr(serializer, 'many', False):
instance = getattr(serializer, 'instance', None)
if isinstance(instance, Page):
instance = None
else:
instance = None
with override_method(view, request, method) as request:
# Check permissions
if not self.show_form_for_method(view, method, request, instance):
return
# If possible, serialize the initial content for the generic form
default_parser = view.parser_classes[0]
renderer_class = getattr(default_parser, 'renderer_class', None)
if hasattr(view, 'get_serializer') and renderer_class:
# View has a serializer defined and parser class has a
# corresponding renderer that can be used to render the data.
if method in ('PUT', 'PATCH'):
serializer = view.get_serializer(instance=instance)
else:
serializer = view.get_serializer()
# Render the raw data content
renderer = renderer_class()
accepted = self.accepted_media_type
context = self.renderer_context.copy()
context['indent'] = 4
# strip HiddenField from output
data = serializer.data.copy()
for name, field in serializer.fields.items():
if isinstance(field, serializers.HiddenField):
data.pop(name, None)
content = renderer.render(data, accepted, context)
# Renders returns bytes, but CharField expects a str.
content = content.decode()
else:
content = None
# Generate a generic form that includes a content type field,
# and a content field.
media_types = [parser.media_type for parser in view.parser_classes]
choices = [(media_type, media_type) for media_type in media_types]
initial = media_types[0]
class GenericContentForm(forms.Form):
_content_type = forms.ChoiceField(
label='Media type',
choices=choices,
initial=initial,
widget=forms.Select(attrs={'data-override': 'content-type'})
)
_content = forms.CharField(
label='Content',
widget=forms.Textarea(attrs={'data-override': 'content'}),
initial=content,
required=False
)
return GenericContentForm()
def get_name(self, view):
return view.get_view_name()
def get_description(self, view, status_code):
if status_code in (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN):
return ''
return view.get_view_description(html=True)
def get_breadcrumbs(self, request):
return get_breadcrumbs(request.path, request)
def get_extra_actions(self, view, status_code):
if (status_code in (status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN)):
return None
elif not hasattr(view, 'get_extra_action_url_map'):
return None
return view.get_extra_action_url_map()
def get_filter_form(self, data, view, request):
if not hasattr(view, 'get_queryset') or not hasattr(view, 'filter_backends'):
return
# Infer if this is a list view or not.
paginator = getattr(view, 'paginator', None)
if isinstance(data, list):
pass
elif paginator is not None and data is not None:
try:
paginator.get_results(data)
except (TypeError, KeyError):
return
elif not isinstance(data, list):
return
queryset = view.get_queryset()
elements = []
for backend in view.filter_backends:
if hasattr(backend, 'to_html'):
html = backend().to_html(request, queryset, view)
if html:
elements.append(html)
if not elements:
return
template = loader.get_template(self.filter_template)
context = {'elements': elements}
return template.render(context)
def get_context(self, data, accepted_media_type, renderer_context):
"""
Returns the context used to render.
"""
view = renderer_context['view']
request = renderer_context['request']
response = renderer_context['response']
renderer = self.get_default_renderer(view)
raw_data_post_form = self.get_raw_data_form(data, view, 'POST', request)
raw_data_put_form = self.get_raw_data_form(data, view, 'PUT', request)
raw_data_patch_form = self.get_raw_data_form(data, view, 'PATCH', request)
raw_data_put_or_patch_form = raw_data_put_form or raw_data_patch_form
response_headers = OrderedDict(sorted(response.items()))
renderer_content_type = ''
if renderer:
renderer_content_type = '%s' % renderer.media_type
if renderer.charset:
                renderer_content_type += '; charset=%s' % renderer.charset
response_headers['Content-Type'] = renderer_content_type
if getattr(view, 'paginator', None) and view.paginator.display_page_controls:
paginator = view.paginator
else:
paginator = None
csrf_cookie_name = settings.CSRF_COOKIE_NAME
csrf_header_name = settings.CSRF_HEADER_NAME
if csrf_header_name.startswith('HTTP_'):
csrf_header_name = csrf_header_name[5:]
csrf_header_name = csrf_header_name.replace('_', '-')
return {
'content': self.get_content(renderer, data, accepted_media_type, renderer_context),
'code_style': pygments_css(self.code_style),
'view': view,
'request': request,
'response': response,
'user': request.user,
'description': self.get_description(view, response.status_code),
'name': self.get_name(view),
'version': VERSION,
'paginator': paginator,
'breadcrumblist': self.get_breadcrumbs(request),
'allowed_methods': view.allowed_methods,
'available_formats': [renderer_cls.format for renderer_cls in view.renderer_classes],
'response_headers': response_headers,
'put_form': self.get_rendered_html_form(data, view, 'PUT', request),
'post_form': self.get_rendered_html_form(data, view, 'POST', request),
'delete_form': self.get_rendered_html_form(data, view, 'DELETE', request),
'options_form': self.get_rendered_html_form(data, view, 'OPTIONS', request),
'extra_actions': self.get_extra_actions(view, response.status_code),
'filter_form': self.get_filter_form(data, view, request),
'raw_data_put_form': raw_data_put_form,
'raw_data_post_form': raw_data_post_form,
'raw_data_patch_form': raw_data_patch_form,
'raw_data_put_or_patch_form': raw_data_put_or_patch_form,
'display_edit_forms': bool(response.status_code != 403),
'api_settings': api_settings,
'csrf_cookie_name': csrf_cookie_name,
'csrf_header_name': csrf_header_name
}
def render(self, data, accepted_media_type=None, renderer_context=None):
"""
Render the HTML for the browsable API representation.
"""
self.accepted_media_type = accepted_media_type or ''
self.renderer_context = renderer_context or {}
template = loader.get_template(self.template)
context = self.get_context(data, accepted_media_type, renderer_context)
ret = template.render(context, request=renderer_context['request'])
# Munge DELETE Response code to allow us to return content
# (Do this *after* we've rendered the template so that we include
# the normal deletion response code in the output)
response = renderer_context['response']
if response.status_code == status.HTTP_204_NO_CONTENT:
response.status_code = status.HTTP_200_OK
return ret
class AdminRenderer(BrowsableAPIRenderer):
template = 'rest_framework/admin.html'
format = 'admin'
def render(self, data, accepted_media_type=None, renderer_context=None):
self.accepted_media_type = accepted_media_type or ''
self.renderer_context = renderer_context or {}
response = renderer_context['response']
request = renderer_context['request']
view = self.renderer_context['view']
if response.status_code == status.HTTP_400_BAD_REQUEST:
# Errors still need to display the list or detail information.
# The only way we can get at that is to simulate a GET request.
self.error_form = self.get_rendered_html_form(data, view, request.method, request)
self.error_title = {'POST': 'Create', 'PUT': 'Edit'}.get(request.method, 'Errors')
with override_method(view, request, 'GET') as request:
response = view.get(request, *view.args, **view.kwargs)
data = response.data
template = loader.get_template(self.template)
context = self.get_context(data, accepted_media_type, renderer_context)
ret = template.render(context, request=renderer_context['request'])
# Creation and deletion should use redirects in the admin style.
if response.status_code == status.HTTP_201_CREATED and 'Location' in response:
response.status_code = status.HTTP_303_SEE_OTHER
response['Location'] = request.build_absolute_uri()
ret = ''
if response.status_code == status.HTTP_204_NO_CONTENT:
response.status_code = status.HTTP_303_SEE_OTHER
try:
# Attempt to get the parent breadcrumb URL.
response['Location'] = self.get_breadcrumbs(request)[-2][1]
            except (KeyError, IndexError):
                # Otherwise reload the current URL to get a 'Not Found' page.
                response['Location'] = request.get_full_path()
ret = ''
return ret
def get_context(self, data, accepted_media_type, renderer_context):
"""
Render the HTML for the browsable API representation.
"""
context = super().get_context(
data, accepted_media_type, renderer_context
)
paginator = getattr(context['view'], 'paginator', None)
if paginator is not None and data is not None:
try:
results = paginator.get_results(data)
except (TypeError, KeyError):
results = data
else:
results = data
if results is None:
header = {}
style = 'detail'
elif isinstance(results, list):
header = results[0] if results else {}
style = 'list'
else:
header = results
style = 'detail'
columns = [key for key in header if key != 'url']
details = [key for key in header if key != 'url']
if isinstance(results, list) and 'view' in renderer_context:
for result in results:
url = self.get_result_url(result, context['view'])
if url is not None:
result.setdefault('url', url)
context['style'] = style
context['columns'] = columns
context['details'] = details
context['results'] = results
context['error_form'] = getattr(self, 'error_form', None)
context['error_title'] = getattr(self, 'error_title', None)
return context
def get_result_url(self, result, view):
"""
Attempt to reverse the result's detail view URL.
This only works with views that are generic-like (has `.lookup_field`)
and viewset-like (has `.basename` / `.reverse_action()`).
"""
if not hasattr(view, 'reverse_action') or \
not hasattr(view, 'lookup_field'):
return
lookup_field = view.lookup_field
lookup_url_kwarg = getattr(view, 'lookup_url_kwarg', None) or lookup_field
try:
kwargs = {lookup_url_kwarg: result[lookup_field]}
return view.reverse_action('detail', kwargs=kwargs)
except (KeyError, NoReverseMatch):
return
class DocumentationRenderer(BaseRenderer):
media_type = 'text/html'
format = 'html'
charset = 'utf-8'
template = 'rest_framework/docs/index.html'
error_template = 'rest_framework/docs/error.html'
code_style = 'emacs'
languages = ['shell', 'javascript', 'python']
def get_context(self, data, request):
return {
'document': data,
'langs': self.languages,
'lang_htmls': ["rest_framework/docs/langs/%s.html" % language for language in self.languages],
'lang_intro_htmls': ["rest_framework/docs/langs/%s-intro.html" % language for language in self.languages],
'code_style': pygments_css(self.code_style),
'request': request
}
def render(self, data, accepted_media_type=None, renderer_context=None):
if isinstance(data, coreapi.Document):
template = loader.get_template(self.template)
context = self.get_context(data, renderer_context['request'])
return template.render(context, request=renderer_context['request'])
else:
template = loader.get_template(self.error_template)
context = {
"data": data,
"request": renderer_context['request'],
"response": renderer_context['response'],
"debug": settings.DEBUG,
}
return template.render(context, request=renderer_context['request'])
class SchemaJSRenderer(BaseRenderer):
media_type = 'application/javascript'
format = 'javascript'
charset = 'utf-8'
template = 'rest_framework/schema.js'
def render(self, data, accepted_media_type=None, renderer_context=None):
codec = coreapi.codecs.CoreJSONCodec()
schema = base64.b64encode(codec.encode(data)).decode('ascii')
template = loader.get_template(self.template)
context = {'schema': mark_safe(schema)}
request = renderer_context['request']
return template.render(context, request=request)
class MultiPartRenderer(BaseRenderer):
media_type = 'multipart/form-data; boundary=BoUnDaRyStRiNg'
format = 'multipart'
charset = 'utf-8'
BOUNDARY = 'BoUnDaRyStRiNg'
def render(self, data, accepted_media_type=None, renderer_context=None):
from django.test.client import encode_multipart
if hasattr(data, 'items'):
for key, value in data.items():
assert not isinstance(value, dict), (
"Test data contained a dictionary value for key '%s', "
"but multipart uploads do not support nested data. "
"You may want to consider using format='json' in this "
"test case." % key
)
return encode_multipart(self.BOUNDARY, data)
class CoreJSONRenderer(BaseRenderer):
media_type = 'application/coreapi+json'
charset = None
format = 'corejson'
def __init__(self):
assert coreapi, 'Using CoreJSONRenderer, but `coreapi` is not installed.'
def render(self, data, media_type=None, renderer_context=None):
indent = bool(renderer_context.get('indent', 0))
codec = coreapi.codecs.CoreJSONCodec()
return codec.dump(data, indent=indent)
class _BaseOpenAPIRenderer:
def get_schema(self, instance):
CLASS_TO_TYPENAME = {
coreschema.Object: 'object',
coreschema.Array: 'array',
coreschema.Number: 'number',
coreschema.Integer: 'integer',
coreschema.String: 'string',
coreschema.Boolean: 'boolean',
}
schema = {}
if instance.__class__ in CLASS_TO_TYPENAME:
schema['type'] = CLASS_TO_TYPENAME[instance.__class__]
schema['title'] = instance.title
schema['description'] = instance.description
if hasattr(instance, 'enum'):
schema['enum'] = instance.enum
return schema
def get_parameters(self, link):
parameters = []
for field in link.fields:
if field.location not in ['path', 'query']:
continue
parameter = {
'name': field.name,
'in': field.location,
}
if field.required:
parameter['required'] = True
if field.description:
parameter['description'] = field.description
if field.schema:
parameter['schema'] = self.get_schema(field.schema)
parameters.append(parameter)
return parameters
def get_operation(self, link, name, tag):
operation_id = "%s_%s" % (tag, name) if tag else name
parameters = self.get_parameters(link)
operation = {
'operationId': operation_id,
}
if link.title:
operation['summary'] = link.title
if link.description:
operation['description'] = link.description
if parameters:
operation['parameters'] = parameters
if tag:
operation['tags'] = [tag]
return operation
def get_paths(self, document):
paths = {}
tag = None
for name, link in document.links.items():
path = parse.urlparse(link.url).path
method = link.action.lower()
paths.setdefault(path, {})
paths[path][method] = self.get_operation(link, name, tag=tag)
for tag, section in document.data.items():
for name, link in section.links.items():
path = parse.urlparse(link.url).path
method = link.action.lower()
paths.setdefault(path, {})
paths[path][method] = self.get_operation(link, name, tag=tag)
return paths
def get_structure(self, data):
return {
'openapi': '3.0.0',
'info': {
'version': '',
'title': data.title,
'description': data.description
},
'servers': [{
'url': data.url
}],
'paths': self.get_paths(data)
}
class CoreAPIOpenAPIRenderer(_BaseOpenAPIRenderer):
media_type = 'application/vnd.oai.openapi'
charset = None
format = 'openapi'
def __init__(self):
assert coreapi, 'Using CoreAPIOpenAPIRenderer, but `coreapi` is not installed.'
assert yaml, 'Using CoreAPIOpenAPIRenderer, but `pyyaml` is not installed.'
def render(self, data, media_type=None, renderer_context=None):
structure = self.get_structure(data)
return yaml.dump(structure, default_flow_style=False).encode()
class CoreAPIJSONOpenAPIRenderer(_BaseOpenAPIRenderer):
media_type = 'application/vnd.oai.openapi+json'
charset = None
format = 'openapi-json'
def __init__(self):
assert coreapi, 'Using CoreAPIJSONOpenAPIRenderer, but `coreapi` is not installed.'
def render(self, data, media_type=None, renderer_context=None):
structure = self.get_structure(data)
return json.dumps(structure, indent=4).encode('utf-8')
class OpenAPIRenderer(BaseRenderer):
media_type = 'application/vnd.oai.openapi'
charset = None
format = 'openapi'
def __init__(self):
assert yaml, 'Using OpenAPIRenderer, but `pyyaml` is not installed.'
def render(self, data, media_type=None, renderer_context=None):
# disable yaml advanced feature 'alias' for clean, portable, and readable output
class Dumper(yaml.Dumper):
def ignore_aliases(self, data):
return True
return yaml.dump(data, default_flow_style=False, sort_keys=False, Dumper=Dumper).encode('utf-8')
class JSONOpenAPIRenderer(BaseRenderer):
media_type = 'application/vnd.oai.openapi+json'
charset = None
format = 'openapi-json'
def render(self, data, media_type=None, renderer_context=None):
return json.dumps(data, indent=2).encode('utf-8')
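# Editor's sketch (illustrative; not part of the original module): renderers
# are normally wired in via settings or per-view attributes rather than
# instantiated directly. A hypothetical settings fragment:
#
#     REST_FRAMEWORK = {
#         'DEFAULT_RENDERER_CLASSES': [
#             'rest_framework.renderers.JSONRenderer',
#             'rest_framework.renderers.BrowsableAPIRenderer',
#         ]
#     }
#
# or, per view:
#
#     class ExampleView(APIView):                    # hypothetical view
#         renderer_classes = [JSONRenderer]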
| bsd-2-clause |
hydroshare/hydroshare | hs_core/management/commands/check_bag.py | 1 | 12581 | # -*- coding: utf-8 -*-
"""
Generate metadata and bag for a resource from Django
"""
import os
import requests
from django.conf import settings
from django.core.management.base import BaseCommand
from hs_core.models import BaseResource
from hs_core.hydroshare import hs_requests
from hs_core.hydroshare.hs_bagit import create_bag_metadata_files
from hs_core.tasks import create_bag_by_irods
from django_irods.icommands import SessionException
def check_bag(rid, options):
requests.packages.urllib3.disable_warnings()
try:
resource = BaseResource.objects.get(short_id=rid)
istorage = resource.get_irods_storage()
root_exists = istorage.exists(resource.root_path)
if root_exists:
# print status of metadata/bag system
scimeta_path = os.path.join(resource.root_path, 'data',
'resourcemetadata.xml')
scimeta_exists = istorage.exists(scimeta_path)
if scimeta_exists:
print("resource metadata {} found".format(scimeta_path))
else:
print("resource metadata {} NOT FOUND".format(scimeta_path))
resmap_path = os.path.join(resource.root_path, 'data', 'resourcemap.xml')
resmap_exists = istorage.exists(resmap_path)
if resmap_exists:
print("resource map {} found".format(resmap_path))
else:
print("resource map {} NOT FOUND".format(resmap_path))
bag_exists = istorage.exists(resource.bag_path)
if bag_exists:
print("bag {} found".format(resource.bag_path))
else:
print("bag {} NOT FOUND".format(resource.bag_path))
dirty = resource.getAVU('metadata_dirty')
print("{}.metadata_dirty is {}".format(rid, str(dirty)))
modified = resource.getAVU('bag_modified')
print("{}.bag_modified is {}".format(rid, str(modified)))
if options['reset']: # reset all data to pristine
resource.setAVU('metadata_dirty', 'true')
print("{}.metadata_dirty set to true".format(rid))
try:
istorage.delete(resource.scimeta_path)
print("{} deleted".format(resource.scimeta_path))
except SessionException as ex:
print("{} delete failed: {}"
.format(resource.scimeta_path,
ex.stderr))
try:
istorage.delete(resource.resmap_path)
print("{} deleted".format(resource.resmap_path))
except SessionException as ex:
print("{} delete failed: {}"
.format(resource.resmap_path,
ex.stderr))
resource.setAVU('bag_modified', 'true')
print("{}.bag_modified set to true".format(rid))
try:
istorage.delete(resource.bag_path)
print("{} deleted".format(resource.bag_path))
except SessionException as ex:
print("{} delete failed: {}"
.format(resource.bag_path,
ex.stderr))
if options['reset_metadata']:
resource.setAVU('metadata_dirty', 'true')
print("{}.metadata_dirty set to true".format(rid))
try:
istorage.delete(resource.scimeta_path)
print("{} deleted".format(resource.scimeta_path))
except SessionException as ex:
print("delete of {} failed: {}"
.format(resource.scimeta_path,
ex.stderr))
try:
istorage.delete(resource.resmap_path)
print("{} deleted".format(resource.resmap_path))
except SessionException as ex:
print("{} delete failed: {}"
.format(resource.resmap_path,
ex.stderr))
if options['reset_bag']:
resource.setAVU('bag_modified', 'true')
print("{}.bag_modified set to true".format(rid))
try:
istorage.delete(resource.bag_path)
print("{} deleted".format(resource.bag_path))
except SessionException as ex:
print("{} delete failed: {}"
.format(resource.bag_path,
ex.stderr))
if options['generate']: # generate usable bag
if not options['if_needed'] or dirty or not scimeta_exists or not resmap_exists:
try:
create_bag_metadata_files(resource)
except ValueError as e:
print(("{}: value error encountered: {}".format(rid, str(e))))
return
print("{} metadata generated from Django".format(rid))
resource.setAVU('metadata_dirty', 'false')
resource.setAVU('bag_modified', 'true')
print("{}.metadata_dirty set to false".format(rid))
if not options['if_needed'] or modified or not bag_exists:
create_bag_by_irods(rid)
print("{} bag generated from iRODs".format(rid))
resource.setAVU('bag_modified', 'false')
print("{}.bag_modified set to false".format(rid))
if options['generate_metadata']:
if not options['if_needed'] or dirty or not scimeta_exists or not resmap_exists:
try:
create_bag_metadata_files(resource)
except ValueError as e:
print(("{}: value error encountered: {}".format(rid, str(e))))
return
print("{}: metadata generated from Django".format(rid))
resource.setAVU('metadata_dirty', 'false')
print("{}.metadata_dirty set to false".format(rid))
resource.setAVU('bag_modified', 'true')
print("{}.bag_modified set to false".format(rid))
if options['generate_bag']:
if not options['if_needed'] or modified or not bag_exists:
create_bag_by_irods(rid)
print("{}: bag generated from iRODs".format(rid))
resource.setAVU('bag_modified', 'false')
print("{}.bag_modified set to false".format(rid))
if options['download_bag']:
if options['password']:
server = getattr(settings, 'FQDN_OR_IP', 'www.hydroshare.org')
uri = "https://{}/hsapi/resource/{}/".format(server, rid)
print("download uri is {}".format(uri))
r = hs_requests.get(uri, verify=False, stream=True,
auth=requests.auth.HTTPBasicAuth(options['login'],
options['password']))
print("download return status is {}".format(str(r.status_code)))
print("redirects:")
for thing in r.history:
print("...url: {}".format(thing.url))
filename = 'tmp/check_bag_block'
with open(filename, 'wb') as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk)
else:
print("cannot download bag without username and password.")
if options['open_bag']:
if options['password']:
server = getattr(settings, 'FQDN_OR_IP', 'www.hydroshare.org')
uri = "https://{}/hsapi/resource/{}/".format(server, rid)
print("download uri is {}".format(uri))
r = hs_requests.get(uri, verify=False, stream=True,
auth=requests.auth.HTTPBasicAuth(options['login'],
options['password']))
print("download return status is {}".format(str(r.status_code)))
print("redirects:")
for thing in r.history:
print("...url: {}".format(thing.url))
filename = 'tmp/check_bag_block'
with open(filename, 'wb') as fd:
for chunk in r.iter_content(chunk_size=128):
fd.write(chunk)
break
else:
print("cannot open bag without username and password.")
else:
print("Resource with id {} does not exist in iRODS".format(rid))
except BaseResource.DoesNotExist:
print("Resource with id {} NOT FOUND in Django".format(rid))
class Command(BaseCommand):
help = "Create metadata files and bag for a resource."
def add_arguments(self, parser):
# a list of resource id's, or none to check all resources
parser.add_argument('resource_ids', nargs='*', type=str)
# Named (optional) arguments
parser.add_argument(
'--reset',
action='store_true', # True for presence, False for absence
dest='reset', # value is options['reset']
help='delete metadata and bag and start over'
)
parser.add_argument(
'--reset_metadata',
action='store_true', # True for presence, False for absence
dest='reset_metadata', # value is options['reset_metadata']
help='delete metadata files and start over'
)
parser.add_argument(
'--reset_bag',
action='store_true', # True for presence, False for absence
dest='reset_bag', # value is options['reset_bag']
help='delete bag and start over'
)
parser.add_argument(
'--generate',
action='store_true', # True for presence, False for absence
dest='generate', # value is options['generate']
help='force generation of metadata and bag'
)
parser.add_argument(
'--generate_metadata',
action='store_true', # True for presence, False for absence
dest='generate_metadata', # value is options['generate_metadata']
            help='force generation of the metadata files only'
)
parser.add_argument(
'--generate_bag',
action='store_true', # True for presence, False for absence
dest='generate_bag', # value is options['generate_bag']
            help='force generation of the bag only'
)
parser.add_argument(
'--if_needed',
action='store_true', # True for presence, False for absence
dest='if_needed', # value is options['if_needed']
help='generate only if not present'
)
parser.add_argument(
'--download_bag',
action='store_true', # True for presence, False for absence
dest='download_bag', # value is options['download_bag']
help='try downloading the bag'
)
parser.add_argument(
'--open_bag',
action='store_true', # True for presence, False for absence
dest='open_bag', # value is options['open_bag']
help='try opening the bag in http without downloading'
)
parser.add_argument(
'--login',
default='admin',
dest='login', # value is options['login']
help='HydroShare login name'
)
parser.add_argument(
'--password',
default=None,
dest='password', # value is options['password']
help='HydroShare password'
)
def handle(self, *args, **options):
if len(options['resource_ids']) > 0: # an array of resource short_id to check.
for rid in options['resource_ids']:
check_bag(rid, options)
else:
for r in BaseResource.objects.all():
check_bag(r.short_id, options)
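# Editor's note (illustrative; not part of the original module): typical
# invocations of this management command. Resource ids and credentials below
# are placeholders.
#
#     python manage.py check_bag                      # audit every resource
#     python manage.py check_bag <resource_id> --generate --if_needed
#     python manage.py check_bag <resource_id> --download_bag \
#         --login admin --password <password>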
| bsd-3-clause |
vivekananda/fbeats | django/contrib/gis/db/backends/postgis/adapter.py | 94 | 1501 | """
This object provides quoting for GEOS geometries into PostgreSQL/PostGIS.
"""
from psycopg2 import Binary
from psycopg2.extensions import ISQLQuote
class PostGISAdapter(object):
def __init__(self, geom):
"Initializes on the geometry."
# Getting the WKB (in string form, to allow easy pickling of
# the adaptor) and the SRID from the geometry.
self.ewkb = str(geom.ewkb)
self.srid = geom.srid
self._adapter = Binary(self.ewkb)
def __conform__(self, proto):
# Does the given protocol conform to what Psycopg2 expects?
if proto == ISQLQuote:
return self
else:
raise Exception('Error implementing psycopg2 protocol. Is psycopg2 installed?')
def __eq__(self, other):
if not isinstance(other, PostGISAdapter):
return False
return (self.ewkb == other.ewkb) and (self.srid == other.srid)
def __str__(self):
return self.getquoted()
def prepare(self, conn):
"""
This method allows escaping the binary in the style required by the
server's `standard_conforming_string` setting.
"""
self._adapter.prepare(conn)
def getquoted(self):
"Returns a properly quoted string for use in PostgreSQL/PostGIS."
# psycopg will figure out whether to use E'\\000' or '\000'
return 'ST_GeomFromEWKB(%s)' % self._adapter.getquoted()
def prepare_database_save(self, unused):
return self
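# Editor's sketch (illustrative; not part of the original module): adapting a
# GEOS geometry for a PostGIS query. Requires GeoDjango's GEOS bindings and a
# live psycopg2 connection for prepare()/getquoted().
def _demo_postgis_adapter(connection):
    from django.contrib.gis.geos import Point
    adapter = PostGISAdapter(Point(5, 23, srid=4326))
    adapter.prepare(connection)  # pick the server's escaping style
    return adapter.getquoted()   # e.g. ST_GeomFromEWKB(E'\\x0101...')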
| bsd-3-clause |
Weicong-Lin/pymo-global | android/pgs4a-0.9.6/python-install/lib/python2.7/test/test_difflib.py | 86 | 10297 | import difflib
from test.test_support import run_unittest, findfile
import unittest
import doctest
import sys
class TestWithAscii(unittest.TestCase):
def test_one_insert(self):
sm = difflib.SequenceMatcher(None, 'b' * 100, 'a' + 'b' * 100)
self.assertAlmostEqual(sm.ratio(), 0.995, places=3)
self.assertEqual(list(sm.get_opcodes()),
[ ('insert', 0, 0, 0, 1),
('equal', 0, 100, 1, 101)])
sm = difflib.SequenceMatcher(None, 'b' * 100, 'b' * 50 + 'a' + 'b' * 50)
self.assertAlmostEqual(sm.ratio(), 0.995, places=3)
self.assertEqual(list(sm.get_opcodes()),
[ ('equal', 0, 50, 0, 50),
('insert', 50, 50, 50, 51),
('equal', 50, 100, 51, 101)])
def test_one_delete(self):
sm = difflib.SequenceMatcher(None, 'a' * 40 + 'c' + 'b' * 40, 'a' * 40 + 'b' * 40)
self.assertAlmostEqual(sm.ratio(), 0.994, places=3)
self.assertEqual(list(sm.get_opcodes()),
[ ('equal', 0, 40, 0, 40),
('delete', 40, 41, 40, 40),
('equal', 41, 81, 40, 80)])
class TestAutojunk(unittest.TestCase):
"""Tests for the autojunk parameter added in 2.7"""
def test_one_insert_homogenous_sequence(self):
# By default autojunk=True and the heuristic kicks in for a sequence
# of length 200+
seq1 = 'b' * 200
seq2 = 'a' + 'b' * 200
sm = difflib.SequenceMatcher(None, seq1, seq2)
self.assertAlmostEqual(sm.ratio(), 0, places=3)
# Now turn the heuristic off
sm = difflib.SequenceMatcher(None, seq1, seq2, autojunk=False)
self.assertAlmostEqual(sm.ratio(), 0.9975, places=3)
class TestSFbugs(unittest.TestCase):
def test_ratio_for_null_seqn(self):
# Check clearing of SF bug 763023
s = difflib.SequenceMatcher(None, [], [])
self.assertEqual(s.ratio(), 1)
self.assertEqual(s.quick_ratio(), 1)
self.assertEqual(s.real_quick_ratio(), 1)
def test_comparing_empty_lists(self):
# Check fix for bug #979794
group_gen = difflib.SequenceMatcher(None, [], []).get_grouped_opcodes()
self.assertRaises(StopIteration, group_gen.next)
diff_gen = difflib.unified_diff([], [])
self.assertRaises(StopIteration, diff_gen.next)
def test_added_tab_hint(self):
# Check fix for bug #1488943
diff = list(difflib.Differ().compare(["\tI am a buggy"],["\t\tI am a bug"]))
self.assertEqual("- \tI am a buggy", diff[0])
self.assertEqual("? --\n", diff[1])
self.assertEqual("+ \t\tI am a bug", diff[2])
self.assertEqual("? +\n", diff[3])
patch914575_from1 = """
1. Beautiful is beTTer than ugly.
2. Explicit is better than implicit.
3. Simple is better than complex.
4. Complex is better than complicated.
"""
patch914575_to1 = """
1. Beautiful is better than ugly.
3. Simple is better than complex.
4. Complicated is better than complex.
5. Flat is better than nested.
"""
patch914575_from2 = """
\t\tLine 1: preceeded by from:[tt] to:[ssss]
\t\tLine 2: preceeded by from:[sstt] to:[sssst]
\t \tLine 3: preceeded by from:[sstst] to:[ssssss]
Line 4: \thas from:[sst] to:[sss] after :
Line 5: has from:[t] to:[ss] at end\t
"""
patch914575_to2 = """
Line 1: preceeded by from:[tt] to:[ssss]
\tLine 2: preceeded by from:[sstt] to:[sssst]
Line 3: preceeded by from:[sstst] to:[ssssss]
Line 4: has from:[sst] to:[sss] after :
Line 5: has from:[t] to:[ss] at end
"""
patch914575_from3 = """line 0
1234567890123456789012345689012345
line 1
line 2
line 3
line 4 changed
line 5 changed
line 6 changed
line 7
line 8 subtracted
line 9
1234567890123456789012345689012345
short line
just fits in!!
just fits in two lines yup!!
the end"""
patch914575_to3 = """line 0
1234567890123456789012345689012345
line 1
line 2 added
line 3
line 4 chanGEd
line 5a chanGed
line 6a changEd
line 7
line 8
line 9
1234567890
another long line that needs to be wrapped
just fitS in!!
just fits in two lineS yup!!
the end"""
class TestSFpatches(unittest.TestCase):
def test_html_diff(self):
# Check SF patch 914575 for generating HTML differences
f1a = ((patch914575_from1 + '123\n'*10)*3)
t1a = (patch914575_to1 + '123\n'*10)*3
f1b = '456\n'*10 + f1a
t1b = '456\n'*10 + t1a
f1a = f1a.splitlines()
t1a = t1a.splitlines()
f1b = f1b.splitlines()
t1b = t1b.splitlines()
f2 = patch914575_from2.splitlines()
t2 = patch914575_to2.splitlines()
f3 = patch914575_from3
t3 = patch914575_to3
i = difflib.HtmlDiff()
j = difflib.HtmlDiff(tabsize=2)
k = difflib.HtmlDiff(wrapcolumn=14)
full = i.make_file(f1a,t1a,'from','to',context=False,numlines=5)
tables = '\n'.join(
[
'<h2>Context (first diff within numlines=5(default))</h2>',
i.make_table(f1a,t1a,'from','to',context=True),
'<h2>Context (first diff after numlines=5(default))</h2>',
i.make_table(f1b,t1b,'from','to',context=True),
'<h2>Context (numlines=6)</h2>',
i.make_table(f1a,t1a,'from','to',context=True,numlines=6),
'<h2>Context (numlines=0)</h2>',
i.make_table(f1a,t1a,'from','to',context=True,numlines=0),
'<h2>Same Context</h2>',
i.make_table(f1a,f1a,'from','to',context=True),
'<h2>Same Full</h2>',
i.make_table(f1a,f1a,'from','to',context=False),
'<h2>Empty Context</h2>',
i.make_table([],[],'from','to',context=True),
'<h2>Empty Full</h2>',
i.make_table([],[],'from','to',context=False),
'<h2>tabsize=2</h2>',
j.make_table(f2,t2),
'<h2>tabsize=default</h2>',
i.make_table(f2,t2),
'<h2>Context (wrapcolumn=14,numlines=0)</h2>',
k.make_table(f3.splitlines(),t3.splitlines(),context=True,numlines=0),
'<h2>wrapcolumn=14,splitlines()</h2>',
k.make_table(f3.splitlines(),t3.splitlines()),
'<h2>wrapcolumn=14,splitlines(True)</h2>',
k.make_table(f3.splitlines(True),t3.splitlines(True)),
])
actual = full.replace('</body>','\n%s\n</body>' % tables)
# temporarily uncomment next two lines to baseline this test
#with open('test_difflib_expect.html','w') as fp:
# fp.write(actual)
with open(findfile('test_difflib_expect.html')) as fp:
self.assertEqual(actual, fp.read())
def test_recursion_limit(self):
# Check if the problem described in patch #1413711 exists.
limit = sys.getrecursionlimit()
old = [(i%2 and "K:%d" or "V:A:%d") % i for i in range(limit*2)]
new = [(i%2 and "K:%d" or "V:B:%d") % i for i in range(limit*2)]
difflib.SequenceMatcher(None, old, new).get_opcodes()
class TestOutputFormat(unittest.TestCase):
def test_tab_delimiter(self):
args = ['one', 'two', 'Original', 'Current',
'2005-01-26 23:30:50', '2010-04-02 10:20:52']
ud = difflib.unified_diff(*args, lineterm='')
self.assertEqual(list(ud)[0:2], [
"--- Original\t2005-01-26 23:30:50",
"+++ Current\t2010-04-02 10:20:52"])
cd = difflib.context_diff(*args, lineterm='')
self.assertEqual(list(cd)[0:2], [
"*** Original\t2005-01-26 23:30:50",
"--- Current\t2010-04-02 10:20:52"])
def test_no_trailing_tab_on_empty_filedate(self):
args = ['one', 'two', 'Original', 'Current']
ud = difflib.unified_diff(*args, lineterm='')
self.assertEqual(list(ud)[0:2], ["--- Original", "+++ Current"])
cd = difflib.context_diff(*args, lineterm='')
self.assertEqual(list(cd)[0:2], ["*** Original", "--- Current"])
def test_range_format_unified(self):
# Per the diff spec at http://www.unix.org/single_unix_specification/
spec = '''\
Each <range> field shall be of the form:
%1d", <beginning line number> if the range contains exactly one line,
and:
"%1d,%1d", <beginning line number>, <number of lines> otherwise.
If a range is empty, its beginning line number shall be the number of
the line just before the range, or 0 if the empty range starts the file.
'''
fmt = difflib._format_range_unified
self.assertEqual(fmt(3,3), '3,0')
self.assertEqual(fmt(3,4), '4')
self.assertEqual(fmt(3,5), '4,2')
self.assertEqual(fmt(3,6), '4,3')
self.assertEqual(fmt(0,0), '0,0')
def test_range_format_context(self):
# Per the diff spec at http://www.unix.org/single_unix_specification/
spec = '''\
The range of lines in file1 shall be written in the following format
if the range contains two or more lines:
"*** %d,%d ****\n", <beginning line number>, <ending line number>
and the following format otherwise:
"*** %d ****\n", <ending line number>
The ending line number of an empty range shall be the number of the preceding line,
or 0 if the range is at the start of the file.
Next, the range of lines in file2 shall be written in the following format
if the range contains two or more lines:
"--- %d,%d ----\n", <beginning line number>, <ending line number>
and the following format otherwise:
"--- %d ----\n", <ending line number>
'''
fmt = difflib._format_range_context
self.assertEqual(fmt(3,3), '3')
self.assertEqual(fmt(3,4), '4')
self.assertEqual(fmt(3,5), '4,5')
self.assertEqual(fmt(3,6), '4,6')
self.assertEqual(fmt(0,0), '0')
def test_main():
difflib.HtmlDiff._default_prefix = 0
Doctests = doctest.DocTestSuite(difflib)
run_unittest(
TestWithAscii, TestAutojunk, TestSFpatches, TestSFbugs,
TestOutputFormat, Doctests)
if __name__ == '__main__':
test_main()
| mit |
mdietrichc2c/OCB | addons/l10n_mx/__openerp__.py | 379 | 2559 | # -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
# All Rights Reserved
###############Credits######################################################
# Coded by: Alejandro Negrin [email protected],
# Planified by: Alejandro Negrin, Humberto Arocha, Moises Lopez
# Finance by: Vauxoo.
# Audited by: Humberto Arocha ([email protected]) y Moises Lopez ([email protected])
#############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
{
"name" : "Mexico - Accounting",
"version" : "2.0",
"author" : "Vauxoo",
"category" : "Localization/Account Charts",
"description": """
Minimal accounting configuration for Mexico.
============================================
This chart of accounts is a minimal proposal to be able to use the
accounting features of OpenERP out of the box.
It doesn't pretend to be the complete localization for MX; it is just the
minimal data required to start from scratch with the Mexican localization.
This module and its content are updated frequently by the openerp-mexico team.
With this module you will have:
 - Minimal chart of accounts, tested in production environments.
 - Minimal chart of taxes, to comply with SAT_ requirements.
.. _SAT: http://www.sat.gob.mx/
""",
"depends" : ["account",
"base_vat",
"account_chart",
],
"demo_xml" : [],
"data" : ["data/account_tax_code.xml",
"data/account_chart.xml",
"data/account_tax.xml",
"data/l10n_chart_mx_wizard.xml"],
"active": False,
"installable": True,
"certificate": False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kuiwei/kuiwei | common/lib/capa/capa/tests/response_xml_factory.py | 47 | 34688 | from lxml import etree
from abc import ABCMeta, abstractmethod
class ResponseXMLFactory(object):
""" Abstract base class for capa response XML factories.
Subclasses override create_response_element and
create_input_element to produce XML of particular response types"""
__metaclass__ = ABCMeta
@abstractmethod
def create_response_element(self, **kwargs):
""" Subclasses override to return an etree element
representing the capa response XML
(e.g. <numericalresponse>).
The tree should NOT contain any input elements
(such as <textline />) as these will be added later."""
return None
@abstractmethod
def create_input_element(self, **kwargs):
""" Subclasses override this to return an etree element
representing the capa input XML (such as <textline />)"""
return None
def build_xml(self, **kwargs):
""" Construct an XML string for a capa response
based on **kwargs.
**kwargs is a dictionary that will be passed
to create_response_element() and create_input_element().
See the subclasses below for other keyword arguments
you can specify.
For all response types, **kwargs can contain:
*question_text*: The text of the question to display,
wrapped in <p> tags.
*explanation_text*: The detailed explanation that will
be shown if the user answers incorrectly.
*script*: The embedded Python script (a string)
*num_responses*: The number of responses to create [DEFAULT: 1]
*num_inputs*: The number of input elements
to create [DEFAULT: 1]
Returns a string representation of the XML tree.
"""
# Retrieve keyword arguments
question_text = kwargs.get('question_text', '')
explanation_text = kwargs.get('explanation_text', '')
script = kwargs.get('script', None)
num_responses = kwargs.get('num_responses', 1)
num_inputs = kwargs.get('num_inputs', 1)
# The root is <problem>
root = etree.Element("problem")
# Add a script if there is one
if script:
script_element = etree.SubElement(root, "script")
script_element.set("type", "loncapa/python")
script_element.text = str(script)
# The problem has a child <p> with question text
question = etree.SubElement(root, "p")
question.text = question_text
# Add the response(s)
for i in range(0, int(num_responses)):
response_element = self.create_response_element(**kwargs)
root.append(response_element)
# Add input elements
for j in range(0, int(num_inputs)):
input_element = self.create_input_element(**kwargs)
if input_element is not None:
response_element.append(input_element)
# The problem has an explanation of the solution
if explanation_text:
explanation = etree.SubElement(root, "solution")
explanation_div = etree.SubElement(explanation, "div")
explanation_div.set("class", "detailed-solution")
explanation_div.text = explanation_text
return etree.tostring(root)
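# Illustrative usage (a sketch; the concrete subclass and kwargs shown
# are assumptions based on the factories defined below):
#
#   >>> factory = NumericalResponseXMLFactory()
#   >>> xml = factory.build_xml(question_text='What is 2+2?',
#   ...                         answer='4', tolerance='5%')
#
# The result is a <problem> string containing one <numericalresponse>
# wrapping one <textline/> input.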
@staticmethod
def textline_input_xml(**kwargs):
""" Create a <textline/> XML element
Uses **kwargs:
*math_display*: If True, then includes a MathJax display of user input
*size*: An integer representing the width of the text line
"""
math_display = kwargs.get('math_display', False)
size = kwargs.get('size', None)
input_element = etree.Element('textline')
if math_display:
input_element.set('math', '1')
if size:
input_element.set('size', str(size))
return input_element
@staticmethod
def choicegroup_input_xml(**kwargs):
""" Create a <choicegroup> XML element
Uses **kwargs:
*choice_type*: Can be "checkbox", "radio", or "multiple"
*choices*: List of True/False values indicating whether
a particular choice is correct or not.
Users must choose *all* correct options in order
to be marked correct.
DEFAULT: [True]
*choice_names": List of strings identifying the choices.
If specified, you must ensure that
len(choice_names) == len(choices)
"""
# Names of group elements
group_element_names = {'checkbox': 'checkboxgroup',
'radio': 'radiogroup',
'multiple': 'choicegroup'}
# Retrieve **kwargs
choices = kwargs.get('choices', [True])
choice_type = kwargs.get('choice_type', 'multiple')
choice_names = kwargs.get('choice_names', [None] * len(choices))
# Create the <choicegroup>, <checkboxgroup>, or <radiogroup> element
assert(choice_type in group_element_names)
group_element = etree.Element(group_element_names[choice_type])
# Create the <choice> elements
for (correct_val, name) in zip(choices, choice_names):
choice_element = etree.SubElement(group_element, "choice")
choice_element.set("correct", "true" if correct_val else "false")
# Add a name identifying the choice, if one exists
# For simplicity, we use the same string as both the
# name attribute and the text of the element
if name:
choice_element.text = str(name)
choice_element.set("name", str(name))
return group_element
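# Illustrative output (a sketch): choicegroup_input_xml(
#     choice_type='radio', choices=[True, False])
# produces:
#   <radiogroup>
#     <choice correct="true"/>
#     <choice correct="false"/>
#   </radiogroup>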
class NumericalResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <numericalresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <numericalresponse> XML element.
Uses **kwarg keys:
*answer*: The correct answer (e.g. "5")
*tolerance*: The tolerance within which a response
is considered correct. Can be a decimal (e.g. "0.01")
or percentage (e.g. "2%")
"""
answer = kwargs.get('answer', None)
tolerance = kwargs.get('tolerance', None)
response_element = etree.Element('numericalresponse')
if answer:
if isinstance(answer, float):
response_element.set('answer', repr(answer))
else:
response_element.set('answer', str(answer))
if tolerance:
responseparam_element = etree.SubElement(response_element, 'responseparam')
responseparam_element.set('type', 'tolerance')
responseparam_element.set('default', str(tolerance))
return response_element
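# Illustrative output (a sketch): answer='5', tolerance='2%' yields
#   <numericalresponse answer="5">
#     <responseparam type="tolerance" default="2%"/>
#   </numericalresponse>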
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
class CustomResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <customresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <customresponse> XML element.
Uses **kwargs:
*cfn*: the Python code to run. Can be inline code,
or the name of a function defined in earlier <script> tags.
Should have the form: cfn(expect, answer_given, student_answers)
where expect is a value (see below),
answer_given is a single value (for 1 input)
or a list of values (for multiple inputs),
and student_answers is a dict of answers by input ID.
*expect*: The value passed to the function cfn
*answer*: Inline script that calculates the answer
"""
# Retrieve **kwargs
cfn = kwargs.get('cfn', None)
expect = kwargs.get('expect', None)
answer = kwargs.get('answer', None)
options = kwargs.get('options', None)
cfn_extra_args = kwargs.get('cfn_extra_args', None)
# Create the response element
response_element = etree.Element("customresponse")
if cfn:
response_element.set('cfn', str(cfn))
if expect:
response_element.set('expect', str(expect))
if answer:
answer_element = etree.SubElement(response_element, "answer")
answer_element.text = str(answer)
if options:
response_element.set('options', str(options))
if cfn_extra_args:
response_element.set('cfn_extra_args', str(cfn_extra_args))
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
class SymbolicResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <symbolicresponse> XML trees """
def create_response_element(self, **kwargs):
cfn = kwargs.get('cfn', None)
answer = kwargs.get('answer', None)
options = kwargs.get('options', None)
response_element = etree.Element("symbolicresponse")
if cfn:
response_element.set('cfn', str(cfn))
if answer:
response_element.set('answer', str(answer))
if options:
response_element.set('options', str(options))
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
class SchematicResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <schematicresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create the <schematicresponse> XML element.
Uses *kwargs*:
*answer*: The Python script used to evaluate the answer.
"""
answer_script = kwargs.get('answer', None)
# Create the <schematicresponse> element
response_element = etree.Element("schematicresponse")
# Insert the <answer> script if one is provided
if answer_script:
answer_element = etree.SubElement(response_element, "answer")
answer_element.set("type", "loncapa/python")
answer_element.text = str(answer_script)
return response_element
def create_input_element(self, **kwargs):
""" Create the <schematic> XML element.
Although <schematic> can have several attributes,
(*height*, *width*, *parts*, *analyses*, *submit_analysis*, and *initial_value*),
none of them are used in the capa module.
For testing, we create a bare-bones version of <schematic>."""
return etree.Element("schematic")
class CodeResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <coderesponse> XML trees """
def build_xml(self, **kwargs):
# Since we are providing an <answer> tag,
# we should override the default behavior
# of including a <solution> tag as well
kwargs['explanation_text'] = None
return super(CodeResponseXMLFactory, self).build_xml(**kwargs)
def create_response_element(self, **kwargs):
"""
Create a <coderesponse> XML element.
Uses **kwargs:
*initial_display*: The code that initially appears in the textbox
[DEFAULT: "Enter code here"]
*answer_display*: The answer to display to the student
[DEFAULT: "This is the correct answer!"]
*grader_payload*: A JSON-encoded string sent to the grader
[DEFAULT: empty dict string]
*allowed_files*: A space-separated string of file names.
[DEFAULT: None]
*required_files*: A space-separated string of file names.
[DEFAULT: None]
"""
# Get **kwargs
initial_display = kwargs.get("initial_display", "Enter code here")
answer_display = kwargs.get("answer_display", "This is the correct answer!")
grader_payload = kwargs.get("grader_payload", '{}')
allowed_files = kwargs.get("allowed_files", None)
required_files = kwargs.get("required_files", None)
# Create the <coderesponse> element
response_element = etree.Element("coderesponse")
# If files are involved, create the <filesubmission> element.
has_files = allowed_files or required_files
if has_files:
filesubmission_element = etree.SubElement(response_element, "filesubmission")
if allowed_files:
filesubmission_element.set("allowed_files", allowed_files)
if required_files:
filesubmission_element.set("required_files", required_files)
# Create the <codeparam> element.
codeparam_element = etree.SubElement(response_element, "codeparam")
# Set the initial display text
initial_element = etree.SubElement(codeparam_element, "initial_display")
initial_element.text = str(initial_display)
# Set the answer display text
answer_element = etree.SubElement(codeparam_element, "answer_display")
answer_element.text = str(answer_display)
# Set the grader payload string
grader_element = etree.SubElement(codeparam_element, "grader_payload")
grader_element.text = str(grader_payload)
# Create the input within the response
if not has_files:
input_element = etree.SubElement(response_element, "textbox")
input_element.set("mode", "python")
return response_element
def create_input_element(self, **kwargs):
# Since we create this in create_response_element(),
# return None here
return None
class ChoiceResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <choiceresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <choiceresponse> element """
return etree.Element("choiceresponse")
def create_input_element(self, **kwargs):
""" Create a <checkboxgroup> element."""
return ResponseXMLFactory.choicegroup_input_xml(**kwargs)
class FormulaResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <formularesponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <formularesponse> element.
*sample_dict*: A dictionary of the form:
{ VARIABLE_NAME: (MIN, MAX), ....}
This specifies the range within which
to numerically sample each variable to check
student answers.
[REQUIRED]
*num_samples*: The number of times to sample the student's answer
to numerically compare it to the correct answer.
*tolerance*: The tolerance within which answers will be accepted
[DEFAULT: 0.01]
*answer*: The answer to the problem. Can be a formula string
or a Python variable defined in a script
(e.g. "$calculated_answer" for a Python variable
called calculated_answer)
[REQUIRED]
*hints*: List of (hint_prompt, hint_name, hint_text) tuples
Where *hint_prompt* is the formula for which we show the hint,
*hint_name* is an internal identifier for the hint,
and *hint_text* is the text we show for the hint.
"""
# Retrieve kwargs
sample_dict = kwargs.get("sample_dict", None)
num_samples = kwargs.get("num_samples", None)
tolerance = kwargs.get("tolerance", 0.01)
answer = kwargs.get("answer", None)
hint_list = kwargs.get("hints", None)
assert(answer)
assert(sample_dict and num_samples)
# Create the <formularesponse> element
response_element = etree.Element("formularesponse")
# Set the sample information
sample_str = self._sample_str(sample_dict, num_samples, tolerance)
response_element.set("samples", sample_str)
# Set the tolerance
responseparam_element = etree.SubElement(response_element, "responseparam")
responseparam_element.set("type", "tolerance")
responseparam_element.set("default", str(tolerance))
# Set the answer
response_element.set("answer", str(answer))
# Include hints, if specified
if hint_list:
hintgroup_element = etree.SubElement(response_element, "hintgroup")
for (hint_prompt, hint_name, hint_text) in hint_list:
# For each hint, create a <formulahint> element
formulahint_element = etree.SubElement(hintgroup_element, "formulahint")
# We could sample a different range, but for simplicity,
# we use the same sample string for the hints
# that we used previously.
formulahint_element.set("samples", sample_str)
formulahint_element.set("answer", str(hint_prompt))
formulahint_element.set("name", str(hint_name))
# For each hint, create a <hintpart> element
# corresponding to the <formulahint>
hintpart_element = etree.SubElement(hintgroup_element, "hintpart")
hintpart_element.set("on", str(hint_name))
text_element = etree.SubElement(hintpart_element, "text")
text_element.text = str(hint_text)
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
def _sample_str(self, sample_dict, num_samples, tolerance):
# Loncapa uses a special format for sample strings:
# "x,y,z@4,5,3:10,12,8#4" means plug in values for (x,y,z)
# from within the box defined by points (4,5,3) and (10,12,8)
# The "#4" means to repeat 4 times.
variables = [str(v) for v in sample_dict.keys()]
low_range_vals = [str(f[0]) for f in sample_dict.values()]
high_range_vals = [str(f[1]) for f in sample_dict.values()]
sample_str = (",".join(sample_dict.keys()) + "@" +
",".join(low_range_vals) + ":" +
",".join(high_range_vals) +
"#" + str(num_samples))
return sample_str
class ImageResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <imageresponse> XML """
def create_response_element(self, **kwargs):
""" Create the <imageresponse> element."""
return etree.Element("imageresponse")
def create_input_element(self, **kwargs):
""" Create the <imageinput> element.
Uses **kwargs:
*src*: URL for the image file [DEFAULT: "/static/image.jpg"]
*width*: Width of the image [DEFAULT: 100]
*height*: Height of the image [DEFAULT: 100]
*rectangle*: String representing the rectangles the user should select.
Take the form "(x1,y1)-(x2,y2)", where the two (x,y)
tuples define the corners of the rectangle.
Can include multiple rectangles separated by a semicolon, e.g.
"(490,11)-(556,98);(242,202)-(296,276)"
*regions*: String representing the regions a user can select
Take the form "[ [[x1,y1], [x2,y2], [x3,y3]],
[[x1,y1], [x2,y2], [x3,y3]] ]"
(Defines two regions, each with 3 points)
REQUIRED: Either *rectangle* or *regions* (or both)
"""
# Get the **kwargs
src = kwargs.get("src", "/static/image.jpg")
width = kwargs.get("width", 100)
height = kwargs.get("height", 100)
rectangle = kwargs.get('rectangle', None)
regions = kwargs.get('regions', None)
assert(rectangle or regions)
# Create the <imageinput> element
input_element = etree.Element("imageinput")
input_element.set("src", str(src))
input_element.set("width", str(width))
input_element.set("height", str(height))
if rectangle:
input_element.set("rectangle", rectangle)
if regions:
input_element.set("regions", regions)
return input_element
class JavascriptResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <javascriptresponse> XML """
def create_response_element(self, **kwargs):
""" Create the <javascriptresponse> element.
Uses **kwargs:
*generator_src*: Name of the JS file to generate the problem.
*grader_src*: Name of the JS file to grade the problem.
*display_class*: Name of the class used to display the problem
*display_src*: Name of the JS file used to display the problem
*param_dict*: Dictionary of parameters to pass to the JS
"""
# Get **kwargs
generator_src = kwargs.get("generator_src", None)
grader_src = kwargs.get("grader_src", None)
display_class = kwargs.get("display_class", None)
display_src = kwargs.get("display_src", None)
param_dict = kwargs.get("param_dict", {})
# Both display_src and display_class given,
# or neither given
assert((display_src and display_class) or
(not display_src and not display_class))
# Create the <javascriptresponse> element
response_element = etree.Element("javascriptresponse")
if generator_src:
generator_element = etree.SubElement(response_element, "generator")
generator_element.set("src", str(generator_src))
if grader_src:
grader_element = etree.SubElement(response_element, "grader")
grader_element.set("src", str(grader_src))
if display_class and display_src:
display_element = etree.SubElement(response_element, "display")
display_element.set("class", str(display_class))
display_element.set("src", str(display_src))
for (param_name, param_val) in param_dict.items():
responseparam_element = etree.SubElement(response_element, "responseparam")
responseparam_element.set("name", str(param_name))
responseparam_element.set("value", str(param_val))
return response_element
def create_input_element(self, **kwargs):
""" Create the <javascriptinput> element """
return etree.Element("javascriptinput")
class MultipleChoiceResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <multiplechoiceresponse> XML """
def create_response_element(self, **kwargs):
""" Create the <multiplechoiceresponse> element"""
return etree.Element('multiplechoiceresponse')
def create_input_element(self, **kwargs):
""" Create the <choicegroup> element"""
kwargs['choice_type'] = 'multiple'
return ResponseXMLFactory.choicegroup_input_xml(**kwargs)
class TrueFalseResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <truefalseresponse> XML """
def create_response_element(self, **kwargs):
""" Create the <truefalseresponse> element"""
return etree.Element('truefalseresponse')
def create_input_element(self, **kwargs):
""" Create the <choicegroup> element"""
kwargs['choice_type'] = 'multiple'
return ResponseXMLFactory.choicegroup_input_xml(**kwargs)
class OptionResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <optionresponse> XML"""
def create_response_element(self, **kwargs):
""" Create the <optionresponse> element"""
return etree.Element("optionresponse")
def create_input_element(self, **kwargs):
""" Create the <optioninput> element.
Uses **kwargs:
*options*: a list of possible options the user can choose from [REQUIRED]
You must specify at least 2 options.
*correct_option*: the correct choice from the list of options [REQUIRED]
"""
options_list = kwargs.get('options', None)
correct_option = kwargs.get('correct_option', None)
assert(options_list and correct_option)
assert(len(options_list) > 1)
assert(correct_option in options_list)
# Create the <optioninput> element
optioninput_element = etree.Element("optioninput")
# Set the "options" attribute
# Format: "('first','second','third')"
options_attr_string = u",".join([u"'{}'".format(o) for o in options_list])
options_attr_string = u"({})".format(options_attr_string)
optioninput_element.set('options', options_attr_string)
# Set the "correct" attribute
optioninput_element.set('correct', str(correct_option))
return optioninput_element
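# Illustrative output (a sketch): options=['yes', 'no'] with
# correct_option='yes' yields
#   <optioninput options="('yes','no')" correct="yes"/>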
class StringResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <stringresponse> XML """
def create_response_element(self, **kwargs):
""" Create a <stringresponse> XML element.
Uses **kwargs:
*answer*: The correct answer (a string) [REQUIRED]
*case_sensitive*: Whether the response is case-sensitive (True/False)
[DEFAULT: True]
*hints*: List of (hint_prompt, hint_name, hint_text) tuples
Where *hint_prompt* is the string for which we show the hint,
*hint_name* is an internal identifier for the hint,
and *hint_text* is the text we show for the hint.
*hintfn*: The name of a function in the script to use for hints.
*regexp*: Whether the response is regexp
*additional_answers*: list of additional answers.
"""
# Retrieve the **kwargs
answer = kwargs.get("answer", None)
case_sensitive = kwargs.get("case_sensitive", None)
hint_list = kwargs.get('hints', None)
hint_fn = kwargs.get('hintfn', None)
regexp = kwargs.get('regexp', None)
additional_answers = kwargs.get('additional_answers', [])
assert answer
# Create the <stringresponse> element
response_element = etree.Element("stringresponse")
# Set the answer attribute
response_element.set("answer", unicode(answer))
# Set the case sensitivity and regexp:
type_value = ''
if case_sensitive is not None:
type_value += "cs" if case_sensitive else "ci"
type_value += ' regexp' if regexp else ''
if type_value:
response_element.set("type", type_value.strip())
# Add the hints if specified
if hint_list or hint_fn:
hintgroup_element = etree.SubElement(response_element, "hintgroup")
if hint_list:
assert not hint_fn
for (hint_prompt, hint_name, hint_text) in hint_list:
stringhint_element = etree.SubElement(hintgroup_element, "stringhint")
stringhint_element.set("answer", str(hint_prompt))
stringhint_element.set("name", str(hint_name))
hintpart_element = etree.SubElement(hintgroup_element, "hintpart")
hintpart_element.set("on", str(hint_name))
hint_text_element = etree.SubElement(hintpart_element, "text")
hint_text_element.text = str(hint_text)
if hint_fn:
assert not hint_list
hintgroup_element.set("hintfn", hint_fn)
for additional_answer in additional_answers:
etree.SubElement(response_element, "additional_answer").text = additional_answer
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
class AnnotationResponseXMLFactory(ResponseXMLFactory):
""" Factory for creating <annotationresponse> XML trees """
def create_response_element(self, **kwargs):
""" Create a <annotationresponse> element """
return etree.Element("annotationresponse")
def create_input_element(self, **kwargs):
""" Create a <annotationinput> element."""
input_element = etree.Element("annotationinput")
text_children = [
{'tag': 'title', 'text': kwargs.get('title', 'super cool annotation')},
{'tag': 'text', 'text': kwargs.get('text', 'texty text')},
{'tag': 'comment', 'text': kwargs.get('comment', 'blah blah erudite comment blah blah')},
{'tag': 'comment_prompt', 'text': kwargs.get('comment_prompt', 'type a commentary below')},
{'tag': 'tag_prompt', 'text': kwargs.get('tag_prompt', 'select one tag')}
]
for child in text_children:
etree.SubElement(input_element, child['tag']).text = child['text']
default_options = [('green', 'correct'), ('eggs', 'incorrect'), ('ham', 'partially-correct')]
options = kwargs.get('options', default_options)
options_element = etree.SubElement(input_element, 'options')
for (description, correctness) in options:
option_element = etree.SubElement(options_element, 'option', {'choice': correctness})
option_element.text = description
return input_element
class SymbolicResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <symbolicresponse> xml """
def create_response_element(self, **kwargs):
""" Build the <symbolicresponse> XML element.
Uses **kwargs:
*expect*: The correct answer (a sympy string)
*options*: list of option strings to pass to symmath_check
(e.g. 'matrix', 'qbit', 'imaginary', 'numerical')"""
# Retrieve **kwargs
expect = kwargs.get('expect', '')
options = kwargs.get('options', [])
# Symmath check expects a string of options
options_str = ",".join(options)
# Construct the <symbolicresponse> element
response_element = etree.Element('symbolicresponse')
if expect:
response_element.set('expect', str(expect))
if options_str:
response_element.set('options', str(options_str))
return response_element
def create_input_element(self, **kwargs):
return ResponseXMLFactory.textline_input_xml(**kwargs)
class ChoiceTextResponseXMLFactory(ResponseXMLFactory):
""" Factory for producing <choicetextresponse> xml """
def create_response_element(self, **kwargs):
""" Create a <choicetextresponse> element """
return etree.Element("choicetextresponse")
def create_input_element(self, **kwargs):
""" Create a <checkboxgroup> element.
choices can be specified in the following format:
[("true", [{"answer": "5", "tolerance": 0}]),
("false", [{"answer": "5", "tolerance": 0}])
]
This indicates that the first checkbox/radio is correct and it
contains a numtolerance_input with an answer of 5 and a tolerance of 0
It also indicates that the second has a second incorrect radiobutton
or checkbox with a numtolerance_input.
"""
choices = kwargs.get('choices', [("true", {})])
choice_inputs = []
# Ensure that the first element of choices is an ordered
# collection. It will start as a list, a tuple, or not a Container.
if type(choices[0]) not in [list, tuple]:
choices = [choices]
for choice in choices:
correctness, answers = choice
numtolerance_inputs = []
# If the current `choice` contains any ("answer": number)
# elements, turn those into numtolerance_inputs
if answers:
# `answers` will be a list or tuple of answers or a single
# answer, representing the answers for numtolerance_inputs
# inside of this specific choice.
# Make sure that `answers` is an ordered collection for
# convenience.
if type(answers) not in [list, tuple]:
answers = [answers]
numtolerance_inputs = [
self._create_numtolerance_input_element(answer)
for answer in answers
]
choice_inputs.append(
self._create_choice_element(
correctness=correctness,
inputs=numtolerance_inputs
)
)
# Default type is 'radiotextgroup'
input_type = kwargs.get('type', 'radiotextgroup')
input_element = etree.Element(input_type)
for ind, choice in enumerate(choice_inputs):
# Give each choice text equal to its position (0, 1, 2...)
choice.text = "choice_{0}".format(ind)
input_element.append(choice)
return input_element
def _create_choice_element(self, **kwargs):
"""
Creates a choice element for a choicetext problem.
Defaults to a correct choice with no numtolerance_input
"""
text = kwargs.get('text', '')
correct = kwargs.get('correctness', "true")
inputs = kwargs.get('inputs', [])
choice_element = etree.Element("choice")
choice_element.set("correct", correct)
choice_element.text = text
for inp in inputs:
# Add all of the inputs as children of this choice
choice_element.append(inp)
return choice_element
def _create_numtolerance_input_element(self, params):
"""
Creates a <numtolerance_input/> or <decoy_input/> element with
optionally specified tolerance and answer.
"""
answer = params.get('answer')
# If no answer is specified, create a <decoy_input/>;
# otherwise create a <numtolerance_input/> and set its tolerance
# and answer attributes.
if answer:
text_input = etree.Element("numtolerance_input")
text_input.set('answer', answer)
# If a tolerance was specified, use it; otherwise
# set the tolerance to "0"
text_input.set('tolerance', params.get('tolerance', "0"))
else:
text_input = etree.Element("decoy_input")
return text_input
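# Illustrative mapping (a sketch):
#   {'answer': '5', 'tolerance': '0.5'} -> <numtolerance_input answer="5" tolerance="0.5"/>
#   {} -> <decoy_input/>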
| agpl-3.0 |
andreamerello/linux-stm32 | Documentation/sphinx/kernel-doc.py | 43 | 5741 | # coding=utf-8
#
# Copyright © 2016 Intel Corporation
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice (including the next
# paragraph) shall be included in all copies or substantial portions of the
# Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
# Authors:
# Jani Nikula <[email protected]>
#
# Please make sure this works on both python2 and python3.
#
import os
import subprocess
import sys
import re
import glob
from docutils import nodes, statemachine
from docutils.statemachine import ViewList
from docutils.parsers.rst import directives
from sphinx.util.compat import Directive
from sphinx.ext.autodoc import AutodocReporter
class KernelDocDirective(Directive):
"""Extract kernel-doc comments from the specified file"""
required_arguments = 1
optional_arguments = 4
option_spec = {
'doc': directives.unchanged_required,
'functions': directives.unchanged_required,
'export': directives.unchanged,
'internal': directives.unchanged,
}
has_content = False
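# Illustrative use in an RST source file (a sketch; option names follow
# option_spec above):
#
#   .. kernel-doc:: drivers/gpu/drm/drm_ioctl.c
#      :export: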
def run(self):
env = self.state.document.settings.env
cmd = [env.config.kerneldoc_bin, '-rst', '-enable-lineno']
filename = env.config.kerneldoc_srctree + '/' + self.arguments[0]
export_file_patterns = []
# Tell sphinx of the dependency
env.note_dependency(os.path.abspath(filename))
tab_width = self.options.get('tab-width', self.state.document.settings.tab_width)
# FIXME: make this nicer and more robust against errors
if 'export' in self.options:
cmd += ['-export']
export_file_patterns = str(self.options.get('export')).split()
elif 'internal' in self.options:
cmd += ['-internal']
export_file_patterns = str(self.options.get('internal')).split()
elif 'doc' in self.options:
cmd += ['-function', str(self.options.get('doc'))]
elif 'functions' in self.options:
for f in str(self.options.get('functions')).split():
cmd += ['-function', f]
for pattern in export_file_patterns:
for f in glob.glob(env.config.kerneldoc_srctree + '/' + pattern):
env.note_dependency(os.path.abspath(f))
cmd += ['-export-file', f]
cmd += [filename]
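# At this point cmd is the full kernel-doc invocation, e.g. (illustrative,
# assuming kerneldoc_bin points at the in-tree script):
#   ['./scripts/kernel-doc', '-rst', '-enable-lineno', '-export', 'drivers/foo.c']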
try:
env.app.verbose('calling kernel-doc \'%s\'' % (" ".join(cmd)))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
out, err = p.communicate()
# python2 needs conversion to unicode.
# python3 with universal_newlines=True returns strings.
if sys.version_info.major < 3:
out, err = unicode(out, 'utf-8'), unicode(err, 'utf-8')
if p.returncode != 0:
sys.stderr.write(err)
env.app.warn('kernel-doc \'%s\' failed with return code %d' % (" ".join(cmd), p.returncode))
return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
elif env.config.kerneldoc_verbosity > 0:
sys.stderr.write(err)
lines = statemachine.string2lines(out, tab_width, convert_whitespace=True)
result = ViewList()
lineoffset = 0
line_regex = re.compile("^#define LINENO ([0-9]+)$")
for line in lines:
match = line_regex.search(line)
if match:
# sphinx counts lines from 0
lineoffset = int(match.group(1)) - 1
# we must eat our comments since they upset the markup
else:
result.append(line, filename, lineoffset)
lineoffset += 1
node = nodes.section()
buf = self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter
self.state.memo.reporter = AutodocReporter(result, self.state.memo.reporter)
self.state.memo.title_styles, self.state.memo.section_level = [], 0
try:
self.state.nested_parse(result, 0, node, match_titles=1)
finally:
self.state.memo.title_styles, self.state.memo.section_level, self.state.memo.reporter = buf
return node.children
except Exception as e: # pylint: disable=W0703
env.app.warn('kernel-doc \'%s\' processing failed with: %s' %
(" ".join(cmd), str(e)))
return [nodes.error(None, nodes.paragraph(text = "kernel-doc missing"))]
def setup(app):
app.add_config_value('kerneldoc_bin', None, 'env')
app.add_config_value('kerneldoc_srctree', None, 'env')
app.add_config_value('kerneldoc_verbosity', 1, 'env')
app.add_directive('kernel-doc', KernelDocDirective)
| gpl-2.0 |
eric-stanley/robotframework | utest/conf/test_settings.py | 1 | 5837 | import unittest
import os
from os.path import abspath
from robot.conf.settings import _BaseSettings, RobotSettings, RebotSettings
from robot.errors import DataError
from robot.utils.asserts import assert_equals, assert_false
class SettingWrapper(_BaseSettings):
def __init__(self):
pass
class TestSplitArgsFromNameOrPath(unittest.TestCase):
def setUp(self):
self.method = SettingWrapper()._split_args_from_name_or_path
def test_with_no_args(self):
assert not os.path.exists('name'), 'does not work if you have a name folder!'
assert_equals(self.method('name'), ('name', []))
def test_with_args(self):
assert not os.path.exists('name'), 'does not work if you have a name folder!'
assert_equals(self.method('name:arg'), ('name', ['arg']))
assert_equals(self.method('listener:v1:v2:v3'), ('listener', ['v1', 'v2', 'v3']))
assert_equals(self.method('aa:bb:cc'), ('aa', ['bb', 'cc']))
def test_empty_args(self):
assert not os.path.exists('foo'), 'does not work if you have a foo folder!'
assert_equals(self.method('foo:'), ('foo', ['']))
assert_equals(self.method('bar:arg1::arg3'), ('bar', ['arg1', '', 'arg3']))
assert_equals(self.method('3:'), ('3', ['']))
def test_with_windows_path_without_args(self):
assert_equals(self.method('C:\\name.py'), ('C:\\name.py', []))
assert_equals(self.method('X:\\APPS\\listener'), ('X:\\APPS\\listener', []))
assert_equals(self.method('C:/varz.py'), ('C:/varz.py', []))
def test_with_windows_path_with_args(self):
assert_equals(self.method('C:\\name.py:arg1'), ('C:\\name.py', ['arg1']))
assert_equals(self.method('D:\\APPS\\listener:v1:b2:z3'),
('D:\\APPS\\listener', ['v1', 'b2', 'z3']))
assert_equals(self.method('C:/varz.py:arg'), ('C:/varz.py', ['arg']))
def test_existing_paths_are_made_absolute(self):
path = 'robot-framework-unit-test-file-12q3405909qasf'
open(path, 'w').close()
try:
assert_equals(self.method(path), (abspath(path), []))
assert_equals(self.method(path+':arg'), (abspath(path), ['arg']))
finally:
os.remove(path)
def test_existing_path_with_colons(self):
# Colons aren't allowed in Windows paths (other than in "c:")
if os.sep == '\\':
return
path = 'robot:framework:test:1:2:42'
os.mkdir(path)
try:
assert_equals(self.method(path), (abspath(path), []))
finally:
os.rmdir(path)
class TestRobotAndRebotSettings(unittest.TestCase):
def test_robot_and_rebot_settings_are_independent(self):
# http://code.google.com/p/robotframework/issues/detail?id=881
orig_opts = RobotSettings()._opts
RebotSettings()
assert_equals(RobotSettings()._opts, orig_opts)
def test_extra_options(self):
assert_equals(RobotSettings(name='My Name')['Name'], 'My Name')
assert_equals(RobotSettings({'name': 'Override'}, name='Set')['Name'], 'Set')
def test_multi_options_as_single_string(self):
assert_equals(RobotSettings({'test': 'one'})['TestNames'], ['one'])
assert_equals(RebotSettings({'exclude': 'two'})['Exclude'], ['two'])
def test_output_files_as_none_string(self):
for name in 'Output', 'Report', 'Log', 'XUnit', 'DebugFile':
attr = (name[:-4] if name.endswith('File') else name).lower()
settings = RobotSettings({name.lower(): 'NoNe'})
assert_equals(settings[name], None)
if hasattr(settings, attr):
assert_equals(getattr(settings, attr), None)
def test_output_files_as_none_object(self):
for name in 'Output', 'Report', 'Log', 'XUnit', 'DebugFile':
attr = (name[:-4] if name.endswith('File') else name).lower()
settings = RobotSettings({name.lower(): None})
assert_equals(settings[name], None)
if hasattr(settings, attr):
assert_equals(getattr(settings, attr), None)
def test_log_levels(self):
self._verify_log_level('TRACE')
self._verify_log_level('DEBUG')
self._verify_log_level('INFO')
self._verify_log_level('WARN')
self._verify_log_level('NONE')
def test_default_log_level(self):
self._verify_log_levels(RobotSettings(), 'INFO')
self._verify_log_levels(RebotSettings(), 'TRACE')
def _verify_log_level(self, input, level=None, default=None):
level = level or input
default = default or level
self._verify_log_levels(RobotSettings({'loglevel': input}), level, default)
self._verify_log_levels(RebotSettings({'loglevel': input}), level, default)
def _verify_log_levels(self, settings, level, default=None):
assert_equals(settings['LogLevel'], level)
assert_equals(settings['VisibleLogLevel'], default or level)
def test_log_levels_with_default(self):
self._verify_log_level('TRACE:INFO', level='TRACE', default='INFO')
self._verify_log_level('TRACE:debug', level='TRACE', default='DEBUG')
self._verify_log_level('DEBUG:INFO', level='DEBUG', default='INFO')
def test_invalid_log_level(self):
self._verify_invalid_log_level('kekonen')
self._verify_invalid_log_level('DEBUG:INFO:FOO')
self._verify_invalid_log_level('INFO:bar')
self._verify_invalid_log_level('bar:INFO')
def test_visible_level_higher_than_normal_level(self):
self._verify_invalid_log_level('INFO:TRACE')
self._verify_invalid_log_level('DEBUG:TRACE')
def _verify_invalid_log_level(self, input):
self.assertRaises(DataError, RobotSettings, {'loglevel': input})
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
jayceyxc/hue | desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/Protocol/AllOrNothing.py | 125 | 11897 | #
# AllOrNothing.py : all-or-nothing package transformations
#
# Part of the Python Cryptography Toolkit
#
# Written by Andrew M. Kuchling and others
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""This file implements all-or-nothing package transformations.
An all-or-nothing package transformation is one in which some text is
transformed into message blocks, such that all blocks must be obtained before
the reverse transformation can be applied. Thus, if any blocks are corrupted
or lost, the original message cannot be reproduced.
An all-or-nothing package transformation is not encryption, although a block
cipher algorithm is used. The encryption key is randomly generated and is
extractable from the message blocks.
This class implements the All-Or-Nothing package transformation algorithm
described in:
Ronald L. Rivest. "All-Or-Nothing Encryption and The Package Transform"
http://theory.lcs.mit.edu/~rivest/fusion.pdf
"""
__revision__ = "$Id$"
import operator
import sys
from Crypto.Util.number import bytes_to_long, long_to_bytes
from Crypto.Util.py3compat import *
def isInt(x):
test = 0
try:
test += x
except TypeError:
return 0
return 1
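# e.g. isInt(3) -> 1, isInt('3') -> 0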
class AllOrNothing:
"""Class implementing the All-or-Nothing package transform.
Methods for subclassing:
_inventkey(key_size):
Returns a randomly generated key. Subclasses can use this to
implement better random key generating algorithms. The default
algorithm is probably not very cryptographically secure.
"""
def __init__(self, ciphermodule, mode=None, IV=None):
"""AllOrNothing(ciphermodule, mode=None, IV=None)
ciphermodule is a module implementing the cipher algorithm to
use. It must provide the PEP272 interface.
Note that the encryption key is randomly generated
automatically when needed. Optional arguments mode and IV are
passed directly through to the ciphermodule.new() method; they
are the feedback mode and initialization vector to use. All
three arguments must be the same for the object used to create
the digest, and to undigest'ify the message blocks.
"""
self.__ciphermodule = ciphermodule
self.__mode = mode
self.__IV = IV
self.__key_size = ciphermodule.key_size
if not isInt(self.__key_size) or self.__key_size==0:
self.__key_size = 16
__K0digit = bchr(0x69)
def digest(self, text):
"""digest(text:string) : [string]
Perform the All-or-Nothing package transform on the given
string. Output is a list of message blocks describing the
transformed text, where each block is a string of bit length equal
to the ciphermodule's block_size.
"""
# generate a random session key and K0, the key used to encrypt the
# hash blocks. Rivest calls this a fixed, publicly-known encryption
# key, but says nothing about the security implications of this key or
# how to choose it.
key = self._inventkey(self.__key_size)
K0 = self.__K0digit * self.__key_size
# we need two cipher objects here, one that is used to encrypt the
# message blocks and one that is used to encrypt the hashes. The
# former uses the randomly generated key, while the latter uses the
# well-known key.
mcipher = self.__newcipher(key)
hcipher = self.__newcipher(K0)
# Pad the text so that its length is a multiple of the cipher's
# block_size. Pad with trailing spaces, which will be eliminated in
# the undigest() step.
block_size = self.__ciphermodule.block_size
padbytes = block_size - (len(text) % block_size)
text = text + b(' ') * padbytes
# Run through the algorithm:
# s: number of message blocks (size of text / block_size)
# input sequence: m1, m2, ... ms
# random key K' (`key' in the code)
# Compute output sequence: m'1, m'2, ... m's' for s' = s + 1
# Let m'i = mi ^ E(K', i) for i = 1, 2, 3, ..., s
# Let m's' = K' ^ h1 ^ h2 ^ ... hs
# where hi = E(K0, m'i ^ i) for i = 1, 2, ... s
#
# The one complication I add is that the last message block is hard
# coded to the number of padbytes added, so that these can be stripped
# during the undigest() step
s = divmod(len(text), block_size)[0]
blocks = []
hashes = []
for i in range(1, s+1):
start = (i-1) * block_size
end = start + block_size
mi = text[start:end]
assert len(mi) == block_size
cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
mticki = bytes_to_long(mi) ^ bytes_to_long(cipherblock)
blocks.append(mticki)
# calculate the hash block for this block
hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size))
hashes.append(bytes_to_long(hi))
# Add the padbytes length as a message block
i = i + 1
cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
mticki = padbytes ^ bytes_to_long(cipherblock)
blocks.append(mticki)
# calculate this block's hash
hi = hcipher.encrypt(long_to_bytes(mticki ^ i, block_size))
hashes.append(bytes_to_long(hi))
# Now calculate the last message block of the sequence 1..s'. This
# will contain the random session key XOR'd with all the hash blocks,
# so that for undigest(), once all the hash blocks are calculated, the
# session key can be trivially extracted. Calculating all the hash
# blocks requires that all the message blocks be received, thus the
# All-or-Nothing algorithm succeeds.
mtick_stick = bytes_to_long(key) ^ reduce(operator.xor, hashes)
blocks.append(mtick_stick)
# we convert the blocks to strings since in Python, byte sequences are
# always represented as strings. This is more consistent with the
# model that encryption and hash algorithms always operate on strings.
return [long_to_bytes(i,self.__ciphermodule.block_size) for i in blocks]
def undigest(self, blocks):
"""undigest(blocks : [string]) : string
Perform the reverse package transformation on a list of message
blocks. Note that the ciphermodule used for both transformations
must be the same. blocks is a list of strings of bit length
equal to the ciphermodule's block_size.
"""
# better have at least 2 blocks, for the padbytes package and the hash
# block accumulator
if len(blocks) < 2:
raise ValueError, "List must be at least length 2."
# blocks is a list of strings. We need to deal with them as long
# integers
blocks = map(bytes_to_long, blocks)
# Calculate the well-known key, to which the hash blocks are
# encrypted, and create the hash cipher.
K0 = self.__K0digit * self.__key_size
hcipher = self.__newcipher(K0)
block_size = self.__ciphermodule.block_size
# Since we have all the blocks (or this method would have been called
# prematurely), we can calculate all the hash blocks.
hashes = []
for i in range(1, len(blocks)):
mticki = blocks[i-1] ^ i
hi = hcipher.encrypt(long_to_bytes(mticki, block_size))
hashes.append(bytes_to_long(hi))
# now we can calculate K' (key). remember the last block contains
# m's' which we don't include here
key = blocks[-1] ^ reduce(operator.xor, hashes)
# and now we can create the cipher object
mcipher = self.__newcipher(long_to_bytes(key, self.__key_size))
# And we can now decode the original message blocks
parts = []
for i in range(1, len(blocks)):
cipherblock = mcipher.encrypt(long_to_bytes(i, block_size))
mi = blocks[i-1] ^ bytes_to_long(cipherblock)
parts.append(mi)
# The last message block contains the number of pad bytes appended to
# the original text string, such that its length was an even multiple
# of the cipher's block_size. This number should be small enough that
# the conversion from long integer to integer should never overflow
padbytes = int(parts[-1])
text = b('').join(map(long_to_bytes, parts[:-1]))
return text[:-padbytes]
def _inventkey(self, key_size):
# Return key_size random bytes
from Crypto import Random
return Random.new().read(key_size)
def __newcipher(self, key):
if self.__mode is None and self.__IV is None:
return self.__ciphermodule.new(key)
elif self.__IV is None:
return self.__ciphermodule.new(key, self.__mode)
else:
return self.__ciphermodule.new(key, self.__mode, self.__IV)
if __name__ == '__main__':
import sys
import getopt
import base64
usagemsg = '''\
Test module usage: %(program)s [-c cipher] [-l] [-h]
Where:
--cipher module
-c module
Cipher module to use. Default: %(ciphermodule)s
--aslong
-l
Print the encoded message blocks as long integers instead of base64
encoded strings
--help
-h
Print this help message
'''
ciphermodule = 'AES'
aslong = 0
def usage(code, msg=None):
if msg:
print msg
print usagemsg % {'program': sys.argv[0],
'ciphermodule': ciphermodule}
sys.exit(code)
try:
opts, args = getopt.getopt(sys.argv[1:],
'c:l', ['cipher=', 'aslong'])
except getopt.error, msg:
usage(1, msg)
if args:
usage(1, 'Too many arguments')
for opt, arg in opts:
if opt in ('-h', '--help'):
usage(0)
elif opt in ('-c', '--cipher'):
ciphermodule = arg
elif opt in ('-l', '--aslong'):
aslong = 1
# ugly hack to force __import__ to give us the end-path module
module = __import__('Crypto.Cipher.'+ciphermodule, None, None, ['new'])
x = AllOrNothing(module)
print 'Original text:\n=========='
print __doc__
print '=========='
msgblocks = x.digest(b(__doc__))
print 'message blocks:'
for i, blk in zip(range(len(msgblocks)), msgblocks):
# base64 adds a trailing newline
print ' %3d' % i,
if aslong:
print bytes_to_long(blk)
else:
print base64.encodestring(blk)[:-1]
#
# get a new undigest-only object so there's no leakage
y = AllOrNothing(module)
text = y.undigest(msgblocks)
if text == b(__doc__):
print 'They match!'
else:
print 'They differ!'
| apache-2.0 |
marc-sensenich/ansible | lib/ansible/modules/cloud/cloudstack/cs_user.py | 15 | 11663 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cs_user
short_description: Manages users on Apache CloudStack based clouds.
description:
- Create, update, disable, lock, enable and remove users.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
username:
description:
- Username of the user.
required: true
account:
description:
- Account the user will be created under.
- Required on C(state=present).
password:
description:
- Password of the user to be created.
- Required on C(state=present).
- Only considered on creation and will not be updated if user exists.
first_name:
description:
- First name of the user.
- Required on C(state=present).
last_name:
description:
- Last name of the user.
- Required on C(state=present).
email:
description:
- Email of the user.
- Required on C(state=present).
timezone:
description:
- Timezone of the user.
keys_registered:
description:
- If API keys of the user should be generated.
- "Note: Keys can not be removed by the API again."
version_added: "2.4"
type: bool
default: no
domain:
description:
- Domain the user is related to.
default: ROOT
state:
description:
- State of the user.
- C(unlocked) is an alias for C(enabled).
default: present
choices: [ present, absent, enabled, disabled, locked, unlocked ]
poll_async:
description:
- Poll async jobs until job has finished.
type: bool
default: yes
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
- name: Create an user in domain 'CUSTOMERS'
local_action:
module: cs_user
account: developers
username: johndoe
password: S3Cur3
last_name: Doe
first_name: John
email: [email protected]
domain: CUSTOMERS
- name: Lock an existing user in domain 'CUSTOMERS'
local_action:
module: cs_user
username: johndoe
domain: CUSTOMERS
state: locked
- name: Disable an existing user in domain 'CUSTOMERS'
local_action:
module: cs_user
username: johndoe
domain: CUSTOMERS
state: disabled
- name: Enable/unlock an existing user in domain 'CUSTOMERS'
local_action:
module: cs_user
username: johndoe
domain: CUSTOMERS
state: enabled
- name: Remove an user in domain 'CUSTOMERS'
local_action:
module: cs_user
name: customer_xy
domain: CUSTOMERS
state: absent
'''
RETURN = '''
---
id:
description: UUID of the user.
returned: success
type: str
sample: 87b1e0ce-4e01-11e4-bb66-0050569e64b8
username:
description: Username of the user.
returned: success
type: str
sample: johndoe
first_name:
description: First name of the user.
returned: success
type: str
sample: John
last_name:
description: Last name of the user.
returned: success
type: str
sample: Doe
email:
description: Email of the user.
returned: success
type: str
sample: [email protected]
user_api_key:
description: API key of the user.
returned: success
type: str
sample: JLhcg8VWi8DoFqL2sSLZMXmGojcLnFrOBTipvBHJjySODcV4mCOo29W2duzPv5cALaZnXj5QxDx3xQfaQt3DKg
user_api_secret:
description: API secret of the user.
returned: success
type: str
sample: FUELo3LB9fa1UopjTLPdqLv_6OXQMJZv9g9N4B_Ao3HFz8d6IGFCV9MbPFNM8mwz00wbMevja1DoUNDvI8C9-g
account:
description: Account name of the user.
returned: success
type: str
sample: developers
account_type:
description: Type of the account.
returned: success
type: str
sample: user
timezone:
description: Timezone of the user.
returned: success
type: str
sample: enabled
created:
description: Date the user was created.
returned: success
type: str
sample: 2015-05-03T15:05:51+0200
state:
description: State of the user.
returned: success
type: str
sample: enabled
domain:
description: Domain the user is related.
returned: success
type: str
sample: ROOT
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
AnsibleCloudStack,
cs_argument_spec,
cs_required_together,
)
class AnsibleCloudStackUser(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackUser, self).__init__(module)
self.returns = {
'username': 'username',
'firstname': 'first_name',
'lastname': 'last_name',
'email': 'email',
'secretkey': 'user_api_secret',
'apikey': 'user_api_key',
'timezone': 'timezone',
}
self.account_types = {
'user': 0,
'root_admin': 1,
'domain_admin': 2,
}
self.user = None
def get_account_type(self):
account_type = self.module.params.get('account_type')
return self.account_types[account_type]
def get_user(self):
if not self.user:
args = {
'domainid': self.get_domain('id'),
'fetch_list': True,
}
users = self.query_api('listUsers', **args)
if users:
user_name = self.module.params.get('username')
for u in users:
if user_name.lower() == u['username'].lower():
self.user = u
break
return self.user
def enable_user(self):
user = self.get_user()
if not user:
user = self.present_user()
if user['state'].lower() != 'enabled':
self.result['changed'] = True
args = {
'id': user['id'],
}
if not self.module.check_mode:
res = self.query_api('enableUser', **args)
user = res['user']
return user
def lock_user(self):
user = self.get_user()
if not user:
user = self.present_user()
# we need to enable the user to lock it.
if user['state'].lower() == 'disabled':
user = self.enable_user()
if user['state'].lower() != 'locked':
self.result['changed'] = True
args = {
'id': user['id'],
}
if not self.module.check_mode:
res = self.query_api('lockUser', **args)
user = res['user']
return user
def disable_user(self):
user = self.get_user()
if not user:
user = self.present_user()
if user['state'].lower() != 'disabled':
self.result['changed'] = True
args = {
'id': user['id'],
}
if not self.module.check_mode:
user = self.query_api('disableUser', **args)
poll_async = self.module.params.get('poll_async')
if poll_async:
user = self.poll_job(user, 'user')
return user
def present_user(self):
required_params = [
'account',
'email',
'password',
'first_name',
'last_name',
]
self.module.fail_on_missing_params(required_params=required_params)
user = self.get_user()
if user:
user = self._update_user(user)
else:
user = self._create_user(user)
return user
def _get_common_args(self):
return {
'firstname': self.module.params.get('first_name'),
'lastname': self.module.params.get('last_name'),
'email': self.module.params.get('email'),
'timezone': self.module.params.get('timezone'),
}
def _create_user(self, user):
self.result['changed'] = True
args = self._get_common_args()
args.update({
'account': self.get_account(key='name'),
'domainid': self.get_domain('id'),
'username': self.module.params.get('username'),
'password': self.module.params.get('password'),
})
if not self.module.check_mode:
res = self.query_api('createUser', **args)
user = res['user']
# register user api keys
if self.module.params.get('keys_registered'):
res = self.query_api('registerUserKeys', id=user['id'])
user.update(res['userkeys'])
return user
def _update_user(self, user):
args = self._get_common_args()
args.update({
'id': user['id'],
})
if self.has_changed(args, user):
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('updateUser', **args)
user = res['user']
# register user api keys
if 'apikey' not in user and self.module.params.get('keys_registered'):
self.result['changed'] = True
if not self.module.check_mode:
res = self.query_api('registerUserKeys', id=user['id'])
user.update(res['userkeys'])
return user
def absent_user(self):
user = self.get_user()
if user:
self.result['changed'] = True
if not self.module.check_mode:
self.query_api('deleteUser', id=user['id'])
return user
def get_result(self, user):
super(AnsibleCloudStackUser, self).get_result(user)
if user:
if 'accounttype' in user:
for key, value in self.account_types.items():
if value == user['accounttype']:
self.result['account_type'] = key
break
# secretkey has been removed since CloudStack 4.10 from listUsers API
if self.module.params.get('keys_registered') and 'apikey' in user and 'secretkey' not in user:
user_keys = self.query_api('getUserKeys', id=user['id'])
if user_keys:
self.result['user_api_secret'] = user_keys['userkeys'].get('secretkey')
return self.result
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
username=dict(required=True),
account=dict(),
state=dict(choices=['present', 'absent', 'enabled', 'disabled', 'locked', 'unlocked'], default='present'),
domain=dict(default='ROOT'),
email=dict(),
first_name=dict(),
last_name=dict(),
password=dict(no_log=True),
timezone=dict(),
keys_registered=dict(type='bool', default=False),
poll_async=dict(type='bool', default=True),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
acs_acc = AnsibleCloudStackUser(module)
state = module.params.get('state')
if state == 'absent':
user = acs_acc.absent_user()
elif state in ['enabled', 'unlocked']:
user = acs_acc.enable_user()
elif state == 'disabled':
user = acs_acc.disable_user()
elif state == 'locked':
user = acs_acc.lock_user()
else:
user = acs_acc.present_user()
result = acs_acc.get_result(user)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
zachjustice/schemaverse_ai | db.py | 1 | 5792 | import psycopg2 #import postgresql connection library
from psycopg2.extras import NamedTupleConnection
import traceback
class db:
def __init__(self, conn_str):
self.conn = psycopg2.connect( conn_str ) #establish DB connection
self.conn.autocommit = True #disable transactions (transactions left uncommitted will hang the game)
def fetchall(self, query, data=None):
cur = self.execute(query, data)
results = cur.fetchall()
cur.close()
return results
def fetchone(self, query, data=None):
cur = self.execute(query, data)
results = cur.fetchone()
cur.close()
return results
def execute_blind(self, query, data=None):
cur = self.execute(query, data)
cur.close()
def execute(self, query, data=None):
cur = self.conn.cursor(cursor_factory=psycopg2.extras.NamedTupleCursor)
try:
if data == None:
cur.execute(query)
else:
cur.execute(query, data)
except Exception, e:
traceback.print_stack()
print "ERROR WITH QUERY: '" + query + "'"
return cur
def get_current_tic(self):
query = "SELECT last_value FROM tic_seq;"
return self.fetchone(query).last_value
def convert_fuel_to_money(self, amount=None):
if amount == None:
query ="SELECT convert_resource('FUEL', my_player.fuel_reserve)\
FROM my_player;"
else:
query ="SELECT convert_resource('FUEL', %s) FROM my_player;"
converted_fuel = self.fetchone(query, [amount])
return converted_fuel
def get_my_player_info(self):
query ="""\
SELECT
id,
balance,
fuel_reserve
FROM my_player;"""
player_info = self.fetchone(query)
return player_info
def move_ships(self, planet_destination, ship_ids):
query = """\
SELECT
SHIP_COURSE_CONTROL(id, current_fuel / 2, null, POINT%s)
FROM my_ships
WHERE id = ANY(%s)"""
data = [planet_destination,ship_ids]
self.execute_blind(query, data)
def create_ships(self, ships_to_create):
query = "INSERT INTO my_ships( attack, defense, engineering, prospecting, location) VALUES"
ship_str = " ( 0,10,0,10, ( SELECT location FROM planets WHERE conqueror_id=GET_PLAYER_ID(SESSION_USER) ) ),"
for i in range( ships_to_create ):
query = query + ship_str
query = query[:-1] # remove last comma
query = query + " RETURNING id;"
created_ship_ids = self.fetchall( query )
return created_ship_ids
# ship_actions is an array of arrays such that
# [
# [ <action string>, action_target_id, <array of applicable ship ids>]
# ...
# ]
# bulk set ship actions will then string together a list of update queries
# to update the ships to act on the corresponding action target
def bulk_set_ship_actions(self, ship_actions):
data = []
bulk_query = ""
simple_query = """
UPDATE my_ships s
SET action=%s, action_target_id=%s
WHERE s.id = ANY(%s);
"""
for ship_action in ship_actions:
bulk_query = bulk_query + simple_query
data = data + ship_action
self.execute_blind(bulk_query, data)
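    # Hedged usage sketch (the action names and ids below are illustrative,
    # not taken from the game API):
    #
    #   actions = [
    #       ['MINE', 42, [101, 102]],   # ships 101 and 102 mine target 42
    #       ['ATTACK', 7, [103]],       # ship 103 attacks target 7
    #   ]
    #   game_db.bulk_set_ship_actions(actions)
    #
    # Each triple is flattened into the data list, lining up with the three
    # %s placeholders of one appended UPDATE statement.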
def set_ship_action(self, action, action_target_id, ship_ids):
query = """
UPDATE my_ships s
SET action=%s, action_target_id=%s
WHERE s.id = ANY(%s);
"""
data = (action, action_target_id, ship_ids)
self.execute_blind(query, data)
def get_my_ships(self):
query = """
SELECT
id,
name,
current_health,
max_health,
current_fuel,
max_fuel,
max_speed,
range,
attack,
defense,
engineering,
prospecting,
location_x,
location_y,
direction,
speed,
destination_x,
destination_y,
action,
action_target_id,
location,
destination,
target_speed,
target_direction
FROM
my_ships;"""
ships = self.fetchall(query)
return ships
def get_planets(self):
query = """
SELECT
id,
name,
mine_limit,
location_x,
location_y,
conqueror_id,
location
FROM
planets;"""
planets = self.fetchall(query)
return planets
def get_planets_in_range(self):
query = """
SELECT
ship,
planet,
ship_location,
planet_location,
distance
FROM
planets_in_range;"""
planets_in_range = self.fetchall(query)
return planets_in_range
def get_ships_in_range(self):
query = """
SELECT
id,
ship_in_range_of,
player_id,
name,
health,
enemy_location
FROM
ships_in_range;"""
ships_in_range = self.fetchall(query)
return ships_in_range
def refuel_ships(self):
query = "SELECT refuel_ship(id) FROM my_ships WHERE current_fuel < max_fuel"
self.execute_blind(query)
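# Usage sketch (the connection-string values are placeholders):
#
#   game_db = db("host=example.com dbname=schemaverse user=me password=secret")
#   print game_db.get_current_tic()
#   for ship in game_db.get_my_ships():  # NamedTupleCursor rows allow ship.id
#       print ship.id, ship.action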
| mit |
Kev/maybelater | maybelater/urls.py | 1 | 1563 | from django.conf.urls.defaults import *
from django.contrib.auth.views import *
urlpatterns = patterns('maybelater.views',
#(r'^report/$', 'all_tasks'), #this one's just a test, don't enable it (allows you to see all tasks for all users)
#(r'^archived/$', 'archived'),
(r'^profile/$', 'editProfile'),
(r'^outstanding/$', 'outstanding'),
(r'^outstanding/task/(?P<taskId>\d+)/$', 'outstanding'),
(r'^completed/$', 'completed'),
(r'^completed/task/(?P<taskId>\d+)/$', 'completed'),
(r'^task/new$', 'createTask'),
(r'^task/edit$', 'editTask'),
(r'^context/new$', 'createContext'),
(r'^context/(?P<contextId>\d+)/task/(?P<taskId>\d+)/$', 'context'),
(r'^context/task/(?P<taskId>\d+)/$', 'context'),
(r'^context/(\d+)/$', 'context'),
(r'^context/$', 'context'),
(r'^project/new$', 'createProject'),
(r'^project/(?P<projectId>\d+)/task/(?P<taskId>\d+)/$', 'project'),
(r'^project/task/(?P<taskId>\d+)/$', 'project'),
(r'^project/(\d+)/$', 'project'),
(r'^project/$', 'project'),
(r'^task/\d+/$', 'task'),
(r'^v2/$', 'v2ui'),
(r'^v2/tasks$', 'v2_tasks'),
(r'^$', 'context'),
(r'^generate-test-data$', 'generateTestData'), #demo data - you want to disable this in production!
)
urlpatterns += patterns("",
#You most likely want the admin interface, but it's possible to do without.
(r'^admin/', include('django.contrib.admin.urls')),
(r'^accounts/login/$', 'django.contrib.auth.views.login'),
(r'^logout/$', 'django.contrib.auth.views.logout'),
)
| gpl-2.0 |
angdraug/nova | nova/api/openstack/compute/schemas/v3/aggregates.py | 14 | 2559 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.api.validation import parameter_types
availability_zone = copy.deepcopy(parameter_types.name)
availability_zone['type'] = ['string', 'null']
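# ``availability_zone`` reuses the ``name`` constraints but additionally
# accepts JSON null, so an update can clear the zone without omitting the key.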
create = {
'type': 'object',
'properties': {
'aggregate': {
'type': 'object',
'properties': {
'name': parameter_types.name,
'availability_zone': availability_zone,
},
'required': ['name'],
'additionalProperties': False,
},
},
'required': ['aggregate'],
'additionalProperties': False,
}
update = {
'type': 'object',
'properties': {
'aggregate': {
'type': 'object',
'properties': {
'name': parameter_types.name,
'availability_zone': availability_zone
},
'additionalProperties': False,
'anyOf': [
{'required': ['name']},
{'required': ['availability_zone']}
]
},
},
'required': ['aggregate'],
'additionalProperties': False,
}
add_host = {
'type': 'object',
'properties': {
'add_host': {
'type': 'object',
'properties': {
'host': parameter_types.hostname,
},
'required': ['host'],
'additionalProperties': False,
},
},
'required': ['add_host'],
'additionalProperties': False,
}
remove_host = {
'type': 'object',
'properties': {
'remove_host': {
'type': 'object',
'properties': {
'host': parameter_types.hostname,
},
'required': ['host'],
'additionalProperties': False,
},
},
'required': ['remove_host'],
'additionalProperties': False,
}
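# Sketch: each dict above is a plain JSON Schema document, so a request body
# can be checked directly with the jsonschema package (illustrative only;
# nova's own validation layer normally consumes these schemas):
#
#   import jsonschema
#   body = {'aggregate': {'name': 'agg1', 'availability_zone': 'nova'}}
#   jsonschema.validate(body, create)  # raises ValidationError on bad input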
| apache-2.0 |
ecthros/uncaptcha | audio.py | 1 | 16077 | import speech_recognition as sr
import os
import time
import json
import logging, sys
import multiprocessing
import pprint
import csv
import threading
import googleapiclient
from collections import Counter
# Set up logging and pretty printing
LEVEL = logging.INFO
logging.basicConfig(stream=sys.stderr, level=LEVEL)
logging.getLogger('oauth2client.transport').setLevel(logging.ERROR)
logging.getLogger('googleapiclient.discovery').setLevel(logging.CRITICAL)
logging.getLogger('oauth2client.client').setLevel(logging.ERROR)
pp = pprint.PrettyPrinter(indent=4)
# Set up default guess
#DEFAULT = "X" # all un-identified digits remain unknown
DEFAULT = "6" # all un-identified digits are mapped to "6"
# Set up api list
apis = ["googleCloud", "wit", "bing", "ibm", "google", "sphinx"]
# Simple homophone mapping, taking any exact matches and returning the digit (layer one mapping)
def homophone(num):
if num in ["one", "1", "won"]:
return "1"
elif num in ["two", "to", "too", "2"]:
return "2"
elif num in ["three", "3"]:
return "3"
elif num in ["four", "for", "4", "fore"]:
return "4"
elif num in ["five", "5"]:
return "5"
elif num in ["six", "6"]:
return "6"
elif num in ["seven", "7"]:
return "7"
elif num in ["eight", "ate", "8"]:
return "8"
elif num in ["nine", "9"]:
return "9"
elif num in ["zero", "0"]:
return "0"
return DEFAULT
# Apply both layers of phonetic mapping
# More complex mapping, where homophones and near-homophones are used in conjunction
# Heigher weights are given to words that are phonetically close to a digit
def text_to_num(num, source_name="", results_dict={}):
num = num.strip()
if not source_name in results_dict:
results_dict[source_name] = [str(num)]
if not source_name + "_fil" in results_dict:
results_dict[source_name + "_fil"] = list()
digits = list()
########## FIRST LAYER MAPPING ##########
# These match correspond to exact homophone matches
if num in ["one", "won" "1"]:
digits.append(1)
if num in ["two", "to", "too", "2"]:
digits.append(2)
if num in ["three", "3"]:
digits.append(3)
if num in ["four", "for", "fore", "4"]:
digits.append(4)
if num in ["five", "5"]:
digits.append(5)
if num in ["six", "6"]:
digits.append(6)
if num in ["six", "6"]:
digits.append(6)
if num in ["seven", "7"]:
digits.append(7)
if num in ["eight", "ate", "8"]:
digits.append(8)
if num in ["nine", "9"]:
digits.append(9)
if num in ["zero", "0"]:
digits.append(0)
########## SECOND LAYER MAPPING ##########
# These match correspond to near homophone matches
if num in ["one", "1", "juan", "Warren", "fun", "who won"]:
digits.append(1)
if num in ["to", "two", "too", "2", "who", "true", "do", "so", "you", "hello", "lou"] or num.endswith("ew") or num.endswith("do"):
digits.append(2)
if num in ["during", "three", "3", "tree", "free", "siri", "very", "be", "wes", "we", "really", "hurry"] or "ee" in num:
digits.append(3)
if num in ["four", "for", "fourth", "4", "oar", "or", "more", "porn"] or "oor" in num:
digits.append(4)
if num in ["five", "5", "hive", "fight", "fifth", "why", "find"] or "ive" in num:
digits.append(5)
if num in ["six", "6", "sex", "big", "sic", "set", "dicks", "it", "thank"] or num.endswith("icks") or num.endswith("ick") or num.endswith("inks") or num.endswith("ex"):
digits.append(6)
if num in ["get in", "seven", "7", "heaven", "Frozen", "Allen", "send","weather", "that in", "ten"] or "ven" in num:
digits.append(7)
if num in ["eight hundred", "o. k.", "eight", "8", "hate", "fate", "hey", "it", "they", "a", "A", "they have", "then"] or "ate" in num:
digits.append(8)
if num in ["yeah I", "no", "nine", "i'm", "9", "mine", "brian", "now i", "no i", "no I", "during", "now I", "no", "night", "eyes", "none", "non", "bind", "nice", "no i'm"] or "ine" in num:
digits.append(9)
if num in ["a hero", "the euro", "the hero", "Europe", "yeah well", "the o.", "hey oh", "zero", "hero", "0", "yeah","here", "well", "yeah well", "euro", "yo", "hello", "arrow", "Arrow", "they don't", "girl", "bill", "you know"] or "ero" in num:
digits.append(0)
if num in ["hi", "i", "I", "bye", "by", "buy"]:
digits.append(5)
digits.append(9)
# Combine the output of the filters
retStr = ''.join([str(x) for x in digits])
if (retStr == '' or retStr == None):
# Digit could not be classified
results_dict[source_name + "_fil"] += DEFAULT
return DEFAULT
else:
results_dict[source_name + "_fil"] += str(digits[0])
return retStr
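# Illustrative mappings implied by the rules above (DEFAULT is "6" here):
#   text_to_num("three") -> "33"  (both layers match, so 3 is appended twice)
#   text_to_num("tree")  -> "3"   (second-layer near-homophone only)
#   text_to_num("by")    -> "59"  (ambiguous between the digits 5 and 9)
#   text_to_num("xyz")   -> "6"   (unclassified, falls back to DEFAULT)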
#################### SPEECH-TO-TEXT WEB APIS ####################
###### The following functions interact with the APIs we used to query for each segment ########
###### Keys have been removed from this section #######
#Query Sphinx
def sphinx(audio, vals, i, results_dict, timing):
try:
#print("Sphinx: ")
s = time.time()
vals[i] = text_to_num(r.recognize_sphinx(audio), "sphinx", results_dict)
timing["sphinx"].append(time.time() - s)
print "timing2", timing
except sr.UnknownValueError:
logging.debug("Sphinx could not understand audio")
results_dict["sphinx"] = [DEFAULT]
results_dict["sphinx_fil"] = [DEFAULT]
except sr.RequestError as e:
logging.debug("Sphinx error; {0}".format(e))
results_dict["sphinx"] = [DEFAULT]
results_dict["sphinx_fil"] = [DEFAULT]
#Query Google Cloud
def googleCloud(audio, vals, i, results_dict, timing):
# recognize speech using Google Cloud Speech
GOOGLE_CLOUD_SPEECH_CREDENTIALS = r"""{
"type": "service_account",
"project_id": "XXXXXX",
"private_key_id": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
"private_key": "-----BEGIN PRIVATE KEY-----\nxxxxxxxxxxxxxxxxxxxxxxxxxx\n-----END PRIVATE KEY-----\n",
"client_email": "",
"client_id": "XXXXXXXXXXXXXXXXXXXXXX",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://accounts.google.com/o/oauth2/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/audio-539%40XXXXXXXXXXX.iam.gserviceaccount.com"
}"""
try:
s = time.time()
#print("Google Cloud Speech: ")
vals[i] = text_to_num(r.recognize_google_cloud(audio, \
preferred_phrases=["zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"],\
credentials_json=GOOGLE_CLOUD_SPEECH_CREDENTIALS), "googleCloud", results_dict)
timing["googleCloud"].append(time.time() - s)
print "timing", timing["googleCloud"]
#print("Google Cloud " + str(vals[i]))
except sr.UnknownValueError:
logging.debug("Google Cloud Speech could not understand audio")
results_dict["googleCloud"] = [DEFAULT]
results_dict["googleCloud_fil"] = [DEFAULT]
except sr.RequestError as e:
logging.debug("Could not request results from Google Cloud Speech service; {0}".format(e))
results_dict["googleCloud"] = [DEFAULT]
results_dict["googleCloud_fil"] = [DEFAULT]
    except:
        # swallow any other failure (e.g. a transport error raised through
        # googleapiclient) so one API cannot crash the whole worker
        pass
#Query Wit
def wit(audio, vals, i, results_dict, timing):
# recognize speech using Wit.ai
WIT_AI_KEY = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXx" # Wit.ai keys are 32-character uppercase alphanumeric strings
try:
s = time.time()
#print("Wit.ai: ")
vals[i] = text_to_num(r.recognize_wit(audio, key=WIT_AI_KEY), "wit", results_dict)
timing["wit"].append(time.time() - s)
#print("Wit " + str(vals[i]))
except sr.UnknownValueError:
logging.debug("Wit.ai could not understand audio")
results_dict["wit"] = [DEFAULT]
results_dict["wit_fil"] = [DEFAULT]
except sr.RequestError as e:
logging.debug("Could not request results from Wit.ai service; {0}".format(e))
results_dict["wit"] = [DEFAULT]
results_dict["wit_fil"] = [DEFAULT]
#Query Bing
def bing(audio, vals, i, results_dict, timing):
# recognize speech using Microsoft Bing Voice Recognition
# Microsoft Bing Voice Recognition API keys 32-character lowercase hexadecimal strings
BING_KEY = "XXXXXXXXXXXXXXXXXXXXXXXXXXXX"
try:
s = time.time()
#print("Microsoft Bing Voice Recognition: ")
vals[i] = text_to_num(r.recognize_bing(audio, key=BING_KEY), "bing", results_dict)
timing["bing"].append(time.time() - s)
except sr.UnknownValueError:
logging.debug("Microsoft Bing Voice Recognition could not understand audio")
results_dict["bing"] = [DEFAULT]
results_dict["bing_fil"] = [DEFAULT]
except sr.RequestError as e:
logging.debug("Could not request results from Microsoft Bing Voice Recognition service; {0}".format(e))
results_dict["bing"] = [DEFAULT]
results_dict["bing_fil"] = [DEFAULT]
# Query IBM
def ibm(audio, vals, i, results_dict, timing, show_all=False):
# recognize speech using IBM Speech to Text
IBM_USERNAME = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" # IBM Speech to Text usernames are strings of the form XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
IBM_PASSWORD = "XXXXXXXXXX" # IBM Speech to Text passwords are mixed-case alphanumeric strings
try:
s = time.time()
#print("IBM Speech to Text: ")
vals[i] = text_to_num(r.recognize_ibm(audio, username=IBM_USERNAME, \
password=IBM_PASSWORD, show_all=False), "ibm", results_dict)
timing["ibm"].append(time.time() - s)
except sr.UnknownValueError:
logging.debug("IBM Speech to Text could not understand audio")
results_dict["ibm"] = [DEFAULT]
results_dict["ibm_fil"] = [DEFAULT]
except sr.RequestError as e:
logging.debug("Could not request results from IBM Speech to Text service; {0}".format(e))
results_dict["ibm"] = [DEFAULT]
results_dict["ibm_fil"] = [DEFAULT]
#Query Google Speech-To-Text
def google(audio, vals, i, results_dict, timing):
try:
#print("Google: ")
        s = time.time()
vals[i] = text_to_num(r.recognize_google(audio), "google", results_dict)
timing["google"].append(time.time() - s)
except:
logging.debug("Google could not understand")
results_dict["google"] = [DEFAULT]
results_dict["google_fil"] = [DEFAULT]
#Query Houndify. This was not used, as we found Houndify difficult to incorporate.
def houndify(audio, vals, i, results_dict, timing):
# recognize speech using Houndify
HOUNDIFY_CLIENT_ID = "XXXXXXXXXXXXXXXXXXXXX==" # Houndify client IDs are Base64-encoded strings
HOUNDIFY_CLIENT_KEY = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX==" # Houndify client keys are Base64-encoded strings
try:
#print("Houndify: ")
vals[i] = text_to_num(r.recognize_houndify(audio, client_id=HOUNDIFY_CLIENT_ID,\
client_key=HOUNDIFY_CLIENT_KEY), "houndify", results_dict)
# vals[i] = None
except sr.UnknownValueError:
logging.debug("Houndify could not understand audio")
results_dict["houndify"] = [DEFAULT]
results_dict["houndify_fil"] = [DEFAULT]
except sr.RequestError as e:
logging.debug("Could not request results from Houndify service; {0}".format(e))
results_dict["houndify"] = [DEFAULT]
results_dict["houndify_fil"] = [DEFAULT]
# Apply a new phonetic mapping to the saved data
def re_test(new_fil, base_dir="data"):
try:
tasks = os.listdir(base_dir)
except OSError:
print("no such directory")
return None
for task in tasks:
new_final = ""
task_path = os.path.join(base_dir, task)
logging.info(task_path)
csv_log = open(os.path.join(task_path, "results_%s.csv" % new_fil.__name__), "wb")
csv_writer = csv.writer(csv_log)
try:
with open(os.path.join(task_path, "results.json"), "r") as log:
json_str = log.read()
results_dict = json.loads(json_str)
with open(os.path.join(task_path, "oracle"), "r") as log:
oracle = log.read()
except:
continue
new_results_dict = dict()
for api in apis:
new_results_dict[api + "_fil"] = list()
new_results_dict[api] = results_dict[api] # copy the unfiltered results
for dig_count in xrange(0,10):
csv_row = list()
i = 0
new_dig_guess = [0] * len(apis)
csv_row.append(oracle[dig_count])
# re-filter each api for digit dig_count
for api in apis:
#print api, results_dict[api], dig_count
csv_row.append(results_dict[api][dig_count])
new_dig_guess[i] = new_fil(results_dict[api][dig_count]) # apply new filter
new_results_dict[api + "_fil"].append(new_dig_guess[i])
i += 1
logging.debug(new_dig_guess)
            # keep only guesses that were actually classified (drop DEFAULT)
            resultsFiltered = filter(lambda x: x != DEFAULT, new_dig_guess)
results = []
for result in resultsFiltered:
digits = [digit for digit in str(result)]
results += digits
logging.debug(results)
results = sorted(results, key=results.count, reverse=True)
logging.debug(results)
if not results:
logging.debug("FOUND NOTHING: DEFAULTING TO %s" % DEFAULT)
new_final += DEFAULT # seems good enough
else:
logging.debug("DETERMINED AS: " + str(results[0]))
new_final += results[0]
csv_row.append(new_final[-1])
csv_writer.writerow(csv_row)
logging.debug(new_final)
new_results_dict["final"] = new_final
new_final_log = os.path.join(task_path, "results_%s.json" % new_fil.__name__)
with open(new_final_log, "w") as log:
json.dump(new_results_dict, log)
csv_log.close()
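# Usage sketch: re-score previously saved transcriptions with a different
# phonetic filter, without re-querying any speech API:
#
#   re_test(homophone, base_dir="data")     # exact homophones only
#   re_test(text_to_num, base_dir="data")   # both mapping layers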
def getNums(task_path, audio_files):
print audio_files
num_str = ""
    manager = multiprocessing.Manager()
    # manager-backed dict so updates made in the child processes propagate back
    results_dict = manager.dict()
start = time.time()
i = 0
ts = []
ans = ["X" for j in range(0, 11)]
print ans
for f in sorted(audio_files):
ts.append(multiprocessing.Process(target=getNum, args=((f, results_dict, i, ans))))
logging.debug(f)
#num_str += str(getNum(f, results_dict, i, ans))
i += 1
print ts
for t in ts:
t.start()
for t in ts:
t.join()
end = time.time()
print ans
print end-start
results_dict["total_time"] = end - start
    num_str = "".join(str(d) for d in ans)  # assemble per-digit answers (unfilled slots stay "X")
    logging.debug(num_str)
    results_dict["final"] = num_str
logging.debug(results_dict)
# save the results in a log file
#with open(os.path.join(task_path, "results.json"), "w") as log:
# json.dump(results_dict, log)
logging.debug("results recorded for %s" % task_path)
return num_str, end-start
def getNum(audio_file, results_dict, digit_num=0, ans=[]):
global r
r = sr.Recognizer()
with sr.AudioFile(audio_file) as source:
audio = r.record(source) # read the entire audio file
manage_vars = multiprocessing.Manager()
ret_vals = manage_vars.dict()
results_dict_threaded = manage_vars.dict()
results = []
threads = []
timed = manage_vars.dict()
for api in apis:
timed[api] = manage_vars.list()
apis_func = [googleCloud, sphinx, wit, bing, google, ibm]
i = 0
start = time.time()
for api in apis_func:
t = multiprocessing.Process(target=api, args=(audio, ret_vals, i, results_dict_threaded, timed))
threads.append(t)
t.start()
i += 1
for thread in threads:
thread.join()
end = time.time()
print "getnumtime", end-start
print timed
results_dict["time" + str(digit_num)] = end - start
# merge the results with the past results
for name in results_dict_threaded.keys():
if name in results_dict:
results_dict[name] += results_dict_threaded[name]
else:
results_dict[name] = results_dict_threaded[name]
#print(ret_vals)
i = 0
for key in ret_vals.keys():
results.append(ret_vals[key])
# logging.debug(results)
resultsFiltered = filter(None, results)
results = []
for result in resultsFiltered:
digits = [digit for digit in str(result)]
results += digits
# logging.debug(results)
results = sorted(results, key=results.count, reverse=True)
if not results:
logging.debug("FOUND NOTHING")
ans[digit_num] = DEFAULT
return DEFAULT
else:
# print(results[0])
logging.info("DETERMINED AS: " + str(results[0]))
print ans
print digit_num
ans[digit_num] = results[0]
return results[0]
def test_dir(directory):
try:
audio_files = [os.path.join(directory,f) for f in os.listdir(directory) if "_0" in f]
getNums(directory, audio_files)
except OSError:
print("%s does not exist" % directory)
def test_all(start_dir="data"):
tasks = os.listdir(start_dir)
for task in tasks:
test_dir(os.path.join(start_dir, task))
def test_some(start_dir="data", start=1, end=2):
logging.basicConfig(stream=sys.stderr, level=LEVEL)
for task_num in range(start, end+1):
task = "task"+str(task_num)
task_path = os.path.join(start_dir, task)
test_dir(task_path)
NEW_FILTER = text_to_num
if __name__ == "__main__":
re_test(NEW_FILTER, "new_data")
| mit |
xme1226/horizon | openstack_dashboard/dashboards/project/firewalls/workflows.py | 4 | 11755 | # Copyright 2013, Big Switch Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon.utils import validators
from horizon import workflows
from openstack_dashboard import api
port_validator = validators.validate_port_or_colon_separated_port_range
class AddRuleAction(workflows.Action):
name = forms.CharField(
max_length=80,
label=_("Name"),
required=False)
description = forms.CharField(
max_length=80,
label=_("Description"),
required=False)
protocol = forms.ChoiceField(
label=_("Protocol"),
choices=[('tcp', _('TCP')),
('udp', _('UDP')),
('icmp', _('ICMP')),
('any', _('ANY'))],)
action = forms.ChoiceField(
label=_("Action"),
choices=[('allow', _('ALLOW')),
('deny', _('DENY'))],)
source_ip_address = forms.IPField(
label=_("Source IP Address/Subnet"),
version=forms.IPv4 | forms.IPv6,
required=False, mask=True)
destination_ip_address = forms.IPField(
label=_("Destination IP Address/Subnet"),
version=forms.IPv4 | forms.IPv6,
required=False, mask=True)
source_port = forms.CharField(
max_length=80,
label=_("Source Port/Port Range"),
required=False,
validators=[port_validator])
destination_port = forms.CharField(
max_length=80,
label=_("Destination Port/Port Range"),
required=False,
validators=[port_validator])
shared = forms.BooleanField(
label=_("Shared"), initial=False, required=False)
enabled = forms.BooleanField(
label=_("Enabled"), initial=True, required=False)
def __init__(self, request, *args, **kwargs):
super(AddRuleAction, self).__init__(request, *args, **kwargs)
class Meta:
name = _("AddRule")
permissions = ('openstack.services.network',)
help_text = _("Create a firewall rule.\n\n"
"Protocol and action must be specified. "
"Other fields are optional.")
class AddRuleStep(workflows.Step):
action_class = AddRuleAction
contributes = ("name", "description", "protocol", "action",
"source_ip_address", "source_port",
"destination_ip_address", "destination_port",
"enabled", "shared")
def contribute(self, data, context):
context = super(AddRuleStep, self).contribute(data, context)
if data:
if context['protocol'] == 'any':
del context['protocol']
for field in ['source_port',
'destination_port',
'source_ip_address',
'destination_ip_address']:
if not context[field]:
del context[field]
return context
class AddRule(workflows.Workflow):
slug = "addrule"
name = _("Add Rule")
finalize_button_name = _("Add")
success_message = _('Added Rule "%s".')
failure_message = _('Unable to add Rule "%s".')
success_url = "horizon:project:firewalls:index"
# fwaas is designed to support a wide range of vendor
# firewalls. Considering the multitude of vendor firewall
# features in place today, firewall_rule definition can
# involve more complex configuration over time. Hence,
# a workflow instead of a single form is used for
# firewall_rule add to be ready for future extension.
default_steps = (AddRuleStep,)
def format_status_message(self, message):
return message % self.context.get('name')
def handle(self, request, context):
try:
api.fwaas.rule_create(request, **context)
return True
except Exception as e:
msg = self.format_status_message(self.failure_message) + str(e)
exceptions.handle(request, msg)
return False
class SelectRulesAction(workflows.Action):
rule = forms.MultipleChoiceField(
label=_("Rules"),
required=False,
widget=forms.CheckboxSelectMultiple(),
help_text=_("Create a policy with selected rules."))
class Meta:
name = _("Rules")
permissions = ('openstack.services.network',)
help_text = _("Select rules for your policy.")
def populate_rule_choices(self, request, context):
try:
tenant_id = self.request.user.tenant_id
rules = api.fwaas.rule_list(request, tenant_id=tenant_id)
for r in rules:
r.set_id_as_name_if_empty()
rules = sorted(rules,
key=lambda rule: rule.name)
rule_list = [(rule.id, rule.name) for rule in rules
if not rule.firewall_policy_id]
except Exception as e:
rule_list = []
exceptions.handle(request,
_('Unable to retrieve rules (%(error)s).') % {
'error': str(e)})
return rule_list
class SelectRulesStep(workflows.Step):
action_class = SelectRulesAction
template_name = "project/firewalls/_update_rules.html"
contributes = ("firewall_rules",)
def contribute(self, data, context):
if data:
rules = self.workflow.request.POST.getlist("rule")
if rules:
rules = [r for r in rules if r != '']
context['firewall_rules'] = rules
return context
class AddPolicyAction(workflows.Action):
name = forms.CharField(max_length=80,
label=_("Name"))
description = forms.CharField(max_length=80,
label=_("Description"),
required=False)
shared = forms.BooleanField(label=_("Shared"),
initial=False,
required=False)
audited = forms.BooleanField(label=_("Audited"),
initial=False,
required=False)
def __init__(self, request, *args, **kwargs):
super(AddPolicyAction, self).__init__(request, *args, **kwargs)
class Meta:
name = _("AddPolicy")
permissions = ('openstack.services.network',)
help_text = _("Create a firewall policy with an ordered list "
"of firewall rules.\n\n"
"A name must be given. Firewall rules are "
"added in the order placed under the Rules tab.")
class AddPolicyStep(workflows.Step):
action_class = AddPolicyAction
contributes = ("name", "description", "shared", "audited")
def contribute(self, data, context):
context = super(AddPolicyStep, self).contribute(data, context)
if data:
return context
class AddPolicy(workflows.Workflow):
slug = "addpolicy"
name = _("Add Policy")
finalize_button_name = _("Add")
success_message = _('Added Policy "%s".')
failure_message = _('Unable to add Policy "%s".')
success_url = "horizon:project:firewalls:index"
default_steps = (AddPolicyStep, SelectRulesStep)
def format_status_message(self, message):
return message % self.context.get('name')
def handle(self, request, context):
try:
api.fwaas.policy_create(request, **context)
return True
except Exception as e:
msg = self.format_status_message(self.failure_message) + str(e)
exceptions.handle(request, msg)
return False
class AddFirewallAction(workflows.Action):
name = forms.CharField(max_length=80,
label=_("Name"),
required=False)
description = forms.CharField(max_length=80,
label=_("Description"),
required=False)
firewall_policy_id = forms.ChoiceField(label=_("Policy"))
shared = forms.BooleanField(label=_("Shared"),
initial=False,
required=False)
# TODO(amotoki): make UP/DOWN translatable
admin_state_up = forms.ChoiceField(choices=[(True, 'UP'), (False, 'DOWN')],
label=_("Admin State"))
def __init__(self, request, *args, **kwargs):
super(AddFirewallAction, self).__init__(request, *args, **kwargs)
firewall_policy_id_choices = [('', _("Select a Policy"))]
try:
tenant_id = self.request.user.tenant_id
policies = api.fwaas.policy_list(request, tenant_id=tenant_id)
policies = sorted(policies, key=lambda policy: policy.name)
except Exception as e:
exceptions.handle(
request,
_('Unable to retrieve policy list (%(error)s).') % {
'error': str(e)})
policies = []
for p in policies:
p.set_id_as_name_if_empty()
firewall_policy_id_choices.append((p.id, p.name))
self.fields['firewall_policy_id'].choices = firewall_policy_id_choices
# only admin can set 'shared' attribute to True
if not request.user.is_superuser:
self.fields['shared'].widget.attrs['disabled'] = 'disabled'
class Meta:
name = _("AddFirewall")
permissions = ('openstack.services.network',)
help_text = _("Create a firewall based on a policy.\n\n"
"A policy must be selected. "
"Other fields are optional.")
class AddFirewallStep(workflows.Step):
action_class = AddFirewallAction
contributes = ("name", "firewall_policy_id", "description",
"shared", "admin_state_up")
def contribute(self, data, context):
context = super(AddFirewallStep, self).contribute(data, context)
context['admin_state_up'] = (context['admin_state_up'] == 'True')
return context
class AddFirewall(workflows.Workflow):
slug = "addfirewall"
name = _("Add Firewall")
finalize_button_name = _("Add")
success_message = _('Added Firewall "%s".')
failure_message = _('Unable to add Firewall "%s".')
success_url = "horizon:project:firewalls:index"
# fwaas is designed to support a wide range of vendor
# firewalls. Considering the multitude of vendor firewall
# features in place today, firewall definition can
# involve more complex configuration over time. Hence,
# a workflow instead of a single form is used for
# firewall_rule add to be ready for future extension.
default_steps = (AddFirewallStep,)
def format_status_message(self, message):
return message % self.context.get('name')
def handle(self, request, context):
try:
api.fwaas.firewall_create(request, **context)
return True
except Exception as e:
msg = self.format_status_message(self.failure_message) + str(e)
exceptions.handle(request, msg)
return False
| apache-2.0 |
seem-sky/kbengine | kbe/src/lib/python/Lib/encodings/charmap.py | 860 | 2084 | """ Generic Python Character Mapping Codec.
Use this codec directly rather than through the automatic
conversion mechanisms supplied by unicode() and .encode().
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.charmap_encode
decode = codecs.charmap_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def __init__(self, errors='strict', mapping=None):
codecs.IncrementalEncoder.__init__(self, errors)
self.mapping = mapping
def encode(self, input, final=False):
return codecs.charmap_encode(input, self.errors, self.mapping)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def __init__(self, errors='strict', mapping=None):
codecs.IncrementalDecoder.__init__(self, errors)
self.mapping = mapping
def decode(self, input, final=False):
return codecs.charmap_decode(input, self.errors, self.mapping)[0]
class StreamWriter(Codec,codecs.StreamWriter):
def __init__(self,stream,errors='strict',mapping=None):
codecs.StreamWriter.__init__(self,stream,errors)
self.mapping = mapping
def encode(self,input,errors='strict'):
return Codec.encode(input,errors,self.mapping)
class StreamReader(Codec,codecs.StreamReader):
def __init__(self,stream,errors='strict',mapping=None):
codecs.StreamReader.__init__(self,stream,errors)
self.mapping = mapping
def decode(self,input,errors='strict'):
return Codec.decode(input,errors,self.mapping)
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='charmap',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
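# Usage sketch: the codec machinery supplies ``mapping`` via the generated
# per-encoding modules, but the primitives also accept a mapping directly:
#
#   import codecs
#   codecs.charmap_decode(b'a', 'strict', {0x61: 'A'})  # -> ('A', 1)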
| lgpl-3.0 |
zopim/babel-localization | babel/messages/plurals.py | 67 | 7207 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Plural form definitions."""
from babel.core import default_locale, Locale
from babel.util import itemgetter
LC_CTYPE = default_locale('LC_CTYPE')
PLURALS = {
# Afar
# 'aa': (),
# Abkhazian
# 'ab': (),
# Avestan
# 'ae': (),
# Afrikaans - From Pootle's PO's
'af': (2, '(n != 1)'),
# Akan
# 'ak': (),
# Amharic
# 'am': (),
# Aragonese
# 'an': (),
# Arabic - From Pootle's PO's
'ar': (6, '(n==0 ? 0 : n==1 ? 1 : n==2 ? 2 : n>=3 && n<=10 ? 3 : n>=11 && n<=99 ? 4 : 5)'),
# Assamese
# 'as': (),
# Avaric
# 'av': (),
# Aymara
# 'ay': (),
# Azerbaijani
# 'az': (),
# Bashkir
# 'ba': (),
# Belarusian
# 'be': (),
# Bulgarian - From Pootle's PO's
'bg': (2, '(n != 1)'),
# Bihari
# 'bh': (),
# Bislama
# 'bi': (),
# Bambara
# 'bm': (),
# Bengali - From Pootle's PO's
'bn': (2, '(n != 1)'),
# Tibetan - as discussed in private with Andrew West
'bo': (1, '0'),
# Breton
# 'br': (),
# Bosnian
# 'bs': (),
# Catalan - From Pootle's PO's
'ca': (2, '(n != 1)'),
# Chechen
# 'ce': (),
# Chamorro
# 'ch': (),
# Corsican
# 'co': (),
# Cree
# 'cr': (),
# Czech
'cs': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Church Slavic
# 'cu': (),
# Chuvash
'cv': (1, '0'),
# Welsh
'cy': (5, '(n==1 ? 1 : n==2 ? 2 : n==3 ? 3 : n==6 ? 4 : 0)'),
# Danish
'da': (2, '(n != 1)'),
# German
'de': (2, '(n != 1)'),
# Divehi
# 'dv': (),
# Dzongkha
'dz': (1, '0'),
# Greek
'el': (2, '(n != 1)'),
# English
'en': (2, '(n != 1)'),
# Esperanto
'eo': (2, '(n != 1)'),
# Spanish
'es': (2, '(n != 1)'),
# Estonian
'et': (2, '(n != 1)'),
# Basque - From Pootle's PO's
'eu': (2, '(n != 1)'),
# Persian - From Pootle's PO's
'fa': (1, '0'),
# Finnish
'fi': (2, '(n != 1)'),
# French
'fr': (2, '(n > 1)'),
# Friulian - From Pootle's PO's
'fur': (2, '(n > 1)'),
# Irish
'ga': (3, '(n==1 ? 0 : n==2 ? 1 : 2)'),
# Galician - From Pootle's PO's
'gl': (2, '(n != 1)'),
# Hausa - From Pootle's PO's
'ha': (2, '(n != 1)'),
# Hebrew
'he': (2, '(n != 1)'),
# Hindi - From Pootle's PO's
'hi': (2, '(n != 1)'),
# Croatian
'hr': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Hungarian
'hu': (1, '0'),
# Armenian - From Pootle's PO's
'hy': (1, '0'),
# Icelandic - From Pootle's PO's
'is': (2, '(n != 1)'),
# Italian
'it': (2, '(n != 1)'),
# Japanese
'ja': (1, '0'),
# Georgian - From Pootle's PO's
'ka': (1, '0'),
# Kongo - From Pootle's PO's
'kg': (2, '(n != 1)'),
# Khmer - From Pootle's PO's
'km': (1, '0'),
# Korean
'ko': (1, '0'),
# Kurdish - From Pootle's PO's
'ku': (2, '(n != 1)'),
# Lao - Another member of the Tai language family, like Thai.
'lo': (1, '0'),
# Lithuanian
'lt': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Latvian
'lv': (3, '(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2)'),
# Maltese - From Pootle's PO's
'mt': (4, '(n==1 ? 0 : n==0 || ( n%100>1 && n%100<11) ? 1 : (n%100>10 && n%100<20 ) ? 2 : 3)'),
# Norwegian Bokmål
'nb': (2, '(n != 1)'),
# Dutch
'nl': (2, '(n != 1)'),
# Norwegian Nynorsk
'nn': (2, '(n != 1)'),
# Norwegian
'no': (2, '(n != 1)'),
# Punjabi - From Pootle's PO's
'pa': (2, '(n != 1)'),
# Polish
'pl': (3, '(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Portuguese
'pt': (2, '(n != 1)'),
# Brazilian
'pt_BR': (2, '(n > 1)'),
# Romanian - From Pootle's PO's
'ro': (3, '(n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2)'),
# Russian
'ru': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Slovak
'sk': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Slovenian
'sl': (4, '(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3)'),
# Serbian - From Pootle's PO's
'sr': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Southern Sotho - From Pootle's PO's
'st': (2, '(n != 1)'),
# Swedish
'sv': (2, '(n != 1)'),
# Thai
'th': (1, '0'),
# Turkish
'tr': (1, '0'),
# Ukrainian
'uk': (3, '(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2)'),
# Venda - From Pootle's PO's
've': (2, '(n != 1)'),
# Vietnamese - From Pootle's PO's
'vi': (1, '0'),
# Xhosa - From Pootle's PO's
'xh': (2, '(n != 1)'),
# Chinese - From Pootle's PO's
'zh_CN': (1, '0'),
'zh_HK': (1, '0'),
'zh_TW': (1, '0'),
}
DEFAULT_PLURAL = (2, '(n != 1)')
class _PluralTuple(tuple):
"""A tuple with plural information."""
__slots__ = ()
num_plurals = property(itemgetter(0), doc="""
The number of plurals used by the locale.""")
plural_expr = property(itemgetter(1), doc="""
The plural expression used by the locale.""")
    plural_forms = property(lambda x: 'nplurals=%s; plural=%s' % x, doc="""
The plural expression used by the catalog or locale.""")
def __str__(self):
return self.plural_forms
def get_plural(locale=LC_CTYPE):
"""A tuple with the information catalogs need to perform proper
pluralization. The first item of the tuple is the number of plural
forms, the second the plural expression.
>>> get_plural(locale='en')
(2, '(n != 1)')
>>> get_plural(locale='ga')
(3, '(n==1 ? 0 : n==2 ? 1 : 2)')
The object returned is a special tuple with additional members:
>>> tup = get_plural("ja")
>>> tup.num_plurals
1
>>> tup.plural_expr
'0'
>>> tup.plural_forms
    'nplurals=1; plural=0'
Converting the tuple into a string prints the plural forms for a
gettext catalog:
>>> str(tup)
    'nplurals=1; plural=0'
"""
locale = Locale.parse(locale)
try:
tup = PLURALS[str(locale)]
except KeyError:
try:
tup = PLURALS[locale.language]
except KeyError:
tup = DEFAULT_PLURAL
return _PluralTuple(tup)
| bsd-3-clause |
gmarciani/pymple | sort/MergeSort.py | 1 | 1370 | '''
Created on May 25, 2013
@author: Giacomo
'''
def mergeSort(L):
recursiveMergeSort(L, 0, len(L) - 1)
def recursiveMergeSort(L, left, right):
if left >= right:
return
mid = int((left + right) / 2)
recursiveMergeSort(L, left, mid)
recursiveMergeSort(L, mid + 1, right)
mergePartitions(L, left, mid, right)
def mergePartitions(L, left, mid, right):
idLeft = left
idRight = mid + 1
tempList = []
    while True:  # interleave the two sorted runs
if L[idLeft] < L[idRight]:
tempList.append(L[idLeft])
idLeft += 1
            if idLeft > mid:  # left run exhausted; flush the rest of the right run
for v in L[idRight:right + 1]:
tempList.append(v)
break
else:
tempList.append(L[idRight])
idRight += 1
            if idRight > right:  # right run exhausted; flush the rest of the left run
for v in L[idLeft:mid + 1]:
tempList.append(v)
break
    for i in range(left, right + 1):  # copy the merged run back into L
L[i] = tempList[i - left]
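# Worked example: for L = [2, 7, 1, 9], merging the sorted halves [2, 7] and
# [1, 9] (left=0, mid=1, right=3) interleaves them into tempList = [1, 2, 7, 9],
# which the final loop copies back into L[0:4].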
def testMergeSort(L):
mergeSort(L)
print L
if __name__ == "__main__":
print "\n\n"
print "# L #"
L = [3,2,1,7,6,5,4,10,9,8]
print L
print "\n\n"
print "### MergeSort ###"
testMergeSort(L)
print "\n\n" | mit |
eduNEXT/edunext-platform | common/test/acceptance/tests/studio/base_studio_test.py | 4 | 6142 | """
Base classes used by studio tests.
"""
from bok_choy.page_object import XSS_INJECTION
from common.test.acceptance.fixtures.course import CourseFixture
from common.test.acceptance.fixtures.library import LibraryFixture
from common.test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.studio.overview import CourseOutlinePage
from common.test.acceptance.pages.studio.utils import verify_ordering
from common.test.acceptance.tests.helpers import AcceptanceTest, UniqueCourseTest
class StudioCourseTest(UniqueCourseTest):
"""
Base class for all Studio course tests.
"""
def setUp(self, is_staff=False, test_xss=True): # pylint: disable=arguments-differ
"""
Install a course with no content using a fixture.
"""
super(StudioCourseTest, self).setUp()
self.test_xss = test_xss
self.install_course_fixture(is_staff)
def install_course_fixture(self, is_staff=False):
"""
Install a course fixture
"""
self.course_fixture = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name'],
)
if self.test_xss:
xss_injected_unique_id = XSS_INJECTION + self.unique_id
test_improper_escaping = {u"value": xss_injected_unique_id}
self.course_fixture.add_advanced_settings({
"advertised_start": test_improper_escaping,
"info_sidebar_name": test_improper_escaping,
"cert_name_short": test_improper_escaping,
"cert_name_long": test_improper_escaping,
"display_organization": test_improper_escaping,
"display_coursenumber": test_improper_escaping,
})
self.course_info['display_organization'] = xss_injected_unique_id
self.course_info['display_coursenumber'] = xss_injected_unique_id
self.populate_course_fixture(self.course_fixture)
self.course_fixture.install()
self.user = self.course_fixture.user
self.log_in(self.user, is_staff)
def populate_course_fixture(self, course_fixture):
"""
Populate the children of the test course fixture.
"""
pass
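        # Subclasses typically override this hook; a minimal sketch using the
        # acceptance-test fixtures (names assume the standard fixture API):
        #
        #   from common.test.acceptance.fixtures.course import XBlockFixtureDesc
        #   def populate_course_fixture(self, course_fixture):
        #       course_fixture.add_children(
        #           XBlockFixtureDesc('chapter', 'Test Section').add_children(
        #               XBlockFixtureDesc('sequential', 'Test Subsection')))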
def log_in(self, user, is_staff=False):
"""
Log in as the user that created the course. The user will be given instructor access
to the course and enrolled in it. By default the user will not have staff access unless
is_staff is passed as True.
Args:
user(dict): dictionary containing user data: {'username': ..., 'email': ..., 'password': ...}
is_staff(bool): register this user as staff
"""
self.auth_page = AutoAuthPage(
self.browser,
staff=is_staff,
username=user.get('username'),
email=user.get('email'),
password=user.get('password')
)
self.auth_page.visit()
class ContainerBase(StudioCourseTest):
"""
Base class for tests that do operations on the container page.
"""
def setUp(self, is_staff=False):
"""
Create a unique identifier for the course used in this test.
"""
# Ensure that the superclass sets up
super(ContainerBase, self).setUp(is_staff=is_staff)
self.outline = CourseOutlinePage(
self.browser,
self.course_info['org'],
self.course_info['number'],
self.course_info['run']
)
def go_to_nested_container_page(self):
"""
Go to the nested container page.
"""
unit = self.go_to_unit_page()
# The 0th entry is the unit page itself.
container = unit.xblocks[1].go_to_container()
return container
def go_to_unit_page(self, section_name='Test Section', subsection_name='Test Subsection', unit_name='Test Unit'):
"""
Go to the test unit page.
If make_draft is true, the unit page will be put into draft mode.
"""
self.outline.visit()
subsection = self.outline.section(section_name).subsection(subsection_name)
return subsection.expand_subsection().unit(unit_name).go_to()
def do_action_and_verify(self, action, expected_ordering):
"""
Perform the supplied action and then verify the resulting ordering.
"""
container = self.go_to_nested_container_page()
action(container)
verify_ordering(self, container, expected_ordering)
# Reload the page to see that the change was persisted.
container = self.go_to_nested_container_page()
verify_ordering(self, container, expected_ordering)
class StudioLibraryTest(AcceptanceTest):
"""
Base class for all Studio library tests.
"""
as_staff = True
def setUp(self):
"""
Install a library with no content using a fixture.
"""
super(StudioLibraryTest, self).setUp()
fixture = LibraryFixture(
'test_org',
self.unique_id,
u'Test Library {}'.format(self.unique_id),
)
self.populate_library_fixture(fixture)
fixture.install()
self.library_fixture = fixture
self.library_info = fixture.library_info
self.library_key = fixture.library_key
self.user = fixture.user
self.log_in(self.user, self.as_staff)
def populate_library_fixture(self, library_fixture):
"""
Populate the children of the test course fixture.
"""
pass
def log_in(self, user, is_staff=False):
"""
Log in as the user that created the library.
By default the user will not have staff access unless is_staff is passed as True.
"""
auth_page = AutoAuthPage(
self.browser,
staff=is_staff,
username=user.get('username'),
email=user.get('email'),
password=user.get('password')
)
auth_page.visit()
| agpl-3.0 |
2014cdag11/2014cadg11 | wsgi/programs/cdag3/__init__.py | 12 | 16404 | import cherrypy
# Definition of the CDAG3 class
class CDAG3(object):
    # Each group uses index to direct the programs that follow
@cherrypy.expose
def index(self, *args, **kwargs):
outstring = '''
        This is the cdag3 group's program-development page under the 2014CDA collaborative project; below are the W12 task items.<br />
        <!-- Relative links are used here instead of absolute URLs (this section is an HTML comment) -->
        <a href="cube1">cdag3 parametric cube drawing</a> (size variables a, b, c)<br /><br />
        <a href="cube1">cdag1 parametric cube drawing</a> (size variables a, b, c)<br /><br />
        <a href="fourbar1">Four-bar linkage assembly</a><br /><br />
        Make sure the linkage part files below are located in the V:/home/fourbar directory, and that an empty Creo assembly file is open.<br />
        <a href="/static/fourbar.7z">fourbar.7z</a> (right-click to save as a .7z file)<br />
'''
return outstring
'''
    If the following arrangement is adopted:
import programs.cdag3 as cdag3
root.cdag3 = cdag3.CDAG3()
    then after the program starts, the handler can be invoked via /cdag3/cube1
'''
@cherrypy.expose
def cube1(self, *args, **kwargs):
'''
        // To open a specific part file yourself:
        // if the third argument is false, the model is only loaded into the session and not displayed
        // ret is the return value of the model-open call
var ret = document.pwl.pwlMdlOpen("axle_5.prt", "v:/tmp", false);
if (!ret.Status) {
alert("pwlMdlOpen failed (" + ret.ErrorCode + ")");
}
        //Assign the Pro/E session to the variable session
var session = pfcGetProESession();
        // Open the part file in a window and display it
var window = session.OpenFile(pfcCreate("pfcModelDescriptor").CreateFromFileName("axle_5.prt"));
var solid = session.GetModel("axle_5.prt",pfcCreate("pfcModelType").MDL_PART);
'''
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/weblink/examples/jscript/pfcUtils.js"></script>
<script type="text/javascript" src="/static/weblink/examples/jscript/pfcParameterExamples.js"></script>
<script type="text/javascript" src="/static/weblink/examples/jscript/pfcComponentFeatExamples.js"></script>
</head>
<body>
<script type="text/javascript">
var session = pfcGetProESession ();
        // Use the currently opened file as the solid model
// for volume
var solid = session.CurrentModel;
var a, b, c, i, j, aValue, bValue, cValue, volume, count;
        // Assign the model-file parameter a to the JavaScript variable a (likewise b and c below)
a = solid.GetParam("a");
b = solid.GetParam("b");
c = solid.GetParam("c");
volume=0;
count=0;
try
{
for(i=0;i<5;i++)
{
myf = 100;
myn = myf + i*10;
        // Set the parameter values, using CreateDoubleParamValue from ModelItem to convert them into the floating-point values Pro/Web.Link requires
aValue = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn);
bValue = pfcCreate ("MpfcModelItem").CreateDoubleParamValue(myn);
        // Assign the prepared values to the corresponding part parameters
a.Value = aValue;
b.Value = bValue;
        //After the part dimensions are reset, call Regenerate to update the model
solid.Regenerate(void null);
        //Use GetMassProperty to obtain the model's mass-property object
properties = solid.GetMassProperty(void null);
volume = properties.Volume;
count = count + 1;
alert("執行第"+count+"次,零件總體積:"+volume);
        // Save the part as a new file
//var newfile = document.pwl.pwlMdlSaveAs("filename.prt", "v:/tmp", "filename_5_"+count+".prt");
        // Test STL export
//var stl_csys = "PRT_CSYS_DEF";
//var stl_instrs = new pfcCreate ("pfcSTLASCIIExportInstructions").Create(stl_csys);
//stl_instrs.SetQuality(10);
//solid.Export("v:/tmp/filename_5_"+count+".stl", stl_instrs);
        // End of the export test
//if (!newfile.Status) {
//alert("pwlMdlSaveAs failed (" + newfile.ErrorCode + ")");
//}
} // for loop
}
catch (err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
</script>
</body>
</html>
'''
return outstring
@cherrypy.expose
def fourbar1(self, *args, **kwargs):
outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="content-type" content="text/html;charset=utf-8">
<script type="text/javascript" src="/static/weblink/examples/jscript/pfcUtils.js"></script>
</head>
<body>
<script type="text/javascript">
if (!pfcIsWindows())
netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect");
var session = pfcGetProESession();
// 設定 config option
session.SetConfigOption("comp_placement_assumptions","no");
// 建立擺放零件的位置矩陣
var identityMatrix = pfcCreate ("pfcMatrix3D");
for (var x = 0; x < 4; x++)
for (var y = 0; y < 4; y++)
{
if (x == y)
identityMatrix.Set (x, y, 1.0);
else
identityMatrix.Set (x, y, 0.0);
}
var transf = pfcCreate ("pfcTransform3D").Create (identityMatrix);
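        // The identity transform drops each component at the assembly origin; a
        // different pfcMatrix3D (rotation in rows 0-2, translation in row 3) could
        // pre-position a part before the constraints below are applied.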
        // Get the current working directory
var currentDir = session.getCurrentDirectory();
        // Use the currently open file as the model
var model = session.CurrentModel;
        // Verify a model exists and that it is an assembly
if (model == void null || model.Type != pfcCreate ("pfcModelType").MDL_ASSEMBLY)
throw new Error (0, "Current model is not an assembly.");
var assembly = model;
/**----------------------------------------------- link0 -------------------------------------------------------------**/
        //File path; keeping the part files under the working directory is recommended
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("v:/home/fourbar/link0.prt");
        // If link0.prt is already in session, use it directly
var componentModel = session.GetModelFromDescr (descr);
        //If link0.prt is not in session, load it into session from the working directory
var componentModel = session.RetrieveModel(descr);
        //Once link0.prt is in session, place it into the assembly
if (componentModel != void null)
{
        //Note: this asmcomp is the object the constraints are applied to
        //asmcomp is a feature object; the part is placed into the assembly using the transf coordinate transform
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
        // Create the constraint-collection variable
var constrs = pfcCreate ("pfcComponentConstraints");
        //Set the assembly's three datum planes; note the default names are ASM_FRONT etc., not ASM_D_FRONT as shown in Pro/E WF
var asmDatums = new Array ("ASM_FRONT", "ASM_TOP", "ASM_RIGHT");
        //Set the part's three datum planes; the names match those in Pro/E WF
var compDatums = new Array ("FRONT", "TOP", "RIGHT");
        //Create the ids variable; intseq is a sequence-of-integers type whose elements are retrieved by integer index, starting at 0
var ids = pfcCreate ("intseq");
        //Create the component-path variable
var path = pfcCreate ("MpfcAssembly").CreateComponentPath (assembly, ids);
        //Variables for interactive selection
var MpfcSelect = pfcCreate ("MpfcSelect");
        //Loop to constrain the three datum planes of the assembly and the part
for (var i = 0; i < 3; i++)
{
        //Set the assembly reference plane
var asmItem = assembly.GetItemByName (pfcCreate ("pfcModelItemType").ITEM_SURFACE, asmDatums [i]);
        //If no matching assembly reference plane exists, set the interactive-selection flag
if (asmItem == void null)
{
interactFlag = true;
continue;
}
        //Set the part reference plane
var compItem = componentModel.GetItemByName (pfcCreate ("pfcModelItemType").ITEM_SURFACE, compDatums [i]);
        //If no matching part reference plane exists, set the interactive-selection flag
if (compItem == void null)
{
interactFlag = true;
continue;
}
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, path);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (false, false);
        //Append the selection data to the constraint collection
constrs.Append (constr);
}
        //Apply the assembly constraints
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
/**----------------------------------------------- link1 -------------------------------------------------------------**/
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("v:/home/fourbar/link1.prt");
// Use link1.prt from the session if present; otherwise retrieve it from the working directory
var componentModel = session.GetModelFromDescr (descr);
if (componentModel == void null)
    componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var components = assembly.ListFeaturesByType(true, pfcCreate ("pfcFeatureType").FEATTYPE_COMPONENT);
var featID = components.Item(0).Id;
ids.Append(featID);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
var subassembly = subPath.Leaf;
var asmDatums = new Array ("A_1", "TOP", "ASM_TOP");
var compDatums = new Array ("A_1", "TOP", "TOP");
var relation = new Array (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS,pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate ("pfcComponentConstraints");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, false);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
/**----------------------------------------------- link2 -------------------------------------------------------------**/
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("v:/home/fourbar/link2.prt");
// Use link2.prt from the session if present; otherwise retrieve it from the working directory
var componentModel = session.GetModelFromDescr (descr);
if (componentModel == void null)
    componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var ids = pfcCreate ("intseq");
ids.Append(featID+1);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_2", "TOP", "ASM_TOP");
var compDatums = new Array ("A_1", "TOP", "TOP");
var relation = new Array (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS,pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate ("pfcComponentConstraints");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, false);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
/**----------------------------------------------- link3 -------------------------------------------------------------**/
var descr = pfcCreate ("pfcModelDescriptor").CreateFromFileName ("v:/home/fourbar/link3.prt");
// Use link3.prt from the session if present; otherwise retrieve it from the working directory
var componentModel = session.GetModelFromDescr (descr);
if (componentModel == void null)
    componentModel = session.RetrieveModel(descr);
if (componentModel != void null)
{
var asmcomp = assembly.AssembleComponent (componentModel, transf);
}
var relation = new Array (pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_ALIGN, pfcCreate ("pfcComponentConstraintType").ASM_CONSTRAINT_MATE);
var relationItem = new Array(pfcCreate("pfcModelItemType").ITEM_AXIS,pfcCreate("pfcModelItemType").ITEM_SURFACE);
var constrs = pfcCreate ("pfcComponentConstraints");
var ids = pfcCreate ("intseq");
ids.Append(featID+2);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_2");
var compDatums = new Array ("A_1");
for (var i = 0; i < 1; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, false);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
var ids = pfcCreate ("intseq");
ids.Append(featID);
var subPath = pfcCreate ("MpfcAssembly").CreateComponentPath( assembly, ids );
subassembly = subPath.Leaf;
var asmDatums = new Array ("A_2", "TOP");
var compDatums = new Array ("A_2", "BOTTON");
for (var i = 0; i < 2; i++)
{
var asmItem = subassembly.GetItemByName (relationItem[i], asmDatums [i]);
if (asmItem == void null)
{
interactFlag = true;
continue;
}
var compItem = componentModel.GetItemByName (relationItem[i], compDatums [i]);
if (compItem == void null)
{
interactFlag = true;
continue;
}
var MpfcSelect = pfcCreate ("MpfcSelect");
var asmSel = MpfcSelect.CreateModelItemSelection (asmItem, subPath);
var compSel = MpfcSelect.CreateModelItemSelection (compItem, void null);
var constr = pfcCreate ("pfcComponentConstraint").Create (relation[i]);
constr.AssemblyReference = asmSel;
constr.ComponentReference = compSel;
constr.Attributes = pfcCreate ("pfcConstraintAttributes").Create (true, true);
constrs.Append (constr);
}
asmcomp.SetConstraints (constrs, void null);
/**-------------------------------------------------------------------------------------------------------------------**/
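/**------------------- helper sketch (illustrative) -----------------------**/
// An addition, not part of the original script: the per-link blocks above
// repeat the same lookup-and-constrain pattern, so they could be folded into
// a helper like this one. The name buildConstraints and its parameter list
// are hypothetical; the pfc* calls are the same ones used above. The helper
// is only defined here as a sketch, not called.
function buildConstraints (owner, ownerPath, compModel, conTypes, itemTypes, asmNames, compNames, forced)
{
    var cs = pfcCreate ("pfcComponentConstraints");
    for (var i = 0; i < asmNames.length; i++)
    {
        var aItem = owner.GetItemByName (itemTypes[i], asmNames[i]);
        var cItem = compModel.GetItemByName (itemTypes[i], compNames[i]);
        if (aItem == void null || cItem == void null)
        {
            // Missing reference: fall back to interactive selection, as above
            interactFlag = true;
            continue;
        }
        var sel = pfcCreate ("MpfcSelect");
        var c = pfcCreate ("pfcComponentConstraint").Create (conTypes[i]);
        c.AssemblyReference = sel.CreateModelItemSelection (aItem, ownerPath);
        c.ComponentReference = sel.CreateModelItemSelection (cItem, void null);
        c.Attributes = pfcCreate ("pfcConstraintAttributes").Create (forced, false);
        cs.Append (c);
    }
    return cs;
}
/**-------------------------------------------------------------------------**/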
var session = pfcGetProESession ();
var solid = session.CurrentModel;
var properties = solid.GetMassProperty(void null);
var COG = properties.GravityCenter;
document.write("MassProperty:<br />");
document.write("Mass:"+(properties.Mass.toFixed(2))+" pound<br />");
document.write("Average Density:"+(properties.Density.toFixed(2))+" pound/inch^3<br />");
document.write("Surface area:"+(properties.SurfaceArea.toFixed(2))+" inch^2<br />");
document.write("Volume:"+(properties.Volume.toFixed(2))+" inch^3<br />");
document.write("COG_X:"+COG.Item(0).toFixed(2)+"<br />");
document.write("COG_Y:"+COG.Item(1).toFixed(2)+"<br />");
document.write("COG_Z:"+COG.Item(2).toFixed(2)+"<br />");
try
{
document.write("Current Directory:<br />"+currentDir);
}
catch (err)
{
alert ("Exception occurred: "+pfcGetExceptionType (err));
}
assembly.Regenerate (void null);
session.GetModelWindow (assembly).Repaint();
</script>
</body>
</html>
'''
return outstring
| gpl-2.0 |
bigyanbhar/single-file-code | multiFileLogging.py | 1 | 2184 | __author__ = 'bigyan'
import logging
import os
class class2:
    """Tiny example class that logs through a named logger ("class2Log")."""
    __logger = None
    def __init__(self):
        self.__logger = logging.getLogger("class2Log")
    def log(self, message):
        # "class2Log" has no handler attached in this module, so these
        # records propagate up to the root logger.
        self.__logger.info(message)
#logging.basicConfig(
# filename=self.__expId + ".log",
# format='[%(asctime)s]' + ' ' +
# '{%(threadName)s/%(filename)s/%(module)s/%(funcName)s/%(lineno)d}' + ' ' +
# '%(levelname)s | %(message)s',
# filemode='w',
# level=logging.DEBUG)
#def setup_logger(logger_name, log_file, level=logging.INFO):
# logging.basicConfig(
# format='[%(asctime)s]' + ' ' +
# '{%(threadName)s/%(filename)s/%(module)s/%(funcName)s/%(lineno)d}' + ' ' +
# '%(levelname)s | %(message)s',
# filemode='w',
# level=logging.DEBUG)
# logger = logging.getLogger(logger_name)
#
# #formatter = logging.Formatter('%(asctime)s : %(message)s')
# fileHandler = logging.FileHandler(log_file, mode='w')
# #fileHandler.setFormatter(formatter)
# #streamHandler = logging.StreamHandler()
# #streamHandler.setFormatter(formatter)
#
# #logger.setLevel(level)
# logger.addHandler(fileHandler)
# #logger.addHandler(streamHandler)
# logger.propagate = False
def setup_logger(loggerName, logFile, level=logging.DEBUG):
logger = logging.getLogger(loggerName)
formatter = \
logging.Formatter('[%(asctime)s]' + ' ' +
'{%(threadName)s/%(filename)s/%(module)s/%(funcName)s/%(lineno)d}' + ' ' +
'%(levelname)s | %(message)s')
fileHandler = logging.FileHandler(logFile, mode='w')
fileHandler.setFormatter(formatter)
logger.setLevel(level)
logger.addHandler(fileHandler)
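# Illustrative sketch (an addition, not part of the original module): calling
# setup_logger() twice with the same logger name attaches a second
# FileHandler, so every record would then be written twice. A guarded
# variant could look like this; the name setup_logger_once is hypothetical,
# while logger.handlers is the standard logging attribute.
def setup_logger_once(loggerName, logFile, level=logging.DEBUG):
    logger = logging.getLogger(loggerName)
    if logger.handlers:
        # Already configured; reuse the existing handlers.
        return logger
    formatter = \
        logging.Formatter('[%(asctime)s]' + ' ' +
                          '{%(threadName)s/%(filename)s/%(module)s/%(funcName)s/%(lineno)d}' + ' ' +
                          '%(levelname)s | %(message)s')
    fileHandler = logging.FileHandler(logFile, mode='w')
    fileHandler.setFormatter(formatter)
    logger.setLevel(level)
    logger.addHandler(fileHandler)
    return logger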
def main():
    # Guard the mkdir calls so a second run does not crash on existing dirs
    if not os.path.isdir("log1"):
        os.mkdir("log1")
    setup_logger('log1', r'log1/log1.log')
    log1 = logging.getLogger('log1')
    log1.info('Info for log 1!')
    #log1.handlers[0].stream.close()
    if not os.path.isdir("log2"):
        os.mkdir("log2")
    setup_logger('log2', r'log2/log2.log')
    log2 = logging.getLogger('log2')
    log2.info('Info for log 2!')
    log1.error('Oh, no! Something went wrong!')
    # The root logger has no handler configured here, so this record is dropped
    logging.info("Wow")
if __name__ == "__main__":
main() | apache-2.0 |
bpsinc-native/src_third_party_trace-viewer | third_party/tvcm/tvcm/parse_html_deps.py | 1 | 6994 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from HTMLParser import HTMLParser
from tvcm import module
from tvcm import strip_js_comments
from tvcm import html_generation_controller
CHUNK_TEXT_OP = 'text-op'
CHUNK_SCRIPT_OP = 'script-op'
CHUNK_STYLESHEET_OP = 'stylesheet-op'
CHUNK_INLINE_STYLE_OP = 'inline-style-op'
class _Chunk(object):
def __init__(self, op, data):
self.op = op
self.data = data
class InlineScript(object):
def __init__(self, contents, open_tags):
self.contents = contents
self.open_tags = open_tags
self._stripped_contents = None
@property
def stripped_contents(self):
if not self._stripped_contents:
self._stripped_contents = strip_js_comments.StripJSComments(
self.contents)
return self._stripped_contents
class HTMLModuleParserResults(object):
def __init__(self):
self.scripts_external = []
self.inline_scripts = []
self.stylesheets = []
self.imports = []
self.has_decl = False
self._chunks = []
@property
def inline_stylesheets(self):
return [x.data for x in self._chunks
if x.op == CHUNK_INLINE_STYLE_OP]
def AppendHTMLContent(self, text):
self._chunks.append(_Chunk(CHUNK_TEXT_OP, text))
def AppendHTMLInlineStyleContent(self, text):
self._chunks.append(_Chunk(CHUNK_INLINE_STYLE_OP, text))
def AppendHTMLScriptSplicePoint(self, href):
self._chunks.append(_Chunk(CHUNK_SCRIPT_OP, href))
def AppendHTMLStylesheetSplicePoint(self, href):
self._chunks.append(_Chunk(CHUNK_STYLESHEET_OP, href))
def GenerateHTML(self, controller):
return ''.join(list(self.YieldHTMLInPieces(controller)))
def YieldHTMLInPieces(self, controller):
for chunk in self._chunks:
if chunk.op == CHUNK_TEXT_OP:
yield chunk.data
elif chunk.op == CHUNK_INLINE_STYLE_OP:
html = controller.GetHTMLForInlineStylesheet(chunk.data)
if html:
yield html
elif chunk.op == CHUNK_SCRIPT_OP:
html = controller.GetHTMLForScriptHRef(chunk.data)
if html:
yield html
elif chunk.op == CHUNK_STYLESHEET_OP:
html = controller.GetHTMLForStylesheetHRef(chunk.data)
if html:
yield html
else:
raise NotImplementedError()
@property
def html_contents_without_links_and_script(self):
return self.GenerateHTML(html_generation_controller.HTMLGenerationController())
_SELF_CLOSING_TAGS = ('link', 'p', 'meta')
class _Tag(object):
def __init__(self, tag, attrs):
self.tag = tag
self.attrs = attrs
def __repr__(self):
attr_string = ' '.join(['%s="%s"' % (x[0], x[1]) for x in self.attrs])
return '<%s %s>' % (self.tag, attr_string)
class HTMLModuleParser(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.current_results = None
self.current_inline_script = None
self._current_inline_style_sheet_contents = None
self.open_tags = []
def Parse(self, html):
results = HTMLModuleParserResults()
if html is None or len(html) == 0:
return results
if html.find('< /script>') != -1:
raise Exception('Escape script tags with <\/script>')
self.current_results = results
self.feed(html)
self.current_results = None
if len(self.open_tags):
raise Exception('There were open tags: %s' % ','.join(self.open_tags))
return results
def handle_decl(self, decl):
assert self.current_results.has_decl == False, 'Only one doctype decl allowed'
self.current_results.has_decl = True
def handle_starttag(self, tag, attrs):
if tag == 'br':
raise Exception('Must use <br/>')
if tag not in _SELF_CLOSING_TAGS:
self.open_tags.append(_Tag(tag, attrs))
if tag == 'link':
is_stylesheet = False
is_import = False
href = None
for attr in attrs:
if attr[0] == 'rel' and attr[1] == 'stylesheet':
is_stylesheet = True
elif attr[0] == 'rel' and attr[1] == 'import':
is_import = True
elif attr[0] == 'href':
href = attr[1]
if is_stylesheet:
self.current_results.AppendHTMLStylesheetSplicePoint(href)
self.current_results.stylesheets.append(href)
elif is_import:
self.current_results.imports.append(href)
else:
self.current_results.AppendHTMLContent(
self.get_starttag_text())
elif tag == 'script':
had_src = False
for attr in attrs:
if attr[0] == 'src':
self.current_results.scripts_external.append(attr[1])
self.current_results.AppendHTMLScriptSplicePoint(attr[1])
had_src = True
if had_src == False:
assert self.current_inline_script == None
self.current_inline_script = InlineScript(
'',
list(self.open_tags[:-1]))
elif tag == 'style':
self._current_inline_style_sheet_contents = ''
self.current_results.AppendHTMLContent(
self.get_starttag_text())
else:
self.current_results.AppendHTMLContent(
self.get_starttag_text())
def handle_entityref(self, name):
self.current_results.AppendHTMLContent('&%s;' % name)
def handle_charref(self, name):
self.current_results.AppendHTMLContent('&#%s;' % name)
def handle_startendtag(self, tag, attrs):
if (tag == 'script'):
raise Exception('Script must have explicit close tag')
self.current_results.AppendHTMLContent('%s' % self.get_starttag_text())
def handle_endtag(self, tag):
if tag not in _SELF_CLOSING_TAGS:
if len(self.open_tags) == 0:
raise Exception('got </%s> with no previous open tag' % tag)
if self.open_tags[-1].tag != tag:
raise Exception('Expected </%s> but got </%s>' % (
self.open_tags[-1].tag, tag))
self.open_tags.pop()
if tag == 'script':
if self.current_inline_script:
self.current_results.inline_scripts.append(
self.current_inline_script)
self.current_inline_script = None
elif tag == 'style':
if self._current_inline_style_sheet_contents != None:
self.current_results.AppendHTMLInlineStyleContent(
self._current_inline_style_sheet_contents)
self._current_inline_style_sheet_contents = None
self.current_results.AppendHTMLContent('</style>')
else:
self.current_results.AppendHTMLContent("</%s>" % tag)
def handle_data(self, data):
if self.current_inline_script:
self.current_inline_script.contents += data
elif self._current_inline_style_sheet_contents != None:
result = re.match(r"\s*@import url\(([^\)]*)\)", data,
flags=re.IGNORECASE)
if result:
raise Exception("@import not yet supported")
self._current_inline_style_sheet_contents += data
else:
self.current_results.AppendHTMLContent(data)
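# Illustrative usage sketch (an addition, not part of the original module):
# parse a small HTML module and check what the parser extracted.
if __name__ == '__main__':
  _parser = HTMLModuleParser()
  _results = _parser.Parse('<link rel="import" href="base.html">'
                           '<script src="main.js"></script>'
                           '<div>hello</div>')
  assert _results.imports == ['base.html']
  assert _results.scripts_external == ['main.js']
  print 'parsed imports:', _results.imports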
| bsd-3-clause |
Bismarrck/tensorflow | tensorflow/python/framework/op_def_library_test.py | 30 | 56095 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.ops.op_def_library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from google.protobuf import text_format
from tensorflow.core.framework import op_def_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_ops
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
def _unknown_shape(op):
"""Shape function for use with ops whose output shapes are unknown."""
return [tensor_shape.unknown_shape() for _ in op.outputs]
class OpDefLibraryTest(test_util.TensorFlowTestCase):
def setUp(self):
self._lib = test_ops._op_def_lib
def _add_op(self, ascii): # pylint: disable=redefined-builtin
op_def = op_def_pb2.OpDef()
text_format.Merge(ascii, op_def)
self._lib.add_op(op_def)
def Tensor(self, t, name="in"):
return self._lib.apply_op("OutT", T=t, name=name)
def testNoRegisteredOpFails(self):
with self.assertRaises(RuntimeError) as cm:
self._lib.apply_op("unknown")
self.assertEqual(str(cm.exception), "Unrecognized Op name unknown")
def testAddOpValidation(self):
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'MissingTypeAttr' "
"input_arg { name: 'a' type_attr: 'T' } ")
self.assertEqual(str(cm.exception),
"Inconsistent OpDef for 'MissingTypeAttr', "
"missing attr 'T'")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'BadTypeAttr' "
"output_arg { name: 'a' type_attr: 'T' } "
"attr { name: 'T' type: 'int' }")
self.assertEqual(
str(cm.exception),
"Attr 'T' of 'BadTypeAttr' used as a type_attr but has type int")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'MissingNumberAttr' "
"input_arg { name: 'a' type: DT_INT32 number_attr: 'N' } ")
self.assertEqual(str(cm.exception),
"Inconsistent OpDef for 'MissingNumberAttr', "
"missing attr 'N'")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'BadNumberAttr' "
"output_arg { name: 'a' type: DT_INT32 number_attr: 'N' } "
"attr { name: 'N' type: 'type' }")
self.assertEqual(
str(cm.exception),
"Attr 'N' of 'BadNumberAttr' used as a number_attr but has type type")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'TwoTypesA' "
"input_arg { name: 'a' type: DT_INT32 type_attr: 'T' } "
"attr { name: 'T' type: 'type' }")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'TwoTypesA' must have one type field not 2")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'TwoTypesB' "
"input_arg { name: 'a' type: DT_INT32 type_list_attr: 'T' } "
"attr { name: 'T' type: 'list(type)' }")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'TwoTypesB' must have one type field not 2")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'ThreeTypes' "
"input_arg { name: 'a' type: DT_INT32 type_attr: 'T' "
"type_list_attr: 'U' } "
"attr { name: 'T' type: 'type' } "
"attr { name: 'U' type: 'list(type)' }")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'ThreeTypes' must have one type field not 3")
with self.assertRaises(TypeError) as cm:
self._add_op("name: 'NoTypes' output_arg { name: 'a' } ")
self.assertEqual(str(cm.exception),
"Arg 'a' of 'NoTypes' must have one type field not 0")
def testSimple(self):
with ops.Graph().as_default():
out = self._lib.apply_op("Simple", a=3)
self.assertEqual(dtypes.float32, out.dtype)
self.assertProtoEquals("""
name: 'Simple' op: 'Simple' input: 'Simple/a'
""", out.op.node_def)
out = self._lib.apply_op("Simple", a=4)
self.assertProtoEquals("""
name: 'Simple_1' op: 'Simple' input: 'Simple_1/a'
""", out.op.node_def)
out = self._lib.apply_op("Simple", a=5, name="named")
self.assertProtoEquals("""
name: 'named' op: 'Simple' input: 'named/a'
""", out.op.node_def)
out = self._lib.apply_op("Simple", a=[[1, 2, 3], [4, 5, 6]], name="two_d")
self.assertProtoEquals("""
name: 'two_d' op: 'Simple' input: 'two_d/a'
""", out.op.node_def)
def testSimpleFailures(self):
with ops.Graph().as_default():
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a="Bad string")
self.assertEqual(str(cm.exception),
"Expected int32 passed to parameter 'a' of op 'Simple', "
"got 'Bad string' of type 'str' instead.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a=self.Tensor(dtypes.string))
self.assertEqual(str(cm.exception),
"Input 'a' of 'Simple' Op has type string "
"that does not match expected type of int32.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a=6, extra="bogus")
self.assertEqual(str(cm.exception),
"apply_op() got unexpected keyword arguments: extra")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a=6, extra1="bogus", extra2="also_bogus")
self.assertEqual(str(cm.exception),
"apply_op() got unexpected keyword arguments: extra1, "
"extra2")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple")
self.assertEqual(str(cm.exception), "No argument for input a")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", wrong=7)
self.assertEqual(str(cm.exception), "No argument for input a")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Simple", a={"label": 1})
self.assertEqual(str(cm.exception),
"Expected int32 passed to parameter 'a' of op 'Simple', "
"got {'label': 1} of type 'dict' instead.")
def testReservedInput(self):
with ops.Graph().as_default():
op = self._lib.apply_op("ReservedInput", input_=7, name="x")
self.assertProtoEquals("""
name: 'x' op: 'ReservedInput' input: 'x/input'
""", op.node_def)
def testPolymorphic(self):
with ops.Graph().as_default():
out = self._lib.apply_op("Polymorphic", a=7, name="p")
self.assertEqual(dtypes.int32, out.dtype)
self.assertProtoEquals("""
name: 'p' op: 'Polymorphic' input: 'p/a'
attr { key: 'T' value { type: DT_INT32 } }
""", out.op.node_def)
out = self._lib.apply_op("Polymorphic", a="s", name="q")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'q' op: 'Polymorphic' input: 'q/a'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
out = self._lib.apply_op("Polymorphic", a=["s", "t", "u"], name="r")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'r' op: 'Polymorphic' input: 'r/a'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Polymorphic", a="s", T=dtypes.string)
self.assertEqual(str(cm.exception),
"Should not specify value for inferred attr 'T'.")
def testPolymorphicOut(self):
with ops.Graph().as_default():
out = self._lib.apply_op("PolymorphicOut", T=dtypes.int32, name="p")
self.assertEqual(dtypes.int32, out.dtype)
self.assertProtoEquals("""
name: 'p' op: 'PolymorphicOut'
attr { key: 'T' value { type: DT_INT32 } }
""", out.op.node_def)
out = self._lib.apply_op("PolymorphicOut", T=dtypes.bool, name="q")
self.assertEqual(dtypes.bool, out.dtype)
self.assertProtoEquals("""
name: 'q' op: 'PolymorphicOut'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("PolymorphicOut")
self.assertEqual(str(cm.exception),
"No argument for attr T")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("PolymorphicOut", T=None)
self.assertEqual(str(cm.exception),
"Expected DataType for argument 'T' not None.")
def testPolymorphicDefaultOut(self):
with ops.Graph().as_default():
out = self._lib.apply_op("PolymorphicDefaultOut", T=None, name="p")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'p' op: 'PolymorphicDefaultOut'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
out = self._lib.apply_op("PolymorphicDefaultOut", T=dtypes.bool, name="q")
self.assertEqual(dtypes.bool, out.dtype)
self.assertProtoEquals("""
name: 'q' op: 'PolymorphicDefaultOut'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
def testBinary(self):
with ops.Graph().as_default():
out = self._lib.apply_op("Binary", a=8, b=9, name="b")
self.assertEqual(dtypes.int32, out.dtype)
self.assertProtoEquals("""
name: 'b' op: 'Binary' input: 'b/a' input: 'b/b'
attr { key: 'T' value { type: DT_INT32 } }
""", out.op.node_def)
out = self._lib.apply_op("Binary", a="left", b="right", name="c")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'c' op: 'Binary' input: 'c/a' input: 'c/b'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Binary", a="left", b=12)
self.assertEqual(str(cm.exception),
"Expected string passed to parameter 'b' of op 'Binary',"
" got 12 of type 'int' instead.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Binary",
a=self.Tensor(dtypes.string),
b=self.Tensor(dtypes.int32))
self.assertEqual(str(cm.exception),
"Input 'b' of 'Binary' Op has type int32 "
"that does not match type string of argument 'a'.")
def testRestrict(self):
with ops.Graph().as_default():
out = self._lib.apply_op("Restrict", a="foo", name="g")
self.assertEqual(dtypes.string, out.dtype)
self.assertProtoEquals("""
name: 'g' op: 'Restrict' input: 'g/a'
attr { key: 'T' value { type: DT_STRING } }
""", out.op.node_def)
out = self._lib.apply_op("Restrict", a=True, name="h")
self.assertEqual(dtypes.bool, out.dtype)
self.assertProtoEquals("""
name: 'h' op: 'Restrict' input: 'h/a'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Restrict", a=17)
self.assertEqual(str(cm.exception),
"Value passed to parameter 'a' has DataType int32 "
"not in list of allowed values: string, bool")
def testTypeList(self):
with ops.Graph().as_default():
op = self._lib.apply_op("TypeList", a=["foo"], name="z")
self.assertProtoEquals("""
name: 'z' op: 'TypeList' input: 'z/a_0'
attr { key: 'T' value { list { type: DT_STRING } } }
""", op.node_def)
op = self._lib.apply_op("TypeList", a=[True, 12], name="y")
self.assertProtoEquals("""
name: 'y' op: 'TypeList' input: 'y/a_0' input: 'y/a_1'
attr { key: 'T' value { list { type: DT_BOOL type: DT_INT32 } } }
""", op.node_def)
op = self._lib.apply_op("TypeList", a=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'TypeList' attr { key: 'T' value { list { } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeList", a=17)
self.assertStartsWith(str(cm.exception),
"Expected list for 'a' "
"argument to 'TypeList' Op, not ")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeList", a=[self.Tensor(dtypes.int32), None])
self.assertStartsWith(str(cm.exception),
"Tensors in list passed to 'a' of 'TypeList' Op "
"have types [int32, <NOT CONVERTIBLE TO TENSOR>]")
def testTypeListTwice(self):
with ops.Graph().as_default():
op = self._lib.apply_op("TypeListTwice",
a=["foo", True],
b=["bar", False],
name="z")
self.assertProtoEquals("""
name: 'z' op: 'TypeListTwice'
input: 'z/a_0' input: 'z/a_1' input: 'z/b_0' input: 'z/b_1'
attr { key: 'T' value { list { type: DT_STRING type: DT_BOOL } } }
""", op.node_def)
op = self._lib.apply_op("TypeListTwice", a=[], b=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'TypeListTwice' attr { key: 'T' value { list { } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeListTwice", a=["foo", True], b=["bar", 6])
self.assertEqual(str(cm.exception),
"Input 'b' of 'TypeListTwice' Op has type list of "
"string, int32 that does not match type list "
"string, bool of argument 'a'.")
def testOutTypeList(self):
with ops.Graph().as_default():
out, = self._lib.apply_op("OutTypeList", T=[dtypes.float32], name="x")
self.assertEqual(dtypes.float32, out.dtype)
self.assertProtoEquals("""
name: 'x' op: 'OutTypeList'
attr { key: 'T' value { list { type: DT_FLOAT } } }
""", out.op.node_def)
out1, out2 = self._lib.apply_op("OutTypeList",
T=[dtypes.int32, dtypes.bool],
name="w")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertProtoEquals("""
name: 'w' op: 'OutTypeList'
attr { key: 'T' value { list { type: DT_INT32 type: DT_BOOL } } }
""", out1.op.node_def)
out = self._lib.apply_op("OutTypeList", T=[], name="empty")
self.assertEqual([], out)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("OutTypeList", T=dtypes.int32)
self.assertEqual(str(cm.exception), "Expected list for attr T")
def testTypeListRestrict(self):
with ops.Graph().as_default():
op = self._lib.apply_op("TypeListRestrict", a=["foo", False], name="v")
self.assertProtoEquals("""
name: 'v' op: 'TypeListRestrict' input: 'v/a_0' input: 'v/a_1'
attr { key: 'T' value { list { type: DT_STRING type: DT_BOOL } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("TypeListRestrict", a=[True, 12])
self.assertEqual(str(cm.exception),
"Value passed to parameter 'a' has DataType int32 "
"not in list of allowed values: string, bool")
def testOutTypeListRestrict(self):
with ops.Graph().as_default():
out1, out2 = self._lib.apply_op("OutTypeListRestrict",
t=[dtypes.bool, dtypes.string],
name="u")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.string, out2.dtype)
self.assertProtoEquals("""
name: 'u' op: 'OutTypeListRestrict'
attr { key: 't' value { list { type: DT_BOOL type: DT_STRING } } }
""", out1.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("OutTypeListRestrict",
t=[dtypes.string, dtypes.int32])
self.assertEqual(str(cm.exception),
"Value passed to parameter 't' has DataType int32 "
"not in list of allowed values: string, bool")
def testAttr(self):
with ops.Graph().as_default():
op = self._lib.apply_op("Attr", a=12, name="t")
self.assertProtoEquals("""
name: 't' op: 'Attr' attr { key: 'a' value { i: 12 } }
""", op.node_def)
op = self._lib.apply_op("Attr", a=tensor_shape.Dimension(13), name="u")
self.assertProtoEquals("""
name: 'u' op: 'Attr' attr { key: 'a' value { i: 13 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr", a="bad")
self.assertEqual(str(cm.exception),
"Expected int for argument 'a' not 'bad'.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr", a=[12])
self.assertEqual(str(cm.exception),
"Expected int for argument 'a' not [12].")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr", a=None)
self.assertEqual(str(cm.exception),
"Expected int for argument 'a' not None.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("Attr")
self.assertEqual(str(cm.exception), "No argument for attr a")
def testAttrFloat(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrFloat", a=1.2, name="t")
self.assertProtoEquals("""
name: 't' op: 'AttrFloat' attr { key: 'a' value { f: 1.2 } }
""", op.node_def)
op = self._lib.apply_op("AttrFloat", a=12, name="u")
self.assertProtoEquals("""
name: 'u' op: 'AttrFloat' attr { key: 'a' value { f: 12 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrFloat", a="bad")
self.assertEqual(str(cm.exception),
"Expected float for argument 'a' not 'bad'.")
def testAttrBool(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrBool", a=True, name="t")
self.assertProtoEquals("""
name: 't' op: 'AttrBool' attr { key: 'a' value { b: true } }
""", op.node_def)
op = self._lib.apply_op("AttrBool", a=False, name="u")
self.assertProtoEquals("""
name: 'u' op: 'AttrBool' attr { key: 'a' value { b: false } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBool", a=0)
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not 0.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBool", a=1)
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not 1.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBool", a=[])
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not [].")
def testAttrBoolList(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrBoolList", a=[True, False, True], name="t")
self.assertProtoEquals("""
name: 't' op: 'AttrBoolList'
attr { key: 'a' value { list { b: true b: false b:true } } }
""", op.node_def)
op = self._lib.apply_op("AttrBoolList", a=[], name="u")
self.assertProtoEquals("""
name: 'u' op: 'AttrBoolList' attr { key: 'a' value { list { } } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("AttrBoolList", a=[0])
self.assertEqual(str(cm.exception),
"Expected bool for argument 'a' not 0.")
def testAttrMin(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrMin", a=12, name="s")
self.assertProtoEquals("""
name: 's' op: 'AttrMin' attr { key: 'a' value { i: 12 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrMin", a=2)
self.assertEqual(str(cm.exception),
"Attr 'a' of 'AttrMin' Op passed 2 less than minimum 5.")
def testAttrListMin(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrListMin", a=[1, 2], name="r")
self.assertProtoEquals("""
name: 'r' op: 'AttrListMin'
attr { key: 'a' value { list { i: 1 i: 2 } } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrListMin", a=[17])
self.assertEqual(str(cm.exception),
"Attr 'a' of 'AttrListMin' Op "
"passed list of length 1 less than minimum 2.")
def testAttrEnum(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrEnum", a="oranges", name="e")
self.assertProtoEquals("""
name: 'e' op: 'AttrEnum' attr { key: 'a' value { s: 'oranges' } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrEnum", a="invalid")
self.assertEqual(str(cm.exception),
'Attr \'a\' of \'AttrEnum\' Op '
'passed string \'invalid\' not in: '
'"apples", "oranges".')
def testAttrEnumList(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrEnumList", a=["oranges", "apples"], name="f")
self.assertProtoEquals("""
name: 'f' op: 'AttrEnumList'
attr { key: 'a' value { list { s: 'oranges' s: 'apples' } } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("AttrEnumList", a=["apples", "invalid", "oranges"])
self.assertEqual(str(cm.exception),
'Attr \'a\' of \'AttrEnumList\' Op '
'passed string \'invalid\' not '
'in: "apples", "oranges".')
def testAttrShape(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrShape", a=[5], name="s1")
self.assertProtoEquals("""
name: 's1' op: 'AttrShape'
attr { key: 'a' value { shape { dim { size: 5 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrShape", a=(4, 3, 2), name="s2")
self.assertProtoEquals("""
name: 's2' op: 'AttrShape'
attr { key: 'a' value {
shape { dim { size: 4 } dim { size: 3 } dim { size: 2 } } } }
""", op.node_def)
op = self._lib.apply_op(
"AttrShape", a=tensor_shape.TensorShape([3, 2]), name="s3")
self.assertProtoEquals("""
name: 's3' op: 'AttrShape'
attr { key: 'a' value {
shape { dim { size: 3 } dim { size: 2 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrShape", a=[], name="s4")
self.assertProtoEquals("""
name: 's4' op: 'AttrShape' attr { key: 'a' value { shape { } } }
""", op.node_def)
shape = tensor_shape_pb2.TensorShapeProto()
shape.dim.add().size = 6
shape.dim.add().size = 3
op = self._lib.apply_op("AttrShape", a=shape, name="s5")
self.assertProtoEquals("""
name: 's5' op: 'AttrShape'
attr { key: 'a' value { shape { dim { size: 6 } dim { size: 3 } } } }
""", op.node_def)
# TODO(josh11b): Re-enable this test once we stop promoting scalars to
# shapes.
# with self.assertRaises(TypeError) as cm:
# self._lib.apply_op("AttrShape", a=5)
# self.assertEqual(str(cm.exception),
# "Don't know how to convert 5 to a TensorShapeProto for"
# " argument 'a'")
with self.assertRaises(TypeError):
self._lib.apply_op("AttrShape", a="ABC")
def testAttrShapeList(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrShapeList", a=[[3, 2], [6, 5, 4]], name="sl")
self.assertProtoEquals("""
name: 'sl' op: 'AttrShapeList'
attr { key: 'a' value { list {
shape { dim { size: 3 } dim { size: 2 } }
shape { dim { size: 6 } dim { size: 5 } dim { size: 4 } } } } }
""", op.node_def)
op = self._lib.apply_op("AttrShapeList", a=[], name="esl")
self.assertProtoEquals("""
name: 'esl' op: 'AttrShapeList' attr { key: 'a' value { list { } } }
""", op.node_def)
def testAttrPartialShape(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrPartialShape", a=[5], name="s1")
self.assertProtoEquals("""
name: 's1' op: 'AttrPartialShape'
attr { key: 'a' value { shape { dim { size: 5 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrPartialShape", a=(4, None, 2), name="s2")
self.assertProtoEquals("""
name: 's2' op: 'AttrPartialShape'
attr { key: 'a' value {
shape { dim { size: 4 } dim { size: -1 } dim { size: 2 } } } }
""", op.node_def)
op = self._lib.apply_op(
"AttrPartialShape", a=tensor_shape.TensorShape([3, None]), name="s3")
self.assertProtoEquals("""
name: 's3' op: 'AttrPartialShape'
attr { key: 'a' value {
shape { dim { size: 3 } dim { size: -1 } } } }
""", op.node_def)
op = self._lib.apply_op("AttrPartialShape", a=[], name="s4")
self.assertProtoEquals("""
name: 's4' op: 'AttrPartialShape'
attr { key: 'a' value { shape { } } }
""", op.node_def)
shape = tensor_shape_pb2.TensorShapeProto()
shape.dim.add().size = -1
shape.dim.add().size = 3
op = self._lib.apply_op("AttrPartialShape", a=shape, name="s5")
self.assertProtoEquals("""
name: 's5' op: 'AttrPartialShape'
attr { key: 'a' value {
shape { dim { size: -1 } dim { size: 3 } } } }
""", op.node_def)
# TODO(ebrevdo): Re-enable once we stop promoting scalars to shapes.
# with self.assertRaises(TypeError) as cm:
# self._lib.apply_op("AttrPartialShape", a=5)
# self.assertEqual(str(cm.exception),
# "Don't know how to convert 5 to a TensorShapeProto for"
# " argument 'a'")
with self.assertRaises(TypeError):
self._lib.apply_op("AttrPartialShape", a="ABC")
def testAttrPartialShapeList(self):
with ops.Graph().as_default():
op = self._lib.apply_op(
"AttrPartialShapeList", a=[[3, 2], [6, None, 4]], name="sl")
self.assertProtoEquals("""
name: 'sl' op: 'AttrPartialShapeList'
attr { key: 'a' value { list {
shape { dim { size: 3 } dim { size: 2 } }
shape { dim { size: 6 } dim { size: -1 } dim { size: 4 } } } } }
""", op.node_def)
op = self._lib.apply_op("AttrPartialShapeList", a=[], name="esl")
self.assertProtoEquals("""
name: 'esl' op: 'AttrPartialShapeList' attr {
key: 'a' value { list { } } }
""", op.node_def)
def testAttrDefault(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrDefault", a=None, name="d")
self.assertProtoEquals("""
name: 'd' op: 'AttrDefault' attr { key: 'a' value { s: 'banana' } }
""", op.node_def)
op = self._lib.apply_op("AttrDefault", a="kiwi", name="c")
self.assertProtoEquals("""
name: 'c' op: 'AttrDefault' attr { key: 'a' value { s: 'kiwi' } }
""", op.node_def)
def testAttrListDefault(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrListDefault", a=None, name="b")
self.assertProtoEquals("""
name: 'b' op: 'AttrListDefault'
attr { key: 'a' value { list { i: 5 i: 15 } } }
""", op.node_def)
op = self._lib.apply_op("AttrListDefault", a=[3], name="a")
self.assertProtoEquals("""
name: 'a' op: 'AttrListDefault'
attr { key: 'a' value { list { i: 3 } } }
""", op.node_def)
op = self._lib.apply_op("AttrListDefault", a=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'AttrListDefault'
attr { key: 'a' value { list { } } }
""", op.node_def)
def testAttrEmptyListDefault(self):
with ops.Graph().as_default():
op = self._lib.apply_op("AttrEmptyListDefault", a=None, name="b")
self.assertProtoEquals("""
name: 'b' op: 'AttrEmptyListDefault'
attr { key: 'a' value { list { } } }
""", op.node_def)
op = self._lib.apply_op("AttrEmptyListDefault", a=[3], name="a")
self.assertProtoEquals("""
name: 'a' op: 'AttrEmptyListDefault'
attr { key: 'a' value { list { f: 3 } } }
""", op.node_def)
op = self._lib.apply_op("AttrEmptyListDefault", a=[], name="empty")
self.assertProtoEquals("""
name: 'empty' op: 'AttrEmptyListDefault'
attr { key: 'a' value { list { } } }
""", op.node_def)
def testReservedAttr(self):
with ops.Graph().as_default():
op = self._lib.apply_op("ReservedAttr", range_=7, name="x")
self.assertProtoEquals("""
name: 'x' op: 'ReservedAttr' attr { key: 'range' value { i: 7 } }
""", op.node_def)
def testDefaultAttrType(self):
with ops.Graph().as_default():
# Give an input whose type has no obvious output type.
op = self._lib.apply_op("AttrTypeDefault", a=[], name="n")
self.assertProtoEquals("""
name: 'n' op: 'AttrTypeDefault' input: 'n/a'
attr { key: 'T' value { type: DT_INT32 } }
""", op.node_def)
# Give an input whose type can be inferred as different
# than the default.
op = self._lib.apply_op("AttrTypeDefault", a=[1.0], name="f")
self.assertProtoEquals("""
name: 'f' op: 'AttrTypeDefault' input: 'f/a'
attr { key: 'T' value { type: DT_FLOAT } }
""", op.node_def)
def testDefaultListAttrType(self):
with ops.Graph().as_default():
# Give an input whose type can be inferred as different
# than the default.
op = self._lib.apply_op("AttrListTypeDefault", a=[1.0], b=[2.0], name="n")
self.assertProtoEquals("""
name: 'n' op: 'AttrListTypeDefault' input: 'n/a_0' input: 'n/b_0'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'N' value { i: 1 } }
""", op.node_def)
def testNIntsIn(self):
with ops.Graph().as_default():
op = self._lib.apply_op("NIntsIn", a=[1, 2], name="n")
self.assertProtoEquals("""
name: 'n' op: 'NIntsIn' input: 'n/a_0' input: 'n/a_1'
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NIntsIn", a=[5, 4, 3, 2, 1], name="o")
self.assertProtoEquals("""
name: 'o' op: 'NIntsIn'
input: 'o/a_0' input: 'o/a_1' input: 'o/a_2' input: 'o/a_3' input: 'o/a_4'
attr { key: 'N' value { i: 5 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn", a=["foo", "bar"])
self.assertEqual(
str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op have types "
"[string, string] that do not match expected type int32.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn",
a=[self.Tensor(dtypes.string),
self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op have "
"types [string, string] that do not match expected type "
"int32.")
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NIntsIn", a=[99])
self.assertEqual(str(cm.exception),
"List argument 'a' to 'NIntsIn' Op "
"with length 1 shorter than "
"minimum length 2.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn", a=[38, "bar"])
self.assertEqual(
str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op have types "
"[int32, string] that do not match expected type int32.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn",
a=[self.Tensor(dtypes.int32),
self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NIntsIn' Op "
"have types [int32, string] that do not match expected "
"type int32.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsIn", a=17)
self.assertStartsWith(str(cm.exception),
"Expected list for 'a' argument "
"to 'NIntsIn' Op, not ")
def testNPolymorphicIn(self):
with ops.Graph().as_default():
op = self._lib.apply_op("NPolymorphicIn", a=[1, 2], name="n")
self.assertProtoEquals("""
name: 'n' op: 'NPolymorphicIn' input: 'n/a_0' input: 'n/a_1'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicIn", a=[5, 4, 3, 2, 1], name="o")
self.assertProtoEquals("""
name: 'o' op: 'NPolymorphicIn'
input: 'o/a_0' input: 'o/a_1' input: 'o/a_2' input: 'o/a_3' input: 'o/a_4'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 5 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicIn", a=["foo", "bar"], name="p")
self.assertProtoEquals("""
name: 'p' op: 'NPolymorphicIn' input: 'p/a_0' input: 'p/a_1'
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicIn",
a=[1, self.Tensor(dtypes.float32, name="x")],
name="q")
self.assertProtoEquals("""
name: 'q' op: 'NPolymorphicIn' input: 'q/a_0' input: 'x'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicIn",
a=[self.Tensor(dtypes.float32, name="y"),
self.Tensor(dtypes.float32_ref, name="z")],
name="r")
self.assertProtoEquals("""
name: 'r' op: 'NPolymorphicIn' input: 'y' input: 'z'
attr { key: 'T' value { type: DT_FLOAT } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NPolymorphicIn", a=[99])
self.assertEqual(str(cm.exception),
"List argument 'a' to 'NPolymorphicIn' Op with length 1 "
"shorter than minimum length 2.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn", a=[38, "bar"])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [int32, string] that don't all match.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn", a=[38, self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [int32, string] that don't all match.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn", a=[38, None])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [int32, <NOT CONVERTIBLE TO TENSOR>] that "
"don't all match.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn",
a=["abcd", self.Tensor(dtypes.int32)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'a' of 'NPolymorphicIn' Op "
"have types [string, int32] that don't all match.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicIn", a=17)
self.assertStartsWith(str(cm.exception),
"Expected list for 'a' argument "
"to 'NPolymorphicIn' Op, not ")
def testNPolymorphicRestrictIn(self):
with ops.Graph().as_default():
op = self._lib.apply_op("NPolymorphicRestrictIn", a=["foo", "bar"],
name="p")
self.assertProtoEquals("""
name: 'p' op: 'NPolymorphicRestrictIn' input: 'p/a_0' input: 'p/a_1'
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NPolymorphicRestrictIn",
a=[False, True, False],
name="b")
self.assertProtoEquals("""
name: 'b' op: 'NPolymorphicRestrictIn'
input: 'b/a_0' input: 'b/a_1' input: 'b/a_2'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 3 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicRestrictIn", a=[1, 2])
self.assertEqual(
str(cm.exception),
"Value passed to parameter 'a' has DataType int32 not in "
"list of allowed values: string, bool")
def testNInTwice(self):
with ops.Graph().as_default():
op = self._lib.apply_op("NInTwice", a=[1, 2], b=["one", "two"], name="n")
self.assertProtoEquals("""
name: 'n' op: 'NInTwice'
input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NInTwice", a=[], b=[], name="o")
self.assertProtoEquals("""
name: 'o' op: 'NInTwice' attr { key: 'N' value { i: 0 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NInTwice", a=[1, 2, 3], b=["too short"])
self.assertEqual(str(cm.exception),
"List argument 'b' to 'NInTwice' Op "
"with length 1 must match "
"length 3 of argument 'a'.")
def testNInPolymorphicTwice(self):
with ops.Graph().as_default():
op = self._lib.apply_op("NInPolymorphicTwice", a=[1, 2], b=[3, 4],
name="n")
self.assertProtoEquals("""
name: 'n' op: 'NInPolymorphicTwice'
input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NInPolymorphicTwice", a=[1, 2, 3], b=[5])
self.assertEqual(str(cm.exception),
"List argument 'b' to 'NInPolymorphicTwice' Op "
"with length 1 "
"must match length 3 of argument 'a'.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NInPolymorphicTwice", a=[1, 2], b=["one", "two"])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'b' of 'NInPolymorphicTwice' "
"Op have types [string, string] that do not match type "
"int32 inferred from earlier arguments.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NInPolymorphicTwice",
a=[self.Tensor(dtypes.int32)],
b=[self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'b' of "
"'NInPolymorphicTwice' Op have types [string] that do "
"not match type int32 inferred from earlier arguments.")
def testNInTwoTypeVariables(self):
with ops.Graph().as_default():
op = self._lib.apply_op("NInTwoTypeVariables",
a=[1, 2],
b=[True, False],
name="n")
self.assertProtoEquals("""
name: 'n' op: 'NInTwoTypeVariables'
input: 'n/a_0' input: 'n/a_1' input: 'n/b_0' input: 'n/b_1'
attr { key: 'S' value { type: DT_INT32 } }
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NInTwoTypeVariables", a=[1, 2], b=[3, 4],
name="o")
self.assertProtoEquals("""
name: 'o' op: 'NInTwoTypeVariables'
input: 'o/a_0' input: 'o/a_1' input: 'o/b_0' input: 'o/b_1'
attr { key: 'S' value { type: DT_INT32 } }
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", op.node_def)
op = self._lib.apply_op("NInTwoTypeVariables",
a=[self.Tensor(dtypes.int32, name="q")],
b=[self.Tensor(dtypes.string, name="r")],
name="p")
self.assertProtoEquals("""
name: 'p' op: 'NInTwoTypeVariables' input: 'q' input: 'r'
attr { key: 'S' value { type: DT_INT32 } }
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 1 } }
""", op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NInTwoTypeVariables", a=[1, 2, 3], b=["5"])
self.assertEqual(str(cm.exception),
"List argument 'b' to 'NInTwoTypeVariables' Op "
"with length 1 "
"must match length 3 of argument 'a'.")
def testInPolymorphicTwice(self):
with ops.Graph().as_default():
op = self._lib.apply_op("InPolymorphicTwice", a=[8], b=[3, 4, 5],
name="n")
self.assertProtoEquals("""
name: 'n' op: 'InPolymorphicTwice'
input: 'n/a_0' input: 'n/b_0' input: 'n/b_1' input: 'n/b_2'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 1 } }
attr { key: 'M' value { i: 3 } }
""", op.node_def)
op = self._lib.apply_op("InPolymorphicTwice", a=[8], b=[], name="o")
self.assertProtoEquals("""
name: 'o' op: 'InPolymorphicTwice' input: 'o/a_0'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 1 } }
attr { key: 'M' value { i: 0 } }
""", op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("InPolymorphicTwice", a=[], b=[3, 4, 5])
self.assertEqual(str(cm.exception),
"Don't know how to infer type variable from empty input "
"list passed to input 'a' of 'InPolymorphicTwice' Op.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("InPolymorphicTwice", a=[1, 2], b=["one", "two"])
self.assertEqual(
str(cm.exception),
"Tensors in list passed to 'b' of 'InPolymorphicTwice' Op "
"have types [string, string] that do not match type int32 "
"inferred from earlier arguments.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("InPolymorphicTwice",
a=[self.Tensor(dtypes.int32)],
b=[self.Tensor(dtypes.string)])
self.assertEqual(str(cm.exception),
"Tensors in list passed to 'b' of 'InPolymorphicTwice' "
"Op have types [string] that do not match type int32 "
"inferred from earlier arguments.")
def testNIntsOut(self):
with ops.Graph().as_default():
out1, out2 = self._lib.apply_op("NIntsOut", N=2, name="n")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 'n' op: 'NIntsOut' attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3, out4, out5 = self._lib.apply_op(
"NIntsOut", N=5, name="o")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertEqual(dtypes.int32, out3.dtype)
self.assertEqual(dtypes.int32, out4.dtype)
self.assertEqual(dtypes.int32, out5.dtype)
self.assertProtoEquals("""
name: 'o' op: 'NIntsOut' attr { key: 'N' value { i: 5 } }
""", out5.op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NIntsOut", N=1)
self.assertEqual(
str(cm.exception),
"Attr 'N' of 'NIntsOut' Op passed 1 less than minimum 2.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NIntsOut", N=[3])
self.assertEqual(str(cm.exception),
"Expected int for argument 'N' not [3].")
def testNIntsOutDefault(self):
with ops.Graph().as_default():
out1, out2, out3 = self._lib.apply_op(
"NIntsOutDefault", N=None, name="z")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertEqual(dtypes.int32, out3.dtype)
self.assertProtoEquals("""
name: 'z' op: 'NIntsOutDefault' attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
out1, out2 = self._lib.apply_op("NIntsOutDefault", N=2, name="y")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 'y' op: 'NIntsOutDefault' attr { key: 'N' value { i: 2 } }
""", out2.op.node_def)
def testNPolymorphicOut(self):
with ops.Graph().as_default():
out1, out2 = self._lib.apply_op("NPolymorphicOut",
N=2,
T=dtypes.int32,
name="n")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 'n' op: 'NPolymorphicOut'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3 = self._lib.apply_op(
"NPolymorphicOut", T=dtypes.string, N=3, name="o")
self.assertEqual(dtypes.string, out1.dtype)
self.assertEqual(dtypes.string, out2.dtype)
self.assertEqual(dtypes.string, out3.dtype)
self.assertProtoEquals("""
name: 'o' op: 'NPolymorphicOut'
attr { key: 'T' value { type: DT_STRING } }
attr { key: 'N' value { i: 3 } }
""", out3.op.node_def)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("NPolymorphicOut", N=1, T=dtypes.string)
self.assertEqual(str(cm.exception),
"Attr 'N' of 'NPolymorphicOut' Op "
"passed 1 less than minimum 2.")
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicOut", N=3, T=[dtypes.string])
self.assertEqual(
str(cm.exception),
"Expected DataType for argument 'T' not [tf.string].")
def testNPolymorphicOutDefault(self):
with ops.Graph().as_default():
out1, out2 = self._lib.apply_op(
"NPolymorphicOutDefault", N=None, T=None, name="r")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertProtoEquals("""
name: 'r' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3 = self._lib.apply_op(
"NPolymorphicOutDefault", N=3, T=None, name="s")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertEqual(dtypes.bool, out3.dtype)
self.assertProtoEquals("""
name: 's' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
out1, out2 = self._lib.apply_op(
"NPolymorphicOutDefault", N=None, T=dtypes.int32, name="t")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertProtoEquals("""
name: 't' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 2 } }
""", out1.op.node_def)
out1, out2, out3 = self._lib.apply_op(
"NPolymorphicOutDefault", N=3, T=dtypes.int32, name="u")
self.assertEqual(dtypes.int32, out1.dtype)
self.assertEqual(dtypes.int32, out2.dtype)
self.assertEqual(dtypes.int32, out3.dtype)
self.assertProtoEquals("""
name: 'u' op: 'NPolymorphicOutDefault'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
def testNPolymorphicRestrictOut(self):
with ops.Graph().as_default():
out1, out2, out3 = self._lib.apply_op(
"NPolymorphicRestrictOut", N=3, T=dtypes.bool, name="u")
self.assertEqual(dtypes.bool, out1.dtype)
self.assertEqual(dtypes.bool, out2.dtype)
self.assertEqual(dtypes.bool, out3.dtype)
self.assertProtoEquals("""
name: 'u' op: 'NPolymorphicRestrictOut'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: 'N' value { i: 3 } }
""", out1.op.node_def)
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("NPolymorphicRestrictOut", N=2, T=dtypes.int32)
self.assertEqual(str(cm.exception),
"Value passed to parameter 'T' has DataType int32 "
"not in list of allowed values: string, bool")
def testRef(self):
with ops.Graph().as_default():
out = self._lib.apply_op("RefOut", T=dtypes.bool, name="o")
self.assertEqual(dtypes.bool_ref, out.dtype)
self.assertProtoEquals("""
name: 'o' op: 'RefOut'
attr { key: 'T' value { type: DT_BOOL } }
""", out.op.node_def)
op = self._lib.apply_op("RefIn", a=out, name="i")
self.assertProtoEquals("""
name: 'i' op: 'RefIn' input: 'o'
attr { key: 'T' value { type: DT_BOOL } }
attr { key: "_class" value { list { s: "loc:@o" } } }
""", op.node_def)
# Can pass ref to non-ref input.
out = self._lib.apply_op("RefOut", T=dtypes.int32, name="r")
out = self._lib.apply_op("Simple", a=out, name="s")
self.assertProtoEquals("""
name: 's' op: 'Simple' input: 'r'
""", out.op.node_def)
# Can't pass non-ref to ref input.
with self.assertRaises(TypeError) as cm:
self._lib.apply_op("RefIn", a=2)
self.assertEqual(
str(cm.exception),
"'RefIn' Op requires that input 'a' be a mutable tensor " +
"(e.g.: a tf.Variable)")
input_a = self._lib.apply_op("RefOut", T=dtypes.int32, name="t")
input_b = self._lib.apply_op("RefOut", T=dtypes.int32, name="u")
op = self._lib.apply_op("TwoRefsIn", a=input_a, b=input_b, name="v")
# NOTE(mrry): The order of colocation constraints is an implementation
# detail.
self.assertProtoEquals("""
name: 'v' op: 'TwoRefsIn' input: 't' input: 'u'
attr { key: 'T' value { type: DT_INT32 } }
attr { key: "_class" value { list { s: "loc:@t" s: "loc:@u" } } }
""", op.node_def)
def testSpecifyDevice(self):
graph = ops.Graph()
with graph.as_default():
with graph.device("/job:ADevice"):
self._lib.apply_op("Simple", a=3)
# We look at the whole graph here to make sure the Const op is also given
# the specified device.
graph_def = graph.as_graph_def()
self.assertEqual(len(graph_def.node), 2)
for node in graph_def.node:
self.assertDeviceEqual(node.device, "/job:ADevice")
def testStructuredOutputSingleList(self):
with ops.Graph().as_default():
for n_a in [0, 1, 3]:
a = self._lib.apply_op("SimpleStruct", n_a=n_a)
self.assertTrue(isinstance(a, list))
self.assertEqual(n_a, len(a))
def testStructuredOutputListAndSingle(self):
with ops.Graph().as_default():
for n_a in [0, 1, 3]:
a, b = self._lib.apply_op("MixedStruct", n_a=n_a)
self.assertTrue(isinstance(a, list))
self.assertEqual(n_a, len(a))
self.assertTrue(all(x.dtype == dtypes.int32 for x in a))
self.assertTrue(isinstance(b, ops.Tensor))
self.assertEqual(dtypes.float32, b.dtype)
def testStructuredOutputMultipleLists(self):
with ops.Graph().as_default():
for n_a in [0, 1, 3]:
for n_b in [0, 1, 3]:
for t_c in [[],
[dtypes.int32],
[dtypes.int32, dtypes.float32]]:
a, b, c = self._lib.apply_op("ComplexStruct",
n_a=n_a,
n_b=n_b,
t_c=t_c)
self.assertEqual(n_a, len(a))
self.assertTrue(all(x.dtype == dtypes.int32 for x in a))
self.assertEqual(n_b, len(b))
self.assertTrue(all(x.dtype == dtypes.int64 for x in b))
self.assertEqual(t_c, [x.dtype for x in c])
class OpDefLibraryGraphTest(test_util.TensorFlowTestCase):
def setUp(self):
self._lib = test_ops._op_def_lib
def _add_op(self, ascii): # pylint: disable=redefined-builtin
op_def = op_def_pb2.OpDef()
text_format.Merge(ascii, op_def)
self._lib.add_op(op_def)
def testNoGraph(self):
out = self._lib.apply_op("Simple", a=3)
self.assertEqual(out.graph, ops.get_default_graph())
def testDefaultGraph(self):
graph = ops.Graph()
with graph.as_default():
out = self._lib.apply_op("Simple", a=3)
self.assertEqual(out.graph, graph)
def testDifferentGraphFails(self):
with ops.Graph().as_default():
a = self._lib.apply_op("Simple", a=3)
with ops.Graph().as_default():
b = self._lib.apply_op("Simple", a=4)
with self.assertRaises(ValueError) as cm:
self._lib.apply_op("Binary", a=a, b=b)
self.assertTrue("must be from the same graph" in str(cm.exception))
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
neiudemo1/django | tests/template_tests/syntax_tests/test_load.py | 475 | 3378 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from ..utils import setup
class LoadTagTests(SimpleTestCase):
libraries = {
'subpackage.echo': 'template_tests.templatetags.subpackage.echo',
'testtags': 'template_tests.templatetags.testtags',
}
@setup({'load01': '{% load testtags subpackage.echo %}{% echo test %} {% echo2 "test" %}'})
def test_load01(self):
output = self.engine.render_to_string('load01')
self.assertEqual(output, 'test test')
@setup({'load02': '{% load subpackage.echo %}{% echo2 "test" %}'})
def test_load02(self):
output = self.engine.render_to_string('load02')
self.assertEqual(output, 'test')
# {% load %} tag, importing individual tags
@setup({'load03': '{% load echo from testtags %}{% echo this that theother %}'})
def test_load03(self):
output = self.engine.render_to_string('load03')
self.assertEqual(output, 'this that theother')
@setup({'load04': '{% load echo other_echo from testtags %}'
'{% echo this that theother %} {% other_echo and another thing %}'})
def test_load04(self):
output = self.engine.render_to_string('load04')
self.assertEqual(output, 'this that theother and another thing')
@setup({'load05': '{% load echo upper from testtags %}'
'{% echo this that theother %} {{ statement|upper }}'})
def test_load05(self):
output = self.engine.render_to_string('load05', {'statement': 'not shouting'})
self.assertEqual(output, 'this that theother NOT SHOUTING')
@setup({'load06': '{% load echo2 from subpackage.echo %}{% echo2 "test" %}'})
def test_load06(self):
output = self.engine.render_to_string('load06')
self.assertEqual(output, 'test')
# {% load %} tag errors
@setup({'load07': '{% load echo other_echo bad_tag from testtags %}'})
def test_load07(self):
msg = "'bad_tag' is not a valid tag or filter in tag library 'testtags'"
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.get_template('load07')
@setup({'load08': '{% load echo other_echo bad_tag from %}'})
def test_load08(self):
msg = "'echo' is not a registered tag library. Must be one of:\nsubpackage.echo\ntesttags"
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.get_template('load08')
@setup({'load09': '{% load from testtags %}'})
def test_load09(self):
msg = "'from' is not a registered tag library. Must be one of:\nsubpackage.echo\ntesttags"
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.get_template('load09')
@setup({'load10': '{% load echo from bad_library %}'})
def test_load10(self):
msg = "'bad_library' is not a registered tag library. Must be one of:\nsubpackage.echo\ntesttags"
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.get_template('load10')
@setup({'load12': '{% load subpackage.missing %}'})
def test_load12(self):
msg = "'subpackage.missing' is not a registered tag library. Must be one of:\nsubpackage.echo\ntesttags"
with self.assertRaisesMessage(TemplateSyntaxError, msg):
self.engine.get_template('load12')
| bsd-3-clause |
bestvibes/neo4j-social-network | mac_env/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/euctwfreq.py | 3133 | 34872 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# EUCTW frequency table
# Converted from big5 work
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# The Typical Distribution Ratio is about 25% of the Ideal one, still much higher than RDR
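# For reference, the arithmetic behind the ratios above:
#   0.74851 / (1 - 0.74851) = 2.976... ~= 2.98
#   512 / (5401 - 512) = 512 / 4889 = 0.1047... ~= 0.105
# and 0.75 / 2.98 ~= 0.25, i.e. the typical ratio used below is ~25% of ideal.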
EUCTW_TYPICAL_DISTRIBUTION_RATIO = 0.75
# Char to FreqOrder table ,
EUCTW_TABLE_SIZE = 8102
EUCTWCharToFreqOrder = (
1,1800,1506, 255,1431, 198, 9, 82, 6,7310, 177, 202,3615,1256,2808, 110, # 2742
3735, 33,3241, 261, 76, 44,2113, 16,2931,2184,1176, 659,3868, 26,3404,2643, # 2758
1198,3869,3313,4060, 410,2211, 302, 590, 361,1963, 8, 204, 58,4296,7311,1931, # 2774
63,7312,7313, 317,1614, 75, 222, 159,4061,2412,1480,7314,3500,3068, 224,2809, # 2790
3616, 3, 10,3870,1471, 29,2774,1135,2852,1939, 873, 130,3242,1123, 312,7315, # 2806
4297,2051, 507, 252, 682,7316, 142,1914, 124, 206,2932, 34,3501,3173, 64, 604, # 2822
7317,2494,1976,1977, 155,1990, 645, 641,1606,7318,3405, 337, 72, 406,7319, 80, # 2838
630, 238,3174,1509, 263, 939,1092,2644, 756,1440,1094,3406, 449, 69,2969, 591, # 2854
179,2095, 471, 115,2034,1843, 60, 50,2970, 134, 806,1868, 734,2035,3407, 180, # 2870
995,1607, 156, 537,2893, 688,7320, 319,1305, 779,2144, 514,2374, 298,4298, 359, # 2886
2495, 90,2707,1338, 663, 11, 906,1099,2545, 20,2436, 182, 532,1716,7321, 732, # 2902
1376,4062,1311,1420,3175, 25,2312,1056, 113, 399, 382,1949, 242,3408,2467, 529, # 2918
3243, 475,1447,3617,7322, 117, 21, 656, 810,1297,2295,2329,3502,7323, 126,4063, # 2934
706, 456, 150, 613,4299, 71,1118,2036,4064, 145,3069, 85, 835, 486,2114,1246, # 2950
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,7324,2127,2354, 347,3736, 221, # 2966
3503,3110,7325,1955,1153,4065, 83, 296,1199,3070, 192, 624, 93,7326, 822,1897, # 2982
2810,3111, 795,2064, 991,1554,1542,1592, 27, 43,2853, 859, 139,1456, 860,4300, # 2998
437, 712,3871, 164,2392,3112, 695, 211,3017,2096, 195,3872,1608,3504,3505,3618, # 3014
3873, 234, 811,2971,2097,3874,2229,1441,3506,1615,2375, 668,2076,1638, 305, 228, # 3030
1664,4301, 467, 415,7327, 262,2098,1593, 239, 108, 300, 200,1033, 512,1247,2077, # 3046
7328,7329,2173,3176,3619,2673, 593, 845,1062,3244, 88,1723,2037,3875,1950, 212, # 3062
266, 152, 149, 468,1898,4066,4302, 77, 187,7330,3018, 37, 5,2972,7331,3876, # 3078
7332,7333, 39,2517,4303,2894,3177,2078, 55, 148, 74,4304, 545, 483,1474,1029, # 3094
1665, 217,1869,1531,3113,1104,2645,4067, 24, 172,3507, 900,3877,3508,3509,4305, # 3110
32,1408,2811,1312, 329, 487,2355,2247,2708, 784,2674, 4,3019,3314,1427,1788, # 3126
188, 109, 499,7334,3620,1717,1789, 888,1217,3020,4306,7335,3510,7336,3315,1520, # 3142
3621,3878, 196,1034, 775,7337,7338, 929,1815, 249, 439, 38,7339,1063,7340, 794, # 3158
3879,1435,2296, 46, 178,3245,2065,7341,2376,7342, 214,1709,4307, 804, 35, 707, # 3174
324,3622,1601,2546, 140, 459,4068,7343,7344,1365, 839, 272, 978,2257,2572,3409, # 3190
2128,1363,3623,1423, 697, 100,3071, 48, 70,1231, 495,3114,2193,7345,1294,7346, # 3206
2079, 462, 586,1042,3246, 853, 256, 988, 185,2377,3410,1698, 434,1084,7347,3411, # 3222
314,2615,2775,4308,2330,2331, 569,2280, 637,1816,2518, 757,1162,1878,1616,3412, # 3238
287,1577,2115, 768,4309,1671,2854,3511,2519,1321,3737, 909,2413,7348,4069, 933, # 3254
3738,7349,2052,2356,1222,4310, 765,2414,1322, 786,4311,7350,1919,1462,1677,2895, # 3270
1699,7351,4312,1424,2437,3115,3624,2590,3316,1774,1940,3413,3880,4070, 309,1369, # 3286
1130,2812, 364,2230,1653,1299,3881,3512,3882,3883,2646, 525,1085,3021, 902,2000, # 3302
1475, 964,4313, 421,1844,1415,1057,2281, 940,1364,3116, 376,4314,4315,1381, 7, # 3318
2520, 983,2378, 336,1710,2675,1845, 321,3414, 559,1131,3022,2742,1808,1132,1313, # 3334
265,1481,1857,7352, 352,1203,2813,3247, 167,1089, 420,2814, 776, 792,1724,3513, # 3350
4071,2438,3248,7353,4072,7354, 446, 229, 333,2743, 901,3739,1200,1557,4316,2647, # 3366
1920, 395,2744,2676,3740,4073,1835, 125, 916,3178,2616,4317,7355,7356,3741,7357, # 3382
7358,7359,4318,3117,3625,1133,2547,1757,3415,1510,2313,1409,3514,7360,2145, 438, # 3398
2591,2896,2379,3317,1068, 958,3023, 461, 311,2855,2677,4074,1915,3179,4075,1978, # 3414
383, 750,2745,2617,4076, 274, 539, 385,1278,1442,7361,1154,1964, 384, 561, 210, # 3430
98,1295,2548,3515,7362,1711,2415,1482,3416,3884,2897,1257, 129,7363,3742, 642, # 3446
523,2776,2777,2648,7364, 141,2231,1333, 68, 176, 441, 876, 907,4077, 603,2592, # 3462
710, 171,3417, 404, 549, 18,3118,2393,1410,3626,1666,7365,3516,4319,2898,4320, # 3478
7366,2973, 368,7367, 146, 366, 99, 871,3627,1543, 748, 807,1586,1185, 22,2258, # 3494
379,3743,3180,7368,3181, 505,1941,2618,1991,1382,2314,7369, 380,2357, 218, 702, # 3510
1817,1248,3418,3024,3517,3318,3249,7370,2974,3628, 930,3250,3744,7371, 59,7372, # 3526
585, 601,4078, 497,3419,1112,1314,4321,1801,7373,1223,1472,2174,7374, 749,1836, # 3542
690,1899,3745,1772,3885,1476, 429,1043,1790,2232,2116, 917,4079, 447,1086,1629, # 3558
7375, 556,7376,7377,2020,1654, 844,1090, 105, 550, 966,1758,2815,1008,1782, 686, # 3574
1095,7378,2282, 793,1602,7379,3518,2593,4322,4080,2933,2297,4323,3746, 980,2496, # 3590
544, 353, 527,4324, 908,2678,2899,7380, 381,2619,1942,1348,7381,1341,1252, 560, # 3606
3072,7382,3420,2856,7383,2053, 973, 886,2080, 143,4325,7384,7385, 157,3886, 496, # 3622
4081, 57, 840, 540,2038,4326,4327,3421,2117,1445, 970,2259,1748,1965,2081,4082, # 3638
3119,1234,1775,3251,2816,3629, 773,1206,2129,1066,2039,1326,3887,1738,1725,4083, # 3654
279,3120, 51,1544,2594, 423,1578,2130,2066, 173,4328,1879,7386,7387,1583, 264, # 3670
610,3630,4329,2439, 280, 154,7388,7389,7390,1739, 338,1282,3073, 693,2857,1411, # 3686
1074,3747,2440,7391,4330,7392,7393,1240, 952,2394,7394,2900,1538,2679, 685,1483, # 3702
4084,2468,1436, 953,4085,2054,4331, 671,2395, 79,4086,2441,3252, 608, 567,2680, # 3718
3422,4087,4088,1691, 393,1261,1791,2396,7395,4332,7396,7397,7398,7399,1383,1672, # 3734
3748,3182,1464, 522,1119, 661,1150, 216, 675,4333,3888,1432,3519, 609,4334,2681, # 3750
2397,7400,7401,7402,4089,3025, 0,7403,2469, 315, 231,2442, 301,3319,4335,2380, # 3766
7404, 233,4090,3631,1818,4336,4337,7405, 96,1776,1315,2082,7406, 257,7407,1809, # 3782
3632,2709,1139,1819,4091,2021,1124,2163,2778,1777,2649,7408,3074, 363,1655,3183, # 3798
7409,2975,7410,7411,7412,3889,1567,3890, 718, 103,3184, 849,1443, 341,3320,2934, # 3814
1484,7413,1712, 127, 67, 339,4092,2398, 679,1412, 821,7414,7415, 834, 738, 351, # 3830
2976,2146, 846, 235,1497,1880, 418,1992,3749,2710, 186,1100,2147,2746,3520,1545, # 3846
1355,2935,2858,1377, 583,3891,4093,2573,2977,7416,1298,3633,1078,2549,3634,2358, # 3862
78,3750,3751, 267,1289,2099,2001,1594,4094, 348, 369,1274,2194,2175,1837,4338, # 3878
1820,2817,3635,2747,2283,2002,4339,2936,2748, 144,3321, 882,4340,3892,2749,3423, # 3894
4341,2901,7417,4095,1726, 320,7418,3893,3026, 788,2978,7419,2818,1773,1327,2859, # 3910
3894,2819,7420,1306,4342,2003,1700,3752,3521,2359,2650, 787,2022, 506, 824,3636, # 3926
534, 323,4343,1044,3322,2023,1900, 946,3424,7421,1778,1500,1678,7422,1881,4344, # 3942
165, 243,4345,3637,2521, 123, 683,4096, 764,4346, 36,3895,1792, 589,2902, 816, # 3958
626,1667,3027,2233,1639,1555,1622,3753,3896,7423,3897,2860,1370,1228,1932, 891, # 3974
2083,2903, 304,4097,7424, 292,2979,2711,3522, 691,2100,4098,1115,4347, 118, 662, # 3990
7425, 611,1156, 854,2381,1316,2861, 2, 386, 515,2904,7426,7427,3253, 868,2234, # 4006
1486, 855,2651, 785,2212,3028,7428,1040,3185,3523,7429,3121, 448,7430,1525,7431, # 4022
2164,4348,7432,3754,7433,4099,2820,3524,3122, 503, 818,3898,3123,1568, 814, 676, # 4038
1444, 306,1749,7434,3755,1416,1030, 197,1428, 805,2821,1501,4349,7435,7436,7437, # 4054
1993,7438,4350,7439,7440,2195, 13,2779,3638,2980,3124,1229,1916,7441,3756,2131, # 4070
7442,4100,4351,2399,3525,7443,2213,1511,1727,1120,7444,7445, 646,3757,2443, 307, # 4086
7446,7447,1595,3186,7448,7449,7450,3639,1113,1356,3899,1465,2522,2523,7451, 519, # 4102
7452, 128,2132, 92,2284,1979,7453,3900,1512, 342,3125,2196,7454,2780,2214,1980, # 4118
3323,7455, 290,1656,1317, 789, 827,2360,7456,3758,4352, 562, 581,3901,7457, 401, # 4134
4353,2248, 94,4354,1399,2781,7458,1463,2024,4355,3187,1943,7459, 828,1105,4101, # 4150
1262,1394,7460,4102, 605,4356,7461,1783,2862,7462,2822, 819,2101, 578,2197,2937, # 4166
7463,1502, 436,3254,4103,3255,2823,3902,2905,3425,3426,7464,2712,2315,7465,7466, # 4182
2332,2067, 23,4357, 193, 826,3759,2102, 699,1630,4104,3075, 390,1793,1064,3526, # 4198
7467,1579,3076,3077,1400,7468,4105,1838,1640,2863,7469,4358,4359, 137,4106, 598, # 4214
3078,1966, 780, 104, 974,2938,7470, 278, 899, 253, 402, 572, 504, 493,1339,7471, # 4230
3903,1275,4360,2574,2550,7472,3640,3029,3079,2249, 565,1334,2713, 863, 41,7473, # 4246
7474,4361,7475,1657,2333, 19, 463,2750,4107, 606,7476,2981,3256,1087,2084,1323, # 4262
2652,2982,7477,1631,1623,1750,4108,2682,7478,2864, 791,2714,2653,2334, 232,2416, # 4278
7479,2983,1498,7480,2654,2620, 755,1366,3641,3257,3126,2025,1609, 119,1917,3427, # 4294
862,1026,4109,7481,3904,3760,4362,3905,4363,2260,1951,2470,7482,1125, 817,4110, # 4310
4111,3906,1513,1766,2040,1487,4112,3030,3258,2824,3761,3127,7483,7484,1507,7485, # 4326
2683, 733, 40,1632,1106,2865, 345,4113, 841,2524, 230,4364,2984,1846,3259,3428, # 4342
7486,1263, 986,3429,7487, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562,3907, # 4358
3908,2939, 967,2751,2655,1349, 592,2133,1692,3324,2985,1994,4114,1679,3909,1901, # 4374
2185,7488, 739,3642,2715,1296,1290,7489,4115,2198,2199,1921,1563,2595,2551,1870, # 4390
2752,2986,7490, 435,7491, 343,1108, 596, 17,1751,4365,2235,3430,3643,7492,4366, # 4406
294,3527,2940,1693, 477, 979, 281,2041,3528, 643,2042,3644,2621,2782,2261,1031, # 4422
2335,2134,2298,3529,4367, 367,1249,2552,7493,3530,7494,4368,1283,3325,2004, 240, # 4438
1762,3326,4369,4370, 836,1069,3128, 474,7495,2148,2525, 268,3531,7496,3188,1521, # 4454
1284,7497,1658,1546,4116,7498,3532,3533,7499,4117,3327,2684,1685,4118, 961,1673, # 4470
2622, 190,2005,2200,3762,4371,4372,7500, 570,2497,3645,1490,7501,4373,2623,3260, # 4486
1956,4374, 584,1514, 396,1045,1944,7502,4375,1967,2444,7503,7504,4376,3910, 619, # 4502
7505,3129,3261, 215,2006,2783,2553,3189,4377,3190,4378, 763,4119,3763,4379,7506, # 4518
7507,1957,1767,2941,3328,3646,1174, 452,1477,4380,3329,3130,7508,2825,1253,2382, # 4534
2186,1091,2285,4120, 492,7509, 638,1169,1824,2135,1752,3911, 648, 926,1021,1324, # 4550
4381, 520,4382, 997, 847,1007, 892,4383,3764,2262,1871,3647,7510,2400,1784,4384, # 4566
1952,2942,3080,3191,1728,4121,2043,3648,4385,2007,1701,3131,1551, 30,2263,4122, # 4582
7511,2026,4386,3534,7512, 501,7513,4123, 594,3431,2165,1821,3535,3432,3536,3192, # 4598
829,2826,4124,7514,1680,3132,1225,4125,7515,3262,4387,4126,3133,2336,7516,4388, # 4614
4127,7517,3912,3913,7518,1847,2383,2596,3330,7519,4389, 374,3914, 652,4128,4129, # 4630
375,1140, 798,7520,7521,7522,2361,4390,2264, 546,1659, 138,3031,2445,4391,7523, # 4646
2250, 612,1848, 910, 796,3765,1740,1371, 825,3766,3767,7524,2906,2554,7525, 692, # 4662
444,3032,2624, 801,4392,4130,7526,1491, 244,1053,3033,4131,4132, 340,7527,3915, # 4678
1041,2987, 293,1168, 87,1357,7528,1539, 959,7529,2236, 721, 694,4133,3768, 219, # 4694
1478, 644,1417,3331,2656,1413,1401,1335,1389,3916,7530,7531,2988,2362,3134,1825, # 4710
730,1515, 184,2827, 66,4393,7532,1660,2943, 246,3332, 378,1457, 226,3433, 975, # 4726
3917,2944,1264,3537, 674, 696,7533, 163,7534,1141,2417,2166, 713,3538,3333,4394, # 4742
3918,7535,7536,1186, 15,7537,1079,1070,7538,1522,3193,3539, 276,1050,2716, 758, # 4758
1126, 653,2945,3263,7539,2337, 889,3540,3919,3081,2989, 903,1250,4395,3920,3434, # 4774
3541,1342,1681,1718, 766,3264, 286, 89,2946,3649,7540,1713,7541,2597,3334,2990, # 4790
7542,2947,2215,3194,2866,7543,4396,2498,2526, 181, 387,1075,3921, 731,2187,3335, # 4806
7544,3265, 310, 313,3435,2299, 770,4134, 54,3034, 189,4397,3082,3769,3922,7545, # 4822
1230,1617,1849, 355,3542,4135,4398,3336, 111,4136,3650,1350,3135,3436,3035,4137, # 4838
2149,3266,3543,7546,2784,3923,3924,2991, 722,2008,7547,1071, 247,1207,2338,2471, # 4854
1378,4399,2009, 864,1437,1214,4400, 373,3770,1142,2216, 667,4401, 442,2753,2555, # 4870
3771,3925,1968,4138,3267,1839, 837, 170,1107, 934,1336,1882,7548,7549,2118,4139, # 4886
2828, 743,1569,7550,4402,4140, 582,2384,1418,3437,7551,1802,7552, 357,1395,1729, # 4902
3651,3268,2418,1564,2237,7553,3083,3772,1633,4403,1114,2085,4141,1532,7554, 482, # 4918
2446,4404,7555,7556,1492, 833,1466,7557,2717,3544,1641,2829,7558,1526,1272,3652, # 4934
4142,1686,1794, 416,2556,1902,1953,1803,7559,3773,2785,3774,1159,2316,7560,2867, # 4950
4405,1610,1584,3036,2419,2754, 443,3269,1163,3136,7561,7562,3926,7563,4143,2499, # 4966
3037,4406,3927,3137,2103,1647,3545,2010,1872,4144,7564,4145, 431,3438,7565, 250, # 4982
97, 81,4146,7566,1648,1850,1558, 160, 848,7567, 866, 740,1694,7568,2201,2830, # 4998
3195,4147,4407,3653,1687, 950,2472, 426, 469,3196,3654,3655,3928,7569,7570,1188, # 5014
424,1995, 861,3546,4148,3775,2202,2685, 168,1235,3547,4149,7571,2086,1674,4408, # 5030
3337,3270, 220,2557,1009,7572,3776, 670,2992, 332,1208, 717,7573,7574,3548,2447, # 5046
3929,3338,7575, 513,7576,1209,2868,3339,3138,4409,1080,7577,7578,7579,7580,2527, # 5062
3656,3549, 815,1587,3930,3931,7581,3550,3439,3777,1254,4410,1328,3038,1390,3932, # 5078
1741,3933,3778,3934,7582, 236,3779,2448,3271,7583,7584,3657,3780,1273,3781,4411, # 5094
7585, 308,7586,4412, 245,4413,1851,2473,1307,2575, 430, 715,2136,2449,7587, 270, # 5110
199,2869,3935,7588,3551,2718,1753, 761,1754, 725,1661,1840,4414,3440,3658,7589, # 5126
7590, 587, 14,3272, 227,2598, 326, 480,2265, 943,2755,3552, 291, 650,1883,7591, # 5142
1702,1226, 102,1547, 62,3441, 904,4415,3442,1164,4150,7592,7593,1224,1548,2756, # 5158
391, 498,1493,7594,1386,1419,7595,2055,1177,4416, 813, 880,1081,2363, 566,1145, # 5174
4417,2286,1001,1035,2558,2599,2238, 394,1286,7596,7597,2068,7598, 86,1494,1730, # 5190
3936, 491,1588, 745, 897,2948, 843,3340,3937,2757,2870,3273,1768, 998,2217,2069, # 5206
397,1826,1195,1969,3659,2993,3341, 284,7599,3782,2500,2137,2119,1903,7600,3938, # 5222
2150,3939,4151,1036,3443,1904, 114,2559,4152, 209,1527,7601,7602,2949,2831,2625, # 5238
2385,2719,3139, 812,2560,7603,3274,7604,1559, 737,1884,3660,1210, 885, 28,2686, # 5254
3553,3783,7605,4153,1004,1779,4418,7606, 346,1981,2218,2687,4419,3784,1742, 797, # 5270
1642,3940,1933,1072,1384,2151, 896,3941,3275,3661,3197,2871,3554,7607,2561,1958, # 5286
4420,2450,1785,7608,7609,7610,3942,4154,1005,1308,3662,4155,2720,4421,4422,1528, # 5302
2600, 161,1178,4156,1982, 987,4423,1101,4157, 631,3943,1157,3198,2420,1343,1241, # 5318
1016,2239,2562, 372, 877,2339,2501,1160, 555,1934, 911,3944,7611, 466,1170, 169, # 5334
1051,2907,2688,3663,2474,2994,1182,2011,2563,1251,2626,7612, 992,2340,3444,1540, # 5350
2721,1201,2070,2401,1996,2475,7613,4424, 528,1922,2188,1503,1873,1570,2364,3342, # 5366
3276,7614, 557,1073,7615,1827,3445,2087,2266,3140,3039,3084, 767,3085,2786,4425, # 5382
1006,4158,4426,2341,1267,2176,3664,3199, 778,3945,3200,2722,1597,2657,7616,4427, # 5398
7617,3446,7618,7619,7620,3277,2689,1433,3278, 131, 95,1504,3946, 723,4159,3141, # 5414
1841,3555,2758,2189,3947,2027,2104,3665,7621,2995,3948,1218,7622,3343,3201,3949, # 5430
4160,2576, 248,1634,3785, 912,7623,2832,3666,3040,3786, 654, 53,7624,2996,7625, # 5446
1688,4428, 777,3447,1032,3950,1425,7626, 191, 820,2120,2833, 971,4429, 931,3202, # 5462
135, 664, 783,3787,1997, 772,2908,1935,3951,3788,4430,2909,3203, 282,2723, 640, # 5478
1372,3448,1127, 922, 325,3344,7627,7628, 711,2044,7629,7630,3952,2219,2787,1936, # 5494
3953,3345,2220,2251,3789,2300,7631,4431,3790,1258,3279,3954,3204,2138,2950,3955, # 5510
3956,7632,2221, 258,3205,4432, 101,1227,7633,3280,1755,7634,1391,3281,7635,2910, # 5526
2056, 893,7636,7637,7638,1402,4161,2342,7639,7640,3206,3556,7641,7642, 878,1325, # 5542
1780,2788,4433, 259,1385,2577, 744,1183,2267,4434,7643,3957,2502,7644, 684,1024, # 5558
4162,7645, 472,3557,3449,1165,3282,3958,3959, 322,2152, 881, 455,1695,1152,1340, # 5574
660, 554,2153,4435,1058,4436,4163, 830,1065,3346,3960,4437,1923,7646,1703,1918, # 5590
7647, 932,2268, 122,7648,4438, 947, 677,7649,3791,2627, 297,1905,1924,2269,4439, # 5606
2317,3283,7650,7651,4164,7652,4165, 84,4166, 112, 989,7653, 547,1059,3961, 701, # 5622
3558,1019,7654,4167,7655,3450, 942, 639, 457,2301,2451, 993,2951, 407, 851, 494, # 5638
4440,3347, 927,7656,1237,7657,2421,3348, 573,4168, 680, 921,2911,1279,1874, 285, # 5654
790,1448,1983, 719,2167,7658,7659,4441,3962,3963,1649,7660,1541, 563,7661,1077, # 5670
7662,3349,3041,3451, 511,2997,3964,3965,3667,3966,1268,2564,3350,3207,4442,4443, # 5686
7663, 535,1048,1276,1189,2912,2028,3142,1438,1373,2834,2952,1134,2012,7664,4169, # 5702
1238,2578,3086,1259,7665, 700,7666,2953,3143,3668,4170,7667,4171,1146,1875,1906, # 5718
4444,2601,3967, 781,2422, 132,1589, 203, 147, 273,2789,2402, 898,1786,2154,3968, # 5734
3969,7668,3792,2790,7669,7670,4445,4446,7671,3208,7672,1635,3793, 965,7673,1804, # 5750
2690,1516,3559,1121,1082,1329,3284,3970,1449,3794, 65,1128,2835,2913,2759,1590, # 5766
3795,7674,7675, 12,2658, 45, 976,2579,3144,4447, 517,2528,1013,1037,3209,7676, # 5782
3796,2836,7677,3797,7678,3452,7679,2602, 614,1998,2318,3798,3087,2724,2628,7680, # 5798
2580,4172, 599,1269,7681,1810,3669,7682,2691,3088, 759,1060, 489,1805,3351,3285, # 5814
1358,7683,7684,2386,1387,1215,2629,2252, 490,7685,7686,4173,1759,2387,2343,7687, # 5830
4448,3799,1907,3971,2630,1806,3210,4449,3453,3286,2760,2344, 874,7688,7689,3454, # 5846
3670,1858, 91,2914,3671,3042,3800,4450,7690,3145,3972,2659,7691,3455,1202,1403, # 5862
3801,2954,2529,1517,2503,4451,3456,2504,7692,4452,7693,2692,1885,1495,1731,3973, # 5878
2365,4453,7694,2029,7695,7696,3974,2693,1216, 237,2581,4174,2319,3975,3802,4454, # 5894
4455,2694,3560,3457, 445,4456,7697,7698,7699,7700,2761, 61,3976,3672,1822,3977, # 5910
7701, 687,2045, 935, 925, 405,2660, 703,1096,1859,2725,4457,3978,1876,1367,2695, # 5926
3352, 918,2105,1781,2476, 334,3287,1611,1093,4458, 564,3146,3458,3673,3353, 945, # 5942
2631,2057,4459,7702,1925, 872,4175,7703,3459,2696,3089, 349,4176,3674,3979,4460, # 5958
3803,4177,3675,2155,3980,4461,4462,4178,4463,2403,2046, 782,3981, 400, 251,4179, # 5974
1624,7704,7705, 277,3676, 299,1265, 476,1191,3804,2121,4180,4181,1109, 205,7706, # 5990
2582,1000,2156,3561,1860,7707,7708,7709,4464,7710,4465,2565, 107,2477,2157,3982, # 6006
3460,3147,7711,1533, 541,1301, 158, 753,4182,2872,3562,7712,1696, 370,1088,4183, # 6022
4466,3563, 579, 327, 440, 162,2240, 269,1937,1374,3461, 968,3043, 56,1396,3090, # 6038
2106,3288,3354,7713,1926,2158,4467,2998,7714,3564,7715,7716,3677,4468,2478,7717, # 6054
2791,7718,1650,4469,7719,2603,7720,7721,3983,2661,3355,1149,3356,3984,3805,3985, # 6070
7722,1076, 49,7723, 951,3211,3289,3290, 450,2837, 920,7724,1811,2792,2366,4184, # 6086
1908,1138,2367,3806,3462,7725,3212,4470,1909,1147,1518,2423,4471,3807,7726,4472, # 6102
2388,2604, 260,1795,3213,7727,7728,3808,3291, 708,7729,3565,1704,7730,3566,1351, # 6118
1618,3357,2999,1886, 944,4185,3358,4186,3044,3359,4187,7731,3678, 422, 413,1714, # 6134
3292, 500,2058,2345,4188,2479,7732,1344,1910, 954,7733,1668,7734,7735,3986,2404, # 6150
4189,3567,3809,4190,7736,2302,1318,2505,3091, 133,3092,2873,4473, 629, 31,2838, # 6166
2697,3810,4474, 850, 949,4475,3987,2955,1732,2088,4191,1496,1852,7737,3988, 620, # 6182
3214, 981,1242,3679,3360,1619,3680,1643,3293,2139,2452,1970,1719,3463,2168,7738, # 6198
3215,7739,7740,3361,1828,7741,1277,4476,1565,2047,7742,1636,3568,3093,7743, 869, # 6214
2839, 655,3811,3812,3094,3989,3000,3813,1310,3569,4477,7744,7745,7746,1733, 558, # 6230
4478,3681, 335,1549,3045,1756,4192,3682,1945,3464,1829,1291,1192, 470,2726,2107, # 6246
2793, 913,1054,3990,7747,1027,7748,3046,3991,4479, 982,2662,3362,3148,3465,3216, # 6262
3217,1946,2794,7749, 571,4480,7750,1830,7751,3570,2583,1523,2424,7752,2089, 984, # 6278
4481,3683,1959,7753,3684, 852, 923,2795,3466,3685, 969,1519, 999,2048,2320,1705, # 6294
7754,3095, 615,1662, 151, 597,3992,2405,2321,1049, 275,4482,3686,4193, 568,3687, # 6310
3571,2480,4194,3688,7755,2425,2270, 409,3218,7756,1566,2874,3467,1002, 769,2840, # 6326
194,2090,3149,3689,2222,3294,4195, 628,1505,7757,7758,1763,2177,3001,3993, 521, # 6342
1161,2584,1787,2203,2406,4483,3994,1625,4196,4197, 412, 42,3096, 464,7759,2632, # 6358
4484,3363,1760,1571,2875,3468,2530,1219,2204,3814,2633,2140,2368,4485,4486,3295, # 6374
1651,3364,3572,7760,7761,3573,2481,3469,7762,3690,7763,7764,2271,2091, 460,7765, # 6390
4487,7766,3002, 962, 588,3574, 289,3219,2634,1116, 52,7767,3047,1796,7768,7769, # 6406
7770,1467,7771,1598,1143,3691,4198,1984,1734,1067,4488,1280,3365, 465,4489,1572, # 6422
510,7772,1927,2241,1812,1644,3575,7773,4490,3692,7774,7775,2663,1573,1534,7776, # 6438
7777,4199, 536,1807,1761,3470,3815,3150,2635,7778,7779,7780,4491,3471,2915,1911, # 6454
2796,7781,3296,1122, 377,3220,7782, 360,7783,7784,4200,1529, 551,7785,2059,3693, # 6470
1769,2426,7786,2916,4201,3297,3097,2322,2108,2030,4492,1404, 136,1468,1479, 672, # 6486
1171,3221,2303, 271,3151,7787,2762,7788,2049, 678,2727, 865,1947,4493,7789,2013, # 6502
3995,2956,7790,2728,2223,1397,3048,3694,4494,4495,1735,2917,3366,3576,7791,3816, # 6518
509,2841,2453,2876,3817,7792,7793,3152,3153,4496,4202,2531,4497,2304,1166,1010, # 6534
552, 681,1887,7794,7795,2957,2958,3996,1287,1596,1861,3154, 358, 453, 736, 175, # 6550
478,1117, 905,1167,1097,7796,1853,1530,7797,1706,7798,2178,3472,2287,3695,3473, # 6566
3577,4203,2092,4204,7799,3367,1193,2482,4205,1458,2190,2205,1862,1888,1421,3298, # 6582
2918,3049,2179,3474, 595,2122,7800,3997,7801,7802,4206,1707,2636, 223,3696,1359, # 6598
751,3098, 183,3475,7803,2797,3003, 419,2369, 633, 704,3818,2389, 241,7804,7805, # 6614
7806, 838,3004,3697,2272,2763,2454,3819,1938,2050,3998,1309,3099,2242,1181,7807, # 6630
1136,2206,3820,2370,1446,4207,2305,4498,7808,7809,4208,1055,2605, 484,3698,7810, # 6646
3999, 625,4209,2273,3368,1499,4210,4000,7811,4001,4211,3222,2274,2275,3476,7812, # 6662
7813,2764, 808,2606,3699,3369,4002,4212,3100,2532, 526,3370,3821,4213, 955,7814, # 6678
1620,4214,2637,2427,7815,1429,3700,1669,1831, 994, 928,7816,3578,1260,7817,7818, # 6694
7819,1948,2288, 741,2919,1626,4215,2729,2455, 867,1184, 362,3371,1392,7820,7821, # 6710
4003,4216,1770,1736,3223,2920,4499,4500,1928,2698,1459,1158,7822,3050,3372,2877, # 6726
1292,1929,2506,2842,3701,1985,1187,2071,2014,2607,4217,7823,2566,2507,2169,3702, # 6742
2483,3299,7824,3703,4501,7825,7826, 666,1003,3005,1022,3579,4218,7827,4502,1813, # 6758
2253, 574,3822,1603, 295,1535, 705,3823,4219, 283, 858, 417,7828,7829,3224,4503, # 6774
4504,3051,1220,1889,1046,2276,2456,4004,1393,1599, 689,2567, 388,4220,7830,2484, # 6790
802,7831,2798,3824,2060,1405,2254,7832,4505,3825,2109,1052,1345,3225,1585,7833, # 6806
809,7834,7835,7836, 575,2730,3477, 956,1552,1469,1144,2323,7837,2324,1560,2457, # 6822
3580,3226,4005, 616,2207,3155,2180,2289,7838,1832,7839,3478,4506,7840,1319,3704, # 6838
3705,1211,3581,1023,3227,1293,2799,7841,7842,7843,3826, 607,2306,3827, 762,2878, # 6854
1439,4221,1360,7844,1485,3052,7845,4507,1038,4222,1450,2061,2638,4223,1379,4508, # 6870
2585,7846,7847,4224,1352,1414,2325,2921,1172,7848,7849,3828,3829,7850,1797,1451, # 6886
7851,7852,7853,7854,2922,4006,4007,2485,2346, 411,4008,4009,3582,3300,3101,4509, # 6902
1561,2664,1452,4010,1375,7855,7856, 47,2959, 316,7857,1406,1591,2923,3156,7858, # 6918
1025,2141,3102,3157, 354,2731, 884,2224,4225,2407, 508,3706, 726,3583, 996,2428, # 6934
3584, 729,7859, 392,2191,1453,4011,4510,3707,7860,7861,2458,3585,2608,1675,2800, # 6950
919,2347,2960,2348,1270,4511,4012, 73,7862,7863, 647,7864,3228,2843,2255,1550, # 6966
1346,3006,7865,1332, 883,3479,7866,7867,7868,7869,3301,2765,7870,1212, 831,1347, # 6982
4226,4512,2326,3830,1863,3053, 720,3831,4513,4514,3832,7871,4227,7872,7873,4515, # 6998
7874,7875,1798,4516,3708,2609,4517,3586,1645,2371,7876,7877,2924, 669,2208,2665, # 7014
2429,7878,2879,7879,7880,1028,3229,7881,4228,2408,7882,2256,1353,7883,7884,4518, # 7030
3158, 518,7885,4013,7886,4229,1960,7887,2142,4230,7888,7889,3007,2349,2350,3833, # 7046
516,1833,1454,4014,2699,4231,4519,2225,2610,1971,1129,3587,7890,2766,7891,2961, # 7062
1422, 577,1470,3008,1524,3373,7892,7893, 432,4232,3054,3480,7894,2586,1455,2508, # 7078
2226,1972,1175,7895,1020,2732,4015,3481,4520,7896,2733,7897,1743,1361,3055,3482, # 7094
2639,4016,4233,4521,2290, 895, 924,4234,2170, 331,2243,3056, 166,1627,3057,1098, # 7110
7898,1232,2880,2227,3374,4522, 657, 403,1196,2372, 542,3709,3375,1600,4235,3483, # 7126
7899,4523,2767,3230, 576, 530,1362,7900,4524,2533,2666,3710,4017,7901, 842,3834, # 7142
7902,2801,2031,1014,4018, 213,2700,3376, 665, 621,4236,7903,3711,2925,2430,7904, # 7158
2431,3302,3588,3377,7905,4237,2534,4238,4525,3589,1682,4239,3484,1380,7906, 724, # 7174
2277, 600,1670,7907,1337,1233,4526,3103,2244,7908,1621,4527,7909, 651,4240,7910, # 7190
1612,4241,2611,7911,2844,7912,2734,2307,3058,7913, 716,2459,3059, 174,1255,2701, # 7206
4019,3590, 548,1320,1398, 728,4020,1574,7914,1890,1197,3060,4021,7915,3061,3062, # 7222
3712,3591,3713, 747,7916, 635,4242,4528,7917,7918,7919,4243,7920,7921,4529,7922, # 7238
3378,4530,2432, 451,7923,3714,2535,2072,4244,2735,4245,4022,7924,1764,4531,7925, # 7254
4246, 350,7926,2278,2390,2486,7927,4247,4023,2245,1434,4024, 488,4532, 458,4248, # 7270
4025,3715, 771,1330,2391,3835,2568,3159,2159,2409,1553,2667,3160,4249,7928,2487, # 7286
2881,2612,1720,2702,4250,3379,4533,7929,2536,4251,7930,3231,4252,2768,7931,2015, # 7302
2736,7932,1155,1017,3716,3836,7933,3303,2308, 201,1864,4253,1430,7934,4026,7935, # 7318
7936,7937,7938,7939,4254,1604,7940, 414,1865, 371,2587,4534,4535,3485,2016,3104, # 7334
4536,1708, 960,4255, 887, 389,2171,1536,1663,1721,7941,2228,4027,2351,2926,1580, # 7350
7942,7943,7944,1744,7945,2537,4537,4538,7946,4539,7947,2073,7948,7949,3592,3380, # 7366
2882,4256,7950,4257,2640,3381,2802, 673,2703,2460, 709,3486,4028,3593,4258,7951, # 7382
1148, 502, 634,7952,7953,1204,4540,3594,1575,4541,2613,3717,7954,3718,3105, 948, # 7398
3232, 121,1745,3837,1110,7955,4259,3063,2509,3009,4029,3719,1151,1771,3838,1488, # 7414
4030,1986,7956,2433,3487,7957,7958,2093,7959,4260,3839,1213,1407,2803, 531,2737, # 7430
2538,3233,1011,1537,7960,2769,4261,3106,1061,7961,3720,3721,1866,2883,7962,2017, # 7446
120,4262,4263,2062,3595,3234,2309,3840,2668,3382,1954,4542,7963,7964,3488,1047, # 7462
2704,1266,7965,1368,4543,2845, 649,3383,3841,2539,2738,1102,2846,2669,7966,7967, # 7478
1999,7968,1111,3596,2962,7969,2488,3842,3597,2804,1854,3384,3722,7970,7971,3385, # 7494
2410,2884,3304,3235,3598,7972,2569,7973,3599,2805,4031,1460, 856,7974,3600,7975, # 7510
2885,2963,7976,2886,3843,7977,4264, 632,2510, 875,3844,1697,3845,2291,7978,7979, # 7526
4544,3010,1239, 580,4545,4265,7980, 914, 936,2074,1190,4032,1039,2123,7981,7982, # 7542
7983,3386,1473,7984,1354,4266,3846,7985,2172,3064,4033, 915,3305,4267,4268,3306, # 7558
1605,1834,7986,2739, 398,3601,4269,3847,4034, 328,1912,2847,4035,3848,1331,4270, # 7574
3011, 937,4271,7987,3602,4036,4037,3387,2160,4546,3388, 524, 742, 538,3065,1012, # 7590
7988,7989,3849,2461,7990, 658,1103, 225,3850,7991,7992,4547,7993,4548,7994,3236, # 7606
1243,7995,4038, 963,2246,4549,7996,2705,3603,3161,7997,7998,2588,2327,7999,4550, # 7622
8000,8001,8002,3489,3307, 957,3389,2540,2032,1930,2927,2462, 870,2018,3604,1746, # 7638
2770,2771,2434,2463,8003,3851,8004,3723,3107,3724,3490,3390,3725,8005,1179,3066, # 7654
8006,3162,2373,4272,3726,2541,3163,3108,2740,4039,8007,3391,1556,2542,2292, 977, # 7670
2887,2033,4040,1205,3392,8008,1765,3393,3164,2124,1271,1689, 714,4551,3491,8009, # 7686
2328,3852, 533,4273,3605,2181, 617,8010,2464,3308,3492,2310,8011,8012,3165,8013, # 7702
8014,3853,1987, 618, 427,2641,3493,3394,8015,8016,1244,1690,8017,2806,4274,4552, # 7718
8018,3494,8019,8020,2279,1576, 473,3606,4275,3395, 972,8021,3607,8022,3067,8023, # 7734
8024,4553,4554,8025,3727,4041,4042,8026, 153,4555, 356,8027,1891,2888,4276,2143, # 7750
408, 803,2352,8028,3854,8029,4277,1646,2570,2511,4556,4557,3855,8030,3856,4278, # 7766
8031,2411,3396, 752,8032,8033,1961,2964,8034, 746,3012,2465,8035,4279,3728, 698, # 7782
4558,1892,4280,3608,2543,4559,3609,3857,8036,3166,3397,8037,1823,1302,4043,2706, # 7798
3858,1973,4281,8038,4282,3167, 823,1303,1288,1236,2848,3495,4044,3398, 774,3859, # 7814
8039,1581,4560,1304,2849,3860,4561,8040,2435,2161,1083,3237,4283,4045,4284, 344, # 7830
1173, 288,2311, 454,1683,8041,8042,1461,4562,4046,2589,8043,8044,4563, 985, 894, # 7846
8045,3399,3168,8046,1913,2928,3729,1988,8047,2110,1974,8048,4047,8049,2571,1194, # 7862
425,8050,4564,3169,1245,3730,4285,8051,8052,2850,8053, 636,4565,1855,3861, 760, # 7878
1799,8054,4286,2209,1508,4566,4048,1893,1684,2293,8055,8056,8057,4287,4288,2210, # 7894
479,8058,8059, 832,8060,4049,2489,8061,2965,2490,3731, 990,3109, 627,1814,2642, # 7910
4289,1582,4290,2125,2111,3496,4567,8062, 799,4291,3170,8063,4568,2112,1737,3013, # 7926
1018, 543, 754,4292,3309,1676,4569,4570,4050,8064,1489,8065,3497,8066,2614,2889, # 7942
4051,8067,8068,2966,8069,8070,8071,8072,3171,4571,4572,2182,1722,8073,3238,3239, # 7958
1842,3610,1715, 481, 365,1975,1856,8074,8075,1962,2491,4573,8076,2126,3611,3240, # 7974
433,1894,2063,2075,8077, 602,2741,8078,8079,8080,8081,8082,3014,1628,3400,8083, # 7990
3172,4574,4052,2890,4575,2512,8084,2544,2772,8085,8086,8087,3310,4576,2891,8088, # 8006
4577,8089,2851,4578,4579,1221,2967,4053,2513,8090,8091,8092,1867,1989,8093,8094, # 8022
8095,1895,8096,8097,4580,1896,4054, 318,8098,2094,4055,4293,8099,8100, 485,8101, # 8038
938,3862, 553,2670, 116,8102,3863,3612,8103,3498,2671,2773,3401,3311,2807,8104, # 8054
3613,2929,4056,1747,2930,2968,8105,8106, 207,8107,8108,2672,4581,2514,8109,3015, # 8070
890,3614,3864,8110,1877,3732,3402,8111,2183,2353,3403,1652,8112,8113,8114, 941, # 8086
2294, 208,3499,4057,2019, 330,4294,3865,2892,2492,3733,4295,8115,8116,8117,8118, # 8102
#Everything below is of no interest for detection purpose
2515,1613,4582,8119,3312,3866,2516,8120,4058,8121,1637,4059,2466,4583,3867,8122, # 8118
2493,3016,3734,8123,8124,2192,8125,8126,2162,8127,8128,8129,8130,8131,8132,8133, # 8134
8134,8135,8136,8137,8138,8139,8140,8141,8142,8143,8144,8145,8146,8147,8148,8149, # 8150
8150,8151,8152,8153,8154,8155,8156,8157,8158,8159,8160,8161,8162,8163,8164,8165, # 8166
8166,8167,8168,8169,8170,8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181, # 8182
8182,8183,8184,8185,8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197, # 8198
8198,8199,8200,8201,8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213, # 8214
8214,8215,8216,8217,8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229, # 8230
8230,8231,8232,8233,8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245, # 8246
8246,8247,8248,8249,8250,8251,8252,8253,8254,8255,8256,8257,8258,8259,8260,8261, # 8262
8262,8263,8264,8265,8266,8267,8268,8269,8270,8271,8272,8273,8274,8275,8276,8277, # 8278
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,8290,8291,8292,8293, # 8294
8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,8308,8309, # 8310
8310,8311,8312,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322,8323,8324,8325, # 8326
8326,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337,8338,8339,8340,8341, # 8342
8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353,8354,8355,8356,8357, # 8358
8358,8359,8360,8361,8362,8363,8364,8365,8366,8367,8368,8369,8370,8371,8372,8373, # 8374
8374,8375,8376,8377,8378,8379,8380,8381,8382,8383,8384,8385,8386,8387,8388,8389, # 8390
8390,8391,8392,8393,8394,8395,8396,8397,8398,8399,8400,8401,8402,8403,8404,8405, # 8406
8406,8407,8408,8409,8410,8411,8412,8413,8414,8415,8416,8417,8418,8419,8420,8421, # 8422
8422,8423,8424,8425,8426,8427,8428,8429,8430,8431,8432,8433,8434,8435,8436,8437, # 8438
8438,8439,8440,8441,8442,8443,8444,8445,8446,8447,8448,8449,8450,8451,8452,8453, # 8454
8454,8455,8456,8457,8458,8459,8460,8461,8462,8463,8464,8465,8466,8467,8468,8469, # 8470
8470,8471,8472,8473,8474,8475,8476,8477,8478,8479,8480,8481,8482,8483,8484,8485, # 8486
8486,8487,8488,8489,8490,8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501, # 8502
8502,8503,8504,8505,8506,8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517, # 8518
8518,8519,8520,8521,8522,8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533, # 8534
8534,8535,8536,8537,8538,8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549, # 8550
8550,8551,8552,8553,8554,8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,8565, # 8566
8566,8567,8568,8569,8570,8571,8572,8573,8574,8575,8576,8577,8578,8579,8580,8581, # 8582
8582,8583,8584,8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597, # 8598
8598,8599,8600,8601,8602,8603,8604,8605,8606,8607,8608,8609,8610,8611,8612,8613, # 8614
8614,8615,8616,8617,8618,8619,8620,8621,8622,8623,8624,8625,8626,8627,8628,8629, # 8630
8630,8631,8632,8633,8634,8635,8636,8637,8638,8639,8640,8641,8642,8643,8644,8645, # 8646
8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,8657,8658,8659,8660,8661, # 8662
8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672,8673,8674,8675,8676,8677, # 8678
8678,8679,8680,8681,8682,8683,8684,8685,8686,8687,8688,8689,8690,8691,8692,8693, # 8694
8694,8695,8696,8697,8698,8699,8700,8701,8702,8703,8704,8705,8706,8707,8708,8709, # 8710
8710,8711,8712,8713,8714,8715,8716,8717,8718,8719,8720,8721,8722,8723,8724,8725, # 8726
8726,8727,8728,8729,8730,8731,8732,8733,8734,8735,8736,8737,8738,8739,8740,8741) # 8742
# flake8: noqa
| mit |
kubeflow/kfp-tekton | components/gcp/container/component_sdk/python/kfp_component/google/dataproc/_submit_hadoop_job.py | 1 | 2945 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._submit_job import submit_job
def submit_hadoop_job(project_id, region, cluster_name, job_id_output_path,
main_jar_file_uri=None, main_class=None, args=[], hadoop_job={}, job={},
wait_interval=30):
"""Submits a Cloud Dataproc job for running Apache Hadoop MapReduce jobs
on Apache Hadoop YARN.
Args:
project_id (str): Required. The ID of the Google Cloud Platform project
that the cluster belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the
request.
cluster_name (str): Required. The cluster to run the job.
main_jar_file_uri (str): The HCFS URI of the jar file containing the main
class. Examples:
`gs://foo-bucket/analytics-binaries/extract-useful-metrics-mr.jar`
`hdfs:/tmp/test-samples/custom-wordcount.jar`
`file:///home/usr/lib/hadoop-mapreduce/hadoop-mapreduce-examples.jar`
main_class (str): The name of the driver's main class. The jar file
containing the class must be in the default CLASSPATH or specified
in `jarFileUris`.
args (list): Optional. The arguments to pass to the driver. Do not include
arguments, such as -libjars or -Dfoo=bar, that can be set as job properties,
since a collision may occur that causes an incorrect job submission.
hadoop_job (dict): Optional. The full payload of a [hadoop job](
https://cloud.google.com/dataproc/docs/reference/rest/v1/HadoopJob).
job (dict): Optional. The full payload of a [Dataproc job](
https://cloud.google.com/dataproc/docs/reference/rest/v1/projects.regions.jobs).
wait_interval (int): Number of seconds to wait between polls of the
operation. Defaults to 30.
job_id_output_path (str): Path for the ID of the created job
Returns:
The created job payload.
"""
if not hadoop_job:
hadoop_job = {}
if not job:
job = {}
if main_jar_file_uri:
hadoop_job['mainJarFileUri'] = main_jar_file_uri
if main_class:
hadoop_job['mainClass'] = main_class
if args:
hadoop_job['args'] = args
job['hadoopJob'] = hadoop_job
return submit_job(project_id, region, cluster_name, job, wait_interval, job_id_output_path=job_id_output_path) | apache-2.0 |
agriggio/pysmt | setup.py | 1 | 2121 | from setuptools import setup, find_packages
import pysmt
long_description=\
"""============================================================
pySMT: A library for SMT formulae manipulation and solving
============================================================
pySMT makes working with Satisfiability Modulo Theories simple.
Among others, you can:
* Define formulae in a solver-independent, simple and
  intuitive way,
* Write ad-hoc simplifiers and operators,
* Dump your problems in the SMT-Lib format,
* Solve them using one of the native solvers, or by wrapping any
  SMT-Lib compliant solver.
Supported Theories and Solvers
==============================
pySMT provides methods to define a formula in Linear Real Arithmetic (LRA),
Real Difference Logic (RDL), their combination (LIRA),
Equalities and Uninterpreted Functions (EUF), Bit-Vectors (BV), and Arrays (A).
The following solvers are supported through native APIs:
* MathSAT (http://mathsat.fbk.eu/)
* Z3 (https://github.com/Z3Prover/z3/)
* CVC4 (http://cvc4.cs.nyu.edu/web/)
* Yices 2 (http://yices.csl.sri.com/)
* CUDD (http://vlsi.colorado.edu/~fabio/CUDD/)
* PicoSAT (http://fmv.jku.at/picosat/)
* Boolector (http://fmv.jku.at/boolector/)
Additionally, you can use any SMT-LIB 2 compliant solver.
PySMT assumes that the python bindings for the SMT Solver are installed and
accessible from your PYTHONPATH.
pySMT works on both Python 3 and Python 2.
Wanna know more?
================
Visit http://www.pysmt.org
"""
setup(
name='PySMT',
version=pysmt.__version__,
author='PySMT Team',
author_email='[email protected]',
packages = find_packages(),
include_package_data = True,
url='http://www.pysmt.org',
license='APACHE',
description='A solver-agnostic library for SMT Formulae manipulation and solving',
long_description=long_description,
install_requires=["six"],
entry_points={
'console_scripts': [
'pysmt = pysmt.cmd.shell:main',
'pysmt-shell = pysmt.cmd.shell:main_interactive',
'pysmt-install = pysmt.cmd.install:main',
],
},
)
| apache-2.0 |
weiqiangdragonite/blog_tmp | python/flask/microblog/app/forms.py | 2 | 1204 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask_wtf import Form
from wtforms import TextField, PasswordField, BooleanField, SubmitField, TextAreaField
from wtforms.validators import Required, Length
# User is referenced in EditForm.validate below but never imported in the
# original file; this module path assumes the conventional app layout.
from app.models import User
class LoginForm(Form):
openid = TextField("openid", validators = [Required()])
# password = PasswordField("password", validators = [Required()])
remember_me = BooleanField("remember_me", default = False)
    submit = SubmitField("signin")
class EditForm(Form):
nickname = TextField("nickname", validators = [Required()])
about_me = TextAreaField("about_me", validators = [Length(min = 0, max = 140)])
def __init__(self, original_nickname, *args, **kwargs):
Form.__init__(self, *args, **kwargs)
self.original_nickname = original_nickname
def validate(self):
if not Form.validate(self):
return False
if self.nickname.data == self.original_nickname:
return True
user = User.query.filter_by(nickname = self.nickname.data).first()
if user != None:
self.nickname.errors.append("This nickname is already in use. Please choose another one.")
return False
return True
| gpl-2.0 |
trademob/boto | boto/rds/vpcsecuritygroupmembership.py | 177 | 3131 | # Copyright (c) 2013 Anthony Tonns http://www.corsis.com/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Represents a VPCSecurityGroupMembership
"""
class VPCSecurityGroupMembership(object):
"""
Represents a VPC Security Group that this RDS database is a member of.
A reference for these properties is available in the AWS documentation at
http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/\
API_VpcSecurityGroupMembership.html
Example::
pri = "sg-abcdefgh"
sec = "sg-hgfedcba"
# Create with list of str
db = c.create_dbinstance(... vpc_security_groups=[pri], ... )
# Modify with list of str
db.modify(... vpc_security_groups=[pri,sec], ... )
# Create with objects
memberships = []
membership = VPCSecurityGroupMembership()
membership.vpc_group = pri
memberships.append(membership)
db = c.create_dbinstance(... vpc_security_groups=memberships, ... )
# Modify with objects
memberships = d.vpc_security_groups
membership = VPCSecurityGroupMembership()
membership.vpc_group = sec
memberships.append(membership)
db.modify(... vpc_security_groups=memberships, ... )
:ivar connection: :py:class:`boto.rds.RDSConnection` associated with the
current object
:ivar vpc_group: The id of the VPC security group
:ivar status: Status of the VPC security group membership
"""
def __init__(self, connection=None, status=None, vpc_group=None):
self.connection = connection
self.status = status
self.vpc_group = vpc_group
def __repr__(self):
return 'VPCSecurityGroupMembership:%s' % self.vpc_group
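    # startElement/endElement are boto's SAX parsing hooks; the RDS response
    # XML carries (roughly) elements of the form
    #   <VpcSecurityGroupId>sg-abcdefgh</VpcSecurityGroupId>
    #   <Status>active</Status>
    # any element name other than these two is attached verbatim via setattr.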
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'VpcSecurityGroupId':
self.vpc_group = value
elif name == 'Status':
self.status = value
else:
setattr(self, name, value)
| mit |
dcroc16/skunk_works | google_appengine/lib/django-1.4/django/utils/unittest/loader.py | 353 | 13437 | """Loading unittests."""
import os
import re
import sys
import traceback
import types
import unittest
from fnmatch import fnmatch
from django.utils.unittest import case, suite
try:
from os.path import relpath
except ImportError:
from django.utils.unittest.compatibility import relpath
__unittest = True
def _CmpToKey(mycmp):
'Convert a cmp= function into a key= function'
class K(object):
def __init__(self, obj):
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) == -1
return K
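# e.g. names.sort(key=_CmpToKey(mycmp)) emulates Python 2's
# names.sort(cmp=mycmp) on interpreters that only accept key= functions;
# getTestCaseNames below uses it exactly this way.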
# what about .pyc or .pyo (etc)
# we would need to avoid loading the same tests multiple times
# from '.py', '.pyc' *and* '.pyo'
VALID_MODULE_NAME = re.compile(r'[_a-z]\w*\.py$', re.IGNORECASE)
def _make_failed_import_test(name, suiteClass):
message = 'Failed to import test module: %s' % name
if hasattr(traceback, 'format_exc'):
# Python 2.3 compatibility
# format_exc returns two frames of discover.py as well
message += '\n%s' % traceback.format_exc()
return _make_failed_test('ModuleImportFailure', name, ImportError(message),
suiteClass)
def _make_failed_load_tests(name, exception, suiteClass):
return _make_failed_test('LoadTestsFailure', name, exception, suiteClass)
def _make_failed_test(classname, methodname, exception, suiteClass):
def testFailure(self):
raise exception
attrs = {methodname: testFailure}
TestClass = type(classname, (case.TestCase,), attrs)
return suiteClass((TestClass(methodname),))
class TestLoader(unittest.TestLoader):
"""
This class is responsible for loading tests according to various criteria
and returning them wrapped in a TestSuite
"""
testMethodPrefix = 'test'
sortTestMethodsUsing = cmp
suiteClass = suite.TestSuite
_top_level_dir = None
def loadTestsFromTestCase(self, testCaseClass):
"""Return a suite of all tests cases contained in testCaseClass"""
if issubclass(testCaseClass, suite.TestSuite):
raise TypeError("Test cases should not be derived from TestSuite."
" Maybe you meant to derive from TestCase?")
testCaseNames = self.getTestCaseNames(testCaseClass)
if not testCaseNames and hasattr(testCaseClass, 'runTest'):
testCaseNames = ['runTest']
loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
return loaded_suite
def loadTestsFromModule(self, module, use_load_tests=True):
"""Return a suite of all tests cases contained in the given module"""
tests = []
for name in dir(module):
obj = getattr(module, name)
if isinstance(obj, type) and issubclass(obj, unittest.TestCase):
tests.append(self.loadTestsFromTestCase(obj))
load_tests = getattr(module, 'load_tests', None)
tests = self.suiteClass(tests)
if use_load_tests and load_tests is not None:
try:
return load_tests(self, tests, None)
except Exception, e:
return _make_failed_load_tests(module.__name__, e,
self.suiteClass)
return tests
def loadTestsFromName(self, name, module=None):
"""Return a suite of all tests cases given a string specifier.
The name may resolve either to a module, a test case class, a
test method within a test case class, or a callable object which
returns a TestCase or TestSuite instance.
The method optionally resolves the names relative to a given module.
"""
parts = name.split('.')
if module is None:
parts_copy = parts[:]
while parts_copy:
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[-1]
if not parts_copy:
raise
parts = parts[1:]
obj = module
for part in parts:
parent, obj = obj, getattr(obj, part)
if isinstance(obj, types.ModuleType):
return self.loadTestsFromModule(obj)
elif isinstance(obj, type) and issubclass(obj, unittest.TestCase):
return self.loadTestsFromTestCase(obj)
elif (isinstance(obj, types.UnboundMethodType) and
isinstance(parent, type) and
issubclass(parent, case.TestCase)):
return self.suiteClass([parent(obj.__name__)])
elif isinstance(obj, unittest.TestSuite):
return obj
elif hasattr(obj, '__call__'):
test = obj()
if isinstance(test, unittest.TestSuite):
return test
elif isinstance(test, unittest.TestCase):
return self.suiteClass([test])
else:
raise TypeError("calling %s returned %s, not a test" %
(obj, test))
else:
raise TypeError("don't know how to make test from: %s" % obj)
def loadTestsFromNames(self, names, module=None):
"""Return a suite of all tests cases found using the given sequence
of string specifiers. See 'loadTestsFromName()'.
"""
suites = [self.loadTestsFromName(name, module) for name in names]
return self.suiteClass(suites)
def getTestCaseNames(self, testCaseClass):
"""Return a sorted sequence of method names found within testCaseClass
"""
def isTestMethod(attrname, testCaseClass=testCaseClass,
prefix=self.testMethodPrefix):
return attrname.startswith(prefix) and \
hasattr(getattr(testCaseClass, attrname), '__call__')
testFnNames = filter(isTestMethod, dir(testCaseClass))
if self.sortTestMethodsUsing:
testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
return testFnNames
def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
"""Find and return all test modules from the specified start
directory, recursing into subdirectories to find them. Only test files
that match the pattern will be loaded. (Using shell style pattern
matching.)
All test modules must be importable from the top level of the project.
If the start directory is not the top level directory then the top
level directory must be specified separately.
If a test package name (directory with '__init__.py') matches the
pattern then the package will be checked for a 'load_tests' function. If
this exists then it will be called with loader, tests, pattern.
If load_tests exists then discovery does *not* recurse into the package,
load_tests is responsible for loading all tests in the package.
The pattern is deliberately not stored as a loader attribute so that
packages can continue discovery themselves. top_level_dir is stored so
load_tests does not need to pass this argument in to loader.discover().
"""
set_implicit_top = False
if top_level_dir is None and self._top_level_dir is not None:
# make top_level_dir optional if called from load_tests in a package
top_level_dir = self._top_level_dir
elif top_level_dir is None:
set_implicit_top = True
top_level_dir = start_dir
top_level_dir = os.path.abspath(top_level_dir)
        if top_level_dir not in sys.path:
# all test modules must be importable from the top level directory
            # should we *unconditionally* put the start directory first
            # in sys.path to minimise likelihood of conflicts between installed
# modules and development versions?
sys.path.insert(0, top_level_dir)
self._top_level_dir = top_level_dir
is_not_importable = False
if os.path.isdir(os.path.abspath(start_dir)):
start_dir = os.path.abspath(start_dir)
if start_dir != top_level_dir:
is_not_importable = not os.path.isfile(os.path.join(start_dir, '__init__.py'))
else:
# support for discovery from dotted module names
try:
__import__(start_dir)
except ImportError:
is_not_importable = True
else:
the_module = sys.modules[start_dir]
top_part = start_dir.split('.')[0]
start_dir = os.path.abspath(os.path.dirname((the_module.__file__)))
if set_implicit_top:
self._top_level_dir = os.path.abspath(os.path.dirname(os.path.dirname(sys.modules[top_part].__file__)))
sys.path.remove(top_level_dir)
if is_not_importable:
raise ImportError('Start directory is not importable: %r' % start_dir)
tests = list(self._find_tests(start_dir, pattern))
return self.suiteClass(tests)
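    # Illustrative sketch (not part of the original source): a typical discovery
    # call mirroring the docstring above; the paths are hypothetical.
    #
    #   loader = TestLoader()
    #   suite = loader.discover('myproject/tests', pattern='test_*.py',
    #                           top_level_dir='myproject')
    #   unittest.TextTestRunner().run(suite)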
def _get_name_from_path(self, path):
path = os.path.splitext(os.path.normpath(path))[0]
_relpath = relpath(path, self._top_level_dir)
assert not os.path.isabs(_relpath), "Path must be within the project"
assert not _relpath.startswith('..'), "Path must be within the project"
name = _relpath.replace(os.path.sep, '.')
return name
def _get_module_from_name(self, name):
__import__(name)
return sys.modules[name]
def _match_path(self, path, full_path, pattern):
# override this method to use alternative matching strategy
return fnmatch(path, pattern)
def _find_tests(self, start_dir, pattern):
"""Used by discovery. Yields test suites it loads."""
paths = os.listdir(start_dir)
for path in paths:
full_path = os.path.join(start_dir, path)
if os.path.isfile(full_path):
if not VALID_MODULE_NAME.match(path):
# valid Python identifiers only
continue
if not self._match_path(path, full_path, pattern):
continue
# if the test file matches, load it
name = self._get_name_from_path(full_path)
try:
module = self._get_module_from_name(name)
except:
yield _make_failed_import_test(name, self.suiteClass)
else:
mod_file = os.path.abspath(getattr(module, '__file__', full_path))
realpath = os.path.splitext(mod_file)[0]
fullpath_noext = os.path.splitext(full_path)[0]
if realpath.lower() != fullpath_noext.lower():
module_dir = os.path.dirname(realpath)
mod_name = os.path.splitext(os.path.basename(full_path))[0]
expected_dir = os.path.dirname(full_path)
msg = ("%r module incorrectly imported from %r. Expected %r. "
"Is this module globally installed?")
raise ImportError(msg % (mod_name, module_dir, expected_dir))
yield self.loadTestsFromModule(module)
elif os.path.isdir(full_path):
if not os.path.isfile(os.path.join(full_path, '__init__.py')):
continue
load_tests = None
tests = None
if fnmatch(path, pattern):
# only check load_tests if the package directory itself matches the filter
name = self._get_name_from_path(full_path)
package = self._get_module_from_name(name)
load_tests = getattr(package, 'load_tests', None)
tests = self.loadTestsFromModule(package, use_load_tests=False)
if load_tests is None:
if tests is not None:
# tests loaded from package file
yield tests
# recurse into the package
for test in self._find_tests(full_path, pattern):
yield test
else:
try:
yield load_tests(self, tests, pattern)
except Exception, e:
yield _make_failed_load_tests(package.__name__, e,
self.suiteClass)
defaultTestLoader = TestLoader()
def _makeLoader(prefix, sortUsing, suiteClass=None):
loader = TestLoader()
loader.sortTestMethodsUsing = sortUsing
loader.testMethodPrefix = prefix
if suiteClass:
loader.suiteClass = suiteClass
return loader
def getTestCaseNames(testCaseClass, prefix, sortUsing=cmp):
return _makeLoader(prefix, sortUsing).getTestCaseNames(testCaseClass)
def makeSuite(testCaseClass, prefix='test', sortUsing=cmp,
suiteClass=suite.TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromTestCase(testCaseClass)
def findTestCases(module, prefix='test', sortUsing=cmp,
suiteClass=suite.TestSuite):
return _makeLoader(prefix, sortUsing, suiteClass).loadTestsFromModule(module)
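# Illustrative sketch (not part of the original module): exercising the
# module-level helpers defined above. 'SampleTest' is a hypothetical TestCase
# written only for this demonstration.
if __name__ == '__main__':
    import unittest

    class SampleTest(unittest.TestCase):
        def test_truth(self):
            self.assertTrue(True)

    # makeSuite collects every 'test*' method on the class into a TestSuite.
    demo_suite = makeSuite(SampleTest)
    unittest.TextTestRunner().run(demo_suite)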
| mit |
soldag/home-assistant | homeassistant/components/smart_meter_texas/config_flow.py | 10 | 2811 | """Config flow for Smart Meter Texas integration."""
import asyncio
import logging
from aiohttp import ClientError
from smart_meter_texas import Account, Client
from smart_meter_texas.exceptions import (
SmartMeterTexasAPIError,
SmartMeterTexasAuthError,
)
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers import aiohttp_client
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema(
{vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}
)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
client_session = aiohttp_client.async_get_clientsession(hass)
account = Account(data["username"], data["password"])
client = Client(client_session, account)
try:
await client.authenticate()
except (asyncio.TimeoutError, ClientError, SmartMeterTexasAPIError) as error:
raise CannotConnect from error
except SmartMeterTexasAuthError as error:
raise InvalidAuth(error) from error
# Return info that you want to store in the config entry.
return {"title": account.username}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Smart Meter Texas."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
if not errors:
# Ensure the same account cannot be setup more than once.
await self.async_set_unique_id(user_input[CONF_USERNAME])
self._abort_if_unique_id_configured()
return self.async_create_entry(title=info["title"], data=user_input)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
| apache-2.0 |
patsissons/Flexget | flexget/plugins/urlrewrite_bakabt.py | 11 | 1604 | from __future__ import unicode_literals, division, absolute_import
import urllib2
import logging
from flexget import plugin
from flexget.event import event
from flexget.plugins.plugin_urlrewriting import UrlRewritingError
from flexget.utils.tools import urlopener
from flexget.utils.soup import get_soup
log = logging.getLogger('bakabt')
class UrlRewriteBakaBT(object):
"""BakaBT urlrewriter."""
# urlrewriter API
def url_rewritable(self, task, entry):
url = entry['url']
if url.startswith('http://www.bakabt.com/download/'):
return False
if url.startswith('http://www.bakabt.com/') or url.startswith('http://bakabt.com/'):
return True
return False
# urlrewriter API
def url_rewrite(self, task, entry):
entry['url'] = self.parse_download_page(entry['url'])
@plugin.internet(log)
def parse_download_page(self, url):
txheaders = {'User-agent': 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'}
req = urllib2.Request(url, None, txheaders)
page = urlopener(req, log)
try:
soup = get_soup(page)
except Exception as e:
raise UrlRewritingError(e)
tag_a = soup.find('a', attrs={'class': 'download_link'})
if not tag_a:
raise UrlRewritingError('Unable to locate download link from url %s' % url)
torrent_url = 'http://www.bakabt.com' + tag_a.get('href')
return torrent_url
@event('plugin.register')
def register_plugin():
plugin.register(UrlRewriteBakaBT, 'bakabt', groups=['urlrewriter'], api_ver=2)
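# Illustrative sketch (not part of the original plugin): url_rewritable only
# inspects entry['url'], so it can be sanity-checked with a bare dict (the
# task argument is unused there and passed as None).
if __name__ == '__main__':
    rewriter = UrlRewriteBakaBT()
    print(rewriter.url_rewritable(None, {'url': 'http://www.bakabt.com/123-example'}))   # True
    print(rewriter.url_rewritable(None, {'url': 'http://www.bakabt.com/download/123'}))  # False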
| mit |
AltSchool/django-allauth | test_settings.py | 1 | 5873 | SECRET_KEY = 'psst'
SITE_ID = 1
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
ROOT_URLCONF = 'allauth.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages'
],
},
},
]
MIDDLEWARE = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.agave',
'allauth.socialaccount.providers.amazon',
'allauth.socialaccount.providers.angellist',
'allauth.socialaccount.providers.asana',
'allauth.socialaccount.providers.auth0',
'allauth.socialaccount.providers.authentiq',
'allauth.socialaccount.providers.azure',
'allauth.socialaccount.providers.baidu',
'allauth.socialaccount.providers.basecamp',
'allauth.socialaccount.providers.battlenet',
'allauth.socialaccount.providers.bitbucket',
'allauth.socialaccount.providers.bitbucket_oauth2',
'allauth.socialaccount.providers.bitly',
'allauth.socialaccount.providers.box',
'allauth.socialaccount.providers.cern',
'allauth.socialaccount.providers.coinbase',
'allauth.socialaccount.providers.dataporten',
'allauth.socialaccount.providers.daum',
'allauth.socialaccount.providers.digitalocean',
'allauth.socialaccount.providers.discord',
'allauth.socialaccount.providers.disqus',
'allauth.socialaccount.providers.douban',
'allauth.socialaccount.providers.doximity',
'allauth.socialaccount.providers.draugiem',
'allauth.socialaccount.providers.dropbox',
'allauth.socialaccount.providers.dwolla',
'allauth.socialaccount.providers.edmodo',
'allauth.socialaccount.providers.eveonline',
'allauth.socialaccount.providers.evernote',
'allauth.socialaccount.providers.eventbrite',
'allauth.socialaccount.providers.facebook',
'allauth.socialaccount.providers.fake',
'allauth.socialaccount.providers.feedly',
'allauth.socialaccount.providers.fivehundredpx',
'allauth.socialaccount.providers.flickr',
'allauth.socialaccount.providers.foursquare',
'allauth.socialaccount.providers.fxa',
'allauth.socialaccount.providers.github',
'allauth.socialaccount.providers.gitlab',
'allauth.socialaccount.providers.globus',
'allauth.socialaccount.providers.google',
'allauth.socialaccount.providers.hubic',
'allauth.socialaccount.providers.instagram',
'allauth.socialaccount.providers.jupyterhub',
'allauth.socialaccount.providers.kakao',
'allauth.socialaccount.providers.line',
'allauth.socialaccount.providers.linkedin',
'allauth.socialaccount.providers.linkedin_oauth2',
'allauth.socialaccount.providers.mailchimp',
'allauth.socialaccount.providers.mailru',
'allauth.socialaccount.providers.meetup',
'allauth.socialaccount.providers.microsoft',
'allauth.socialaccount.providers.naver',
'allauth.socialaccount.providers.nextcloud',
'allauth.socialaccount.providers.odnoklassniki',
'allauth.socialaccount.providers.openid',
'allauth.socialaccount.providers.openstreetmap',
'allauth.socialaccount.providers.orcid',
'allauth.socialaccount.providers.patreon',
'allauth.socialaccount.providers.paypal',
'allauth.socialaccount.providers.persona',
'allauth.socialaccount.providers.pinterest',
'allauth.socialaccount.providers.quickbooks',
'allauth.socialaccount.providers.reddit',
'allauth.socialaccount.providers.robinhood',
'allauth.socialaccount.providers.salesforce',
'allauth.socialaccount.providers.sharefile',
'allauth.socialaccount.providers.shopify',
'allauth.socialaccount.providers.slack',
'allauth.socialaccount.providers.soundcloud',
'allauth.socialaccount.providers.spotify',
'allauth.socialaccount.providers.stackexchange',
'allauth.socialaccount.providers.steam',
'allauth.socialaccount.providers.strava',
'allauth.socialaccount.providers.stripe',
'allauth.socialaccount.providers.telegram',
'allauth.socialaccount.providers.trello',
'allauth.socialaccount.providers.tumblr',
'allauth.socialaccount.providers.twentythreeandme',
'allauth.socialaccount.providers.twitch',
'allauth.socialaccount.providers.twitter',
'allauth.socialaccount.providers.untappd',
'allauth.socialaccount.providers.vimeo',
'allauth.socialaccount.providers.vimeo_oauth2',
'allauth.socialaccount.providers.vk',
'allauth.socialaccount.providers.weibo',
'allauth.socialaccount.providers.weixin',
'allauth.socialaccount.providers.windowslive',
'allauth.socialaccount.providers.xing',
'allauth.socialaccount.providers.yahoo',
)
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
STATIC_ROOT = '/tmp/' # Dummy
STATIC_URL = '/static/'
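# Illustrative note (not part of the original settings): a module like this is
# normally selected via DJANGO_SETTINGS_MODULE when running the test suite,
# e.g. (hypothetical invocation):
#
#   DJANGO_SETTINGS_MODULE=test_settings python -m django test allauth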
| mit |
Tithen-Firion/youtube-dl | youtube_dl/YoutubeDL.py | 2 | 105297 | #!/usr/bin/env python
# coding: utf-8
from __future__ import absolute_import, unicode_literals
import collections
import contextlib
import copy
import datetime
import errno
import fileinput
import io
import itertools
import json
import locale
import operator
import os
import platform
import re
import shutil
import subprocess
import socket
import sys
import time
import tokenize
import traceback
import random
from .compat import (
compat_basestring,
compat_cookiejar,
compat_get_terminal_size,
compat_http_client,
compat_kwargs,
compat_numeric_types,
compat_os_name,
compat_str,
compat_tokenize_tokenize,
compat_urllib_error,
compat_urllib_request,
compat_urllib_request_DataHandler,
)
from .utils import (
age_restricted,
args_to_str,
ContentTooShortError,
date_from_str,
DateRange,
DEFAULT_OUTTMPL,
determine_ext,
determine_protocol,
DownloadError,
encode_compat_str,
encodeFilename,
error_to_compat_str,
expand_path,
ExtractorError,
format_bytes,
formatSeconds,
GeoRestrictedError,
ISO3166Utils,
locked_file,
make_HTTPS_handler,
MaxDownloadsReached,
PagedList,
parse_filesize,
PerRequestProxyHandler,
platform_name,
PostProcessingError,
preferredencoding,
prepend_extension,
register_socks_protocols,
render_table,
replace_extension,
SameFileError,
sanitize_filename,
sanitize_path,
sanitize_url,
sanitized_Request,
std_headers,
subtitles_filename,
UnavailableVideoError,
url_basename,
version_tuple,
write_json_file,
write_string,
YoutubeDLCookieProcessor,
YoutubeDLHandler,
)
from .cache import Cache
from .extractor import get_info_extractor, gen_extractor_classes, _LAZY_LOADER
from .downloader import get_suitable_downloader
from .downloader.rtmp import rtmpdump_version
from .postprocessor import (
FFmpegFixupM3u8PP,
FFmpegFixupM4aPP,
FFmpegFixupStretchedPP,
FFmpegMergerPP,
FFmpegPostProcessor,
get_postprocessor,
)
from .version import __version__
if compat_os_name == 'nt':
import ctypes
class YoutubeDL(object):
"""YoutubeDL class.
    YoutubeDL objects are the ones responsible for downloading the
actual video file and writing it to disk if the user has requested
it, among some other tasks. In most cases there should be one per
    program. Since, given a video URL, the downloader doesn't know how to
    extract all the needed information (a task that InfoExtractors do), it
    has to pass the URL to one of them.
For this, YoutubeDL objects have a method that allows
InfoExtractors to be registered in a given order. When it is passed
    a URL, the YoutubeDL object hands it to the first InfoExtractor it
finds that reports being able to handle it. The InfoExtractor extracts
all the information about the video or videos the URL refers to, and
    YoutubeDL processes the extracted information, possibly using a File
Downloader to download the video.
YoutubeDL objects accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of
options instead. These options are available through the params
attribute for the InfoExtractors to use. The YoutubeDL also
    registers itself as the downloader in charge of the InfoExtractors
that are added to it, so this is a "mutual registration".
Available options:
username: Username for authentication purposes.
password: Password for authentication purposes.
videopassword: Password for accessing a video.
ap_mso: Adobe Pass multiple-system operator identifier.
ap_username: Multiple-system operator account username.
ap_password: Multiple-system operator account password.
usenetrc: Use netrc for authentication instead.
verbose: Print additional info to stdout.
quiet: Do not print messages to stdout.
no_warnings: Do not print out anything for warnings.
forceurl: Force printing final URL.
forcetitle: Force printing title.
forceid: Force printing ID.
forcethumbnail: Force printing thumbnail URL.
forcedescription: Force printing description.
forcefilename: Force printing final filename.
forceduration: Force printing duration.
forcejson: Force printing info_dict as JSON.
dump_single_json: Force printing the info_dict of the whole playlist
(or video) as a single JSON line.
simulate: Do not download the video files.
format: Video format code. See options.py for more information.
outtmpl: Template for output names.
restrictfilenames: Do not allow "&" and spaces in file names
ignoreerrors: Do not stop on download errors.
force_generic_extractor: Force downloader to use the generic extractor
nooverwrites: Prevent overwriting files.
playliststart: Playlist item to start at.
playlistend: Playlist item to end at.
playlist_items: Specific indices of playlist to download.
playlistreverse: Download playlist items in reverse order.
playlistrandom: Download playlist items in random order.
matchtitle: Download only matching titles.
rejecttitle: Reject downloads for matching titles.
logger: Log messages to a logging.Logger instance.
logtostderr: Log messages to stderr instead of stdout.
writedescription: Write the video description to a .description file
writeinfojson: Write the video description to a .info.json file
writeannotations: Write the video annotations to a .annotations.xml file
writethumbnail: Write the thumbnail image to a file
write_all_thumbnails: Write all thumbnail formats to files
writesubtitles: Write the video subtitles to a file
writeautomaticsub: Write the automatically generated subtitles to a file
allsubtitles: Downloads all the subtitles of the video
(requires writesubtitles or writeautomaticsub)
listsubtitles: Lists all available subtitles for the video
subtitlesformat: The format code for subtitles
subtitleslangs: List of languages of the subtitles to download
keepvideo: Keep the video file after post-processing
daterange: A DateRange object, download only if the upload_date is in the range.
skip_download: Skip the actual download of the video file
cachedir: Location of the cache files in the filesystem.
False to disable filesystem cache.
noplaylist: Download single video instead of a playlist if in doubt.
age_limit: An integer representing the user's age in years.
Unsuitable videos for the given age are skipped.
min_views: An integer representing the minimum view count the video
must have in order to not be skipped.
Videos without view count information are always
downloaded. None for no limit.
max_views: An integer representing the maximum view count.
Videos that are more popular than that are not
downloaded.
Videos without view count information are always
downloaded. None for no limit.
download_archive: File name of a file where all downloads are recorded.
Videos already present in the file are not downloaded
again.
cookiefile: File name where cookies should be read from and dumped to.
nocheckcertificate:Do not verify SSL certificates
prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
At the moment, this is only supported by YouTube.
proxy: URL of the proxy server to use
geo_verification_proxy: URL of the proxy to use for IP address verification
on geo-restricted sites. (Experimental)
socket_timeout: Time to wait for unresponsive hosts, in seconds
bidi_workaround: Work around buggy terminals without bidirectional text
                     support, using fribidi
debug_printtraffic:Print out sent and received HTTP traffic
include_ads: Download ads as well
default_search: Prepend this string if an input url is not valid.
'auto' for elaborate guessing
encoding: Use this encoding instead of the system-specified.
extract_flat: Do not resolve URLs, return the immediate result.
Pass in 'in_playlist' to only show this behavior for
playlist items.
postprocessors: A list of dictionaries, each with an entry
* key: The name of the postprocessor. See
youtube_dl/postprocessor/__init__.py for a list.
as well as any further keyword arguments for the
postprocessor.
progress_hooks: A list of functions that get called on download
progress, with a dictionary with the entries
* status: One of "downloading", "error", or "finished".
Check this first and ignore unknown values.
If status is one of "downloading", or "finished", the
following properties may also be present:
* filename: The final filename (always present)
* tmpfilename: The filename we're currently writing to
* downloaded_bytes: Bytes on disk
* total_bytes: Size of the whole file, None if unknown
* total_bytes_estimate: Guess of the eventual file size,
None if unavailable.
* elapsed: The number of seconds since download started.
* eta: The estimated time in seconds, None if unknown
* speed: The download speed in bytes/second, None if
unknown
* fragment_index: The counter of the currently
downloaded video fragment.
* fragment_count: The number of fragments (= individual
files that will be merged)
Progress hooks are guaranteed to be called at least once
(with status "finished") if the download is successful.
merge_output_format: Extension to use when merging formats.
fixup: Automatically correct known faults of the file.
One of:
- "never": do nothing
- "warn": only emit a warning
- "detect_or_warn": check whether we can do anything
about it, warn otherwise (default)
source_address: (Experimental) Client-side IP address to bind to.
call_home: Boolean, true iff we are allowed to contact the
youtube-dl servers for debugging.
sleep_interval: Number of seconds to sleep before each download when
used alone or a lower bound of a range for randomized
sleep before each download (minimum possible number
of seconds to sleep) when used along with
max_sleep_interval.
max_sleep_interval:Upper bound of a range for randomized sleep before each
download (maximum possible number of seconds to sleep).
Must only be used along with sleep_interval.
Actual sleep time will be a random float from range
[sleep_interval; max_sleep_interval].
listformats: Print an overview of available video formats and exit.
list_thumbnails: Print a table of all thumbnails and exit.
match_filter: A function that gets called with the info_dict of
every video.
If it returns a message, the video is ignored.
If it returns None, the video is downloaded.
match_filter_func in utils.py is one example for this.
no_color: Do not emit color codes in output.
geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
HTTP header (experimental)
geo_bypass_country:
Two-letter ISO 3166-2 country code that will be used for
explicit geographic restriction bypassing via faking
X-Forwarded-For HTTP header (experimental)
The following options determine which downloader is picked:
external_downloader: Executable of the external downloader to call.
None or unset for standard (built-in) downloader.
hls_prefer_native: Use the native HLS downloader instead of ffmpeg/avconv
if True, otherwise use ffmpeg/avconv if False, otherwise
use downloader suggested by extractor if None.
The following parameters are not used by YoutubeDL itself, they are used by
the downloader (see youtube_dl/downloader/common.py):
nopart, updatetime, buffersize, ratelimit, min_filesize, max_filesize, test,
noresizebuffer, retries, continuedl, noprogress, consoletitle,
xattr_set_filesize, external_downloader_args, hls_use_mpegts.
The following options are used by the post processors:
prefer_ffmpeg: If True, use ffmpeg instead of avconv if both are available,
otherwise prefer avconv.
postprocessor_args: A list of additional command-line arguments for the
postprocessor.
"""
params = None
_ies = []
_pps = []
_download_retcode = None
_num_downloads = None
_screen_file = None
def __init__(self, params=None, auto_init=True):
"""Create a FileDownloader object with the given options."""
if params is None:
params = {}
self._ies = []
self._ies_instances = {}
self._pps = []
self._progress_hooks = []
self._download_retcode = 0
self._num_downloads = 0
self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
self._err_file = sys.stderr
self.params = {
# Default parameters
'nocheckcertificate': False,
}
self.params.update(params)
self.cache = Cache(self)
def check_deprecated(param, option, suggestion):
if self.params.get(param) is not None:
self.report_warning(
'%s is deprecated. Use %s instead.' % (option, suggestion))
return True
return False
if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
if self.params.get('geo_verification_proxy') is None:
self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']
check_deprecated('autonumber_size', '--autonumber-size', 'output template with %(autonumber)0Nd, where N in the number of digits')
check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
if params.get('bidi_workaround', False):
try:
import pty
master, slave = pty.openpty()
width = compat_get_terminal_size().columns
if width is None:
width_args = []
else:
width_args = ['-w', str(width)]
sp_kwargs = dict(
stdin=subprocess.PIPE,
stdout=slave,
stderr=self._err_file)
try:
self._output_process = subprocess.Popen(
['bidiv'] + width_args, **sp_kwargs
)
except OSError:
self._output_process = subprocess.Popen(
['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
self._output_channel = os.fdopen(master, 'rb')
except OSError as ose:
if ose.errno == errno.ENOENT:
                self.report_warning('Could not find fribidi executable, ignoring --bidi-workaround. Make sure that fribidi is an executable file in one of the directories in your $PATH.')
else:
raise
if (sys.version_info >= (3,) and sys.platform != 'win32' and
sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968'] and
not params.get('restrictfilenames', False)):
# On Python 3, the Unicode filesystem API will throw errors (#1474)
self.report_warning(
'Assuming --restrict-filenames since file system encoding '
'cannot encode all characters. '
'Set the LC_ALL environment variable to fix this.')
self.params['restrictfilenames'] = True
if isinstance(params.get('outtmpl'), bytes):
self.report_warning(
'Parameter outtmpl is bytes, but should be a unicode string. '
'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
self._setup_opener()
if auto_init:
self.print_debug_header()
self.add_default_info_extractors()
for pp_def_raw in self.params.get('postprocessors', []):
pp_class = get_postprocessor(pp_def_raw['key'])
pp_def = dict(pp_def_raw)
del pp_def['key']
pp = pp_class(self, **compat_kwargs(pp_def))
self.add_post_processor(pp)
for ph in self.params.get('progress_hooks', []):
self.add_progress_hook(ph)
register_socks_protocols()
def warn_if_short_id(self, argv):
# short YouTube ID starting with dash?
idxs = [
i for i, a in enumerate(argv)
if re.match(r'^-[0-9A-Za-z_-]{10}$', a)]
if idxs:
correct_argv = (
['youtube-dl'] +
[a for i, a in enumerate(argv) if i not in idxs] +
['--'] + [argv[i] for i in idxs]
)
self.report_warning(
'Long argument string detected. '
'Use -- to separate parameters and URLs, like this:\n%s\n' %
args_to_str(correct_argv))
def add_info_extractor(self, ie):
"""Add an InfoExtractor object to the end of the list."""
self._ies.append(ie)
if not isinstance(ie, type):
self._ies_instances[ie.ie_key()] = ie
ie.set_downloader(self)
def get_info_extractor(self, ie_key):
"""
        Get an instance of an IE with name ie_key. It will try to get one from
        the _ies list; if there's no instance, it will create a new one and add
        it to the extractor list.
"""
ie = self._ies_instances.get(ie_key)
if ie is None:
ie = get_info_extractor(ie_key)()
self.add_info_extractor(ie)
return ie
def add_default_info_extractors(self):
"""
        Add the InfoExtractors returned by gen_extractor_classes to the end of the list
"""
for ie in gen_extractor_classes():
self.add_info_extractor(ie)
def add_post_processor(self, pp):
"""Add a PostProcessor object to the end of the chain."""
self._pps.append(pp)
pp.set_downloader(self)
def add_progress_hook(self, ph):
"""Add the progress hook (currently only for the file downloader)"""
self._progress_hooks.append(ph)
def _bidi_workaround(self, message):
if not hasattr(self, '_output_channel'):
return message
assert hasattr(self, '_output_process')
assert isinstance(message, compat_str)
line_count = message.count('\n') + 1
self._output_process.stdin.write((message + '\n').encode('utf-8'))
self._output_process.stdin.flush()
res = ''.join(self._output_channel.readline().decode('utf-8')
for _ in range(line_count))
return res[:-len('\n')]
def to_screen(self, message, skip_eol=False):
"""Print message to stdout if not in quiet mode."""
return self.to_stdout(message, skip_eol, check_quiet=True)
def _write_string(self, s, out=None):
write_string(s, out=out, encoding=self.params.get('encoding'))
def to_stdout(self, message, skip_eol=False, check_quiet=False):
"""Print message to stdout if not in quiet mode."""
if self.params.get('logger'):
self.params['logger'].debug(message)
elif not check_quiet or not self.params.get('quiet', False):
message = self._bidi_workaround(message)
terminator = ['\n', ''][skip_eol]
output = message + terminator
self._write_string(output, self._screen_file)
def to_stderr(self, message):
"""Print message to stderr."""
assert isinstance(message, compat_str)
if self.params.get('logger'):
self.params['logger'].error(message)
else:
message = self._bidi_workaround(message)
output = message + '\n'
self._write_string(output, self._err_file)
def to_console_title(self, message):
if not self.params.get('consoletitle', False):
return
if compat_os_name == 'nt' and ctypes.windll.kernel32.GetConsoleWindow():
# c_wchar_p() might not be necessary if `message` is
# already of type unicode()
ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
elif 'TERM' in os.environ:
self._write_string('\033]0;%s\007' % message, self._screen_file)
def save_console_title(self):
if not self.params.get('consoletitle', False):
return
if 'TERM' in os.environ:
# Save the title on stack
self._write_string('\033[22;0t', self._screen_file)
def restore_console_title(self):
if not self.params.get('consoletitle', False):
return
if 'TERM' in os.environ:
# Restore the title from stack
self._write_string('\033[23;0t', self._screen_file)
def __enter__(self):
self.save_console_title()
return self
def __exit__(self, *args):
self.restore_console_title()
if self.params.get('cookiefile') is not None:
self.cookiejar.save()
def trouble(self, message=None, tb=None):
"""Determine action to take when a download problem appears.
        Depending on whether the downloader has been configured to ignore
        download errors, this method may raise an exception when errors
        are found, after printing the message.
tb, if given, is additional traceback information.
"""
if message is not None:
self.to_stderr(message)
if self.params.get('verbose'):
if tb is None:
if sys.exc_info()[0]: # if .trouble has been called from an except block
tb = ''
if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
tb += encode_compat_str(traceback.format_exc())
else:
tb_data = traceback.format_list(traceback.extract_stack())
tb = ''.join(tb_data)
self.to_stderr(tb)
if not self.params.get('ignoreerrors', False):
if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
exc_info = sys.exc_info()[1].exc_info
else:
exc_info = sys.exc_info()
raise DownloadError(message, exc_info)
self._download_retcode = 1
def report_warning(self, message):
'''
        Print the message to stderr; it will be prefixed with 'WARNING:'.
        If stderr is a tty file, the 'WARNING:' will be colored.
'''
if self.params.get('logger') is not None:
self.params['logger'].warning(message)
else:
if self.params.get('no_warnings'):
return
if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
_msg_header = '\033[0;33mWARNING:\033[0m'
else:
_msg_header = 'WARNING:'
warning_message = '%s %s' % (_msg_header, message)
self.to_stderr(warning_message)
def report_error(self, message, tb=None):
'''
Do the same as trouble, but prefixes the message with 'ERROR:', colored
in red if stderr is a tty file.
'''
if not self.params.get('no_color') and self._err_file.isatty() and compat_os_name != 'nt':
_msg_header = '\033[0;31mERROR:\033[0m'
else:
_msg_header = 'ERROR:'
error_message = '%s %s' % (_msg_header, message)
self.trouble(error_message, tb)
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded."""
try:
self.to_screen('[download] %s has already been downloaded' % file_name)
except UnicodeEncodeError:
self.to_screen('[download] The file has already been downloaded')
def prepare_filename(self, info_dict):
"""Generate the output filename."""
try:
template_dict = dict(info_dict)
template_dict['epoch'] = int(time.time())
autonumber_size = self.params.get('autonumber_size')
if autonumber_size is None:
autonumber_size = 5
template_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
if template_dict.get('resolution') is None:
if template_dict.get('width') and template_dict.get('height'):
template_dict['resolution'] = '%dx%d' % (template_dict['width'], template_dict['height'])
elif template_dict.get('height'):
template_dict['resolution'] = '%sp' % template_dict['height']
elif template_dict.get('width'):
template_dict['resolution'] = '%dx?' % template_dict['width']
sanitize = lambda k, v: sanitize_filename(
compat_str(v),
restricted=self.params.get('restrictfilenames'),
is_id=(k == 'id' or k.endswith('_id')))
template_dict = dict((k, v if isinstance(v, compat_numeric_types) else sanitize(k, v))
for k, v in template_dict.items()
if v is not None and not isinstance(v, (list, tuple, dict)))
template_dict = collections.defaultdict(lambda: 'NA', template_dict)
outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
# For fields playlist_index and autonumber convert all occurrences
# of %(field)s to %(field)0Nd for backward compatibility
field_size_compat_map = {
'playlist_index': len(str(template_dict['n_entries'])),
'autonumber': autonumber_size,
}
FIELD_SIZE_COMPAT_RE = r'(?<!%)%\((?P<field>autonumber|playlist_index)\)s'
mobj = re.search(FIELD_SIZE_COMPAT_RE, outtmpl)
if mobj:
outtmpl = re.sub(
FIELD_SIZE_COMPAT_RE,
r'%%(\1)0%dd' % field_size_compat_map[mobj.group('field')],
outtmpl)
NUMERIC_FIELDS = set((
'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
'timestamp', 'upload_year', 'upload_month', 'upload_day',
'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
'average_rating', 'comment_count', 'age_limit',
'start_time', 'end_time',
'chapter_number', 'season_number', 'episode_number',
'track_number', 'disc_number', 'release_year',
'playlist_index',
))
# Missing numeric fields used together with integer presentation types
# in format specification will break the argument substitution since
# string 'NA' is returned for missing fields. We will patch output
# template for missing fields to meet string presentation type.
for numeric_field in NUMERIC_FIELDS:
if numeric_field not in template_dict:
# As of [1] format syntax is:
# %[mapping_key][conversion_flags][minimum_width][.precision][length_modifier]type
# 1. https://docs.python.org/2/library/stdtypes.html#string-formatting
FORMAT_RE = r'''(?x)
(?<!%)
%
\({0}\) # mapping key
(?:[#0\-+ ]+)? # conversion flags (optional)
(?:\d+)? # minimum field width (optional)
(?:\.\d+)? # precision (optional)
[hlL]? # length modifier (optional)
[diouxXeEfFgGcrs%] # conversion type
'''
outtmpl = re.sub(
FORMAT_RE.format(numeric_field),
r'%({0})s'.format(numeric_field), outtmpl)
filename = expand_path(outtmpl % template_dict)
# Temporary fix for #4787
# 'Treat' all problem characters by passing filename through preferredencoding
# to workaround encoding issues with subprocess on python2 @ Windows
if sys.version_info < (3, 0) and sys.platform == 'win32':
filename = encodeFilename(filename, True).decode(preferredencoding())
return sanitize_path(filename)
except ValueError as err:
self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
return None
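    # Illustrative sketch (not part of the original source): once template_dict
    # is built above, the output template expands via ordinary %-formatting.
    # Values shown are hypothetical.
    #
    #   '%(title)s-%(id)s.%(ext)s' % template_dict
    #   # -> 'Some_Video-abc123.mp4' (then passed through sanitize_path)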
def _match_entry(self, info_dict, incomplete):
""" Returns None iff the file should be downloaded """
video_title = info_dict.get('title', info_dict.get('id', 'video'))
if 'title' in info_dict:
# This can happen when we're just evaluating the playlist
title = info_dict['title']
matchtitle = self.params.get('matchtitle', False)
if matchtitle:
if not re.search(matchtitle, title, re.IGNORECASE):
return '"' + title + '" title did not match pattern "' + matchtitle + '"'
rejecttitle = self.params.get('rejecttitle', False)
if rejecttitle:
if re.search(rejecttitle, title, re.IGNORECASE):
return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
date = info_dict.get('upload_date')
if date is not None:
dateRange = self.params.get('daterange', DateRange())
if date not in dateRange:
return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
view_count = info_dict.get('view_count')
if view_count is not None:
min_views = self.params.get('min_views')
if min_views is not None and view_count < min_views:
return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
max_views = self.params.get('max_views')
if max_views is not None and view_count > max_views:
return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
return 'Skipping "%s" because it is age restricted' % video_title
if self.in_download_archive(info_dict):
return '%s has already been recorded in archive' % video_title
if not incomplete:
match_filter = self.params.get('match_filter')
if match_filter is not None:
ret = match_filter(info_dict)
if ret is not None:
return ret
return None
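    # Illustrative sketch (not part of the original source): a 'match_filter'
    # callable as consumed above returns None to accept a video, or a
    # rejection message to skip it.
    #
    #   def only_long_videos(info_dict):
    #       if (info_dict.get('duration') or 0) < 60:
    #           return 'Skipping %s: shorter than a minute' % info_dict.get('id')
    #       return None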
@staticmethod
def add_extra_info(info_dict, extra_info):
'''Set the keys from extra_info in info dict if they are missing'''
for key, value in extra_info.items():
info_dict.setdefault(key, value)
def extract_info(self, url, download=True, ie_key=None, extra_info={},
process=True, force_generic_extractor=False):
'''
Returns a list with a dictionary for each video we find.
If 'download', also downloads the videos.
extra_info is a dict containing the extra values to add to each result
'''
if not ie_key and force_generic_extractor:
ie_key = 'Generic'
if ie_key:
ies = [self.get_info_extractor(ie_key)]
else:
ies = self._ies
for ie in ies:
if not ie.suitable(url):
continue
ie = self.get_info_extractor(ie.ie_key())
if not ie.working():
self.report_warning('The program functionality for this site has been marked as broken, '
'and will probably not work.')
try:
ie_result = ie.extract(url)
if ie_result is None: # Finished already (backwards compatibility; listformats and friends should be moved here)
break
if isinstance(ie_result, list):
# Backwards compatibility: old IE result format
ie_result = {
'_type': 'compat_list',
'entries': ie_result,
}
self.add_default_extra_info(ie_result, ie, url)
if process:
return self.process_ie_result(ie_result, download, extra_info)
else:
return ie_result
except GeoRestrictedError as e:
msg = e.msg
if e.countries:
msg += '\nThis video is available in %s.' % ', '.join(
map(ISO3166Utils.short2full, e.countries))
                msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to work around this.'
self.report_error(msg)
break
except ExtractorError as e: # An error we somewhat expected
self.report_error(compat_str(e), e.format_traceback())
break
except MaxDownloadsReached:
raise
except Exception as e:
if self.params.get('ignoreerrors', False):
self.report_error(error_to_compat_str(e), tb=encode_compat_str(traceback.format_exc()))
break
else:
raise
else:
self.report_error('no suitable InfoExtractor for URL %s' % url)
def add_default_extra_info(self, ie_result, ie, url):
self.add_extra_info(ie_result, {
'extractor': ie.IE_NAME,
'webpage_url': url,
'webpage_url_basename': url_basename(url),
'extractor_key': ie.ie_key(),
})
def process_ie_result(self, ie_result, download=True, extra_info={}):
"""
        Take the result of the ie (may be modified) and resolve all unresolved
references (URLs, playlist items).
It will also download the videos if 'download'.
Returns the resolved ie_result.
"""
result_type = ie_result.get('_type', 'video')
if result_type in ('url', 'url_transparent'):
ie_result['url'] = sanitize_url(ie_result['url'])
extract_flat = self.params.get('extract_flat', False)
if ((extract_flat == 'in_playlist' and 'playlist' in extra_info) or
extract_flat is True):
if self.params.get('forcejson', False):
self.to_stdout(json.dumps(ie_result))
return ie_result
if result_type == 'video':
self.add_extra_info(ie_result, extra_info)
return self.process_video_result(ie_result, download=download)
elif result_type == 'url':
# We have to add extra_info to the results because it may be
# contained in a playlist
return self.extract_info(ie_result['url'],
download,
ie_key=ie_result.get('ie_key'),
extra_info=extra_info)
elif result_type == 'url_transparent':
# Use the information from the embedding page
info = self.extract_info(
ie_result['url'], ie_key=ie_result.get('ie_key'),
extra_info=extra_info, download=False, process=False)
# extract_info may return None when ignoreerrors is enabled and
# extraction failed with an error, don't crash and return early
# in this case
if not info:
return info
force_properties = dict(
(k, v) for k, v in ie_result.items() if v is not None)
for f in ('_type', 'url', 'ie_key'):
if f in force_properties:
del force_properties[f]
new_result = info.copy()
new_result.update(force_properties)
            # Extracted info may not be a video result (i.e.
            # info.get('_type', 'video') != 'video') but rather a url or
# url_transparent. In such cases outer metadata (from ie_result)
# should be propagated to inner one (info). For this to happen
# _type of info should be overridden with url_transparent. This
# fixes issue from https://github.com/rg3/youtube-dl/pull/11163.
if new_result.get('_type') == 'url':
new_result['_type'] = 'url_transparent'
return self.process_ie_result(
new_result, download=download, extra_info=extra_info)
elif result_type in ('playlist', 'multi_video'):
# We process each entry in the playlist
playlist = ie_result.get('title') or ie_result.get('id')
self.to_screen('[download] Downloading playlist: %s' % playlist)
playlist_results = []
playliststart = self.params.get('playliststart', 1) - 1
playlistend = self.params.get('playlistend')
# For backwards compatibility, interpret -1 as whole list
if playlistend == -1:
playlistend = None
playlistitems_str = self.params.get('playlist_items')
playlistitems = None
if playlistitems_str is not None:
def iter_playlistitems(format):
for string_segment in format.split(','):
if '-' in string_segment:
start, end = string_segment.split('-')
for item in range(int(start), int(end) + 1):
yield int(item)
else:
yield int(string_segment)
playlistitems = iter_playlistitems(playlistitems_str)
ie_entries = ie_result['entries']
if isinstance(ie_entries, list):
n_all_entries = len(ie_entries)
if playlistitems:
entries = [
ie_entries[i - 1] for i in playlistitems
if -n_all_entries <= i - 1 < n_all_entries]
else:
entries = ie_entries[playliststart:playlistend]
n_entries = len(entries)
self.to_screen(
'[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
(ie_result['extractor'], playlist, n_all_entries, n_entries))
elif isinstance(ie_entries, PagedList):
if playlistitems:
entries = []
for item in playlistitems:
entries.extend(ie_entries.getslice(
item - 1, item
))
else:
entries = ie_entries.getslice(
playliststart, playlistend)
n_entries = len(entries)
self.to_screen(
'[%s] playlist %s: Downloading %d videos' %
(ie_result['extractor'], playlist, n_entries))
else: # iterable
if playlistitems:
entry_list = list(ie_entries)
entries = [entry_list[i - 1] for i in playlistitems]
else:
entries = list(itertools.islice(
ie_entries, playliststart, playlistend))
n_entries = len(entries)
self.to_screen(
'[%s] playlist %s: Downloading %d videos' %
(ie_result['extractor'], playlist, n_entries))
if self.params.get('playlistreverse', False):
entries = entries[::-1]
if self.params.get('playlistrandom', False):
random.shuffle(entries)
x_forwarded_for = ie_result.get('__x_forwarded_for_ip')
for i, entry in enumerate(entries, 1):
self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
# This __x_forwarded_for_ip thing is a bit ugly but requires
# minimal changes
if x_forwarded_for:
entry['__x_forwarded_for_ip'] = x_forwarded_for
extra = {
'n_entries': n_entries,
'playlist': playlist,
'playlist_id': ie_result.get('id'),
'playlist_title': ie_result.get('title'),
'playlist_index': i + playliststart,
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
'webpage_url_basename': url_basename(ie_result['webpage_url']),
'extractor_key': ie_result['extractor_key'],
}
reason = self._match_entry(entry, incomplete=True)
if reason is not None:
self.to_screen('[download] ' + reason)
continue
entry_result = self.process_ie_result(entry,
download=download,
extra_info=extra)
playlist_results.append(entry_result)
ie_result['entries'] = playlist_results
self.to_screen('[download] Finished downloading playlist: %s' % playlist)
return ie_result
elif result_type == 'compat_list':
self.report_warning(
'Extractor %s returned a compat_list result. '
'It needs to be updated.' % ie_result.get('extractor'))
def _fixup(r):
self.add_extra_info(
r,
{
'extractor': ie_result['extractor'],
'webpage_url': ie_result['webpage_url'],
'webpage_url_basename': url_basename(ie_result['webpage_url']),
'extractor_key': ie_result['extractor_key'],
}
)
return r
ie_result['entries'] = [
self.process_ie_result(_fixup(r), download, extra_info)
for r in ie_result['entries']
]
return ie_result
else:
raise Exception('Invalid result type: %s' % result_type)
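    # Illustrative note (not part of the original source): the playlist_items
    # handling above turns a spec such as '1-3,7' into the indices 1, 2, 3
    # and 7, so only those playlist entries are processed.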
def _build_format_filter(self, filter_spec):
" Returns a function to filter the formats according to the filter_spec "
OPERATORS = {
'<': operator.lt,
'<=': operator.le,
'>': operator.gt,
'>=': operator.ge,
'=': operator.eq,
'!=': operator.ne,
}
operator_rex = re.compile(r'''(?x)\s*
(?P<key>width|height|tbr|abr|vbr|asr|filesize|fps)
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
(?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)
$
''' % '|'.join(map(re.escape, OPERATORS.keys())))
m = operator_rex.search(filter_spec)
if m:
try:
comparison_value = int(m.group('value'))
except ValueError:
comparison_value = parse_filesize(m.group('value'))
if comparison_value is None:
comparison_value = parse_filesize(m.group('value') + 'B')
if comparison_value is None:
raise ValueError(
'Invalid value %r in format specification %r' % (
m.group('value'), filter_spec))
op = OPERATORS[m.group('op')]
if not m:
STR_OPERATORS = {
'=': operator.eq,
'!=': operator.ne,
'^=': lambda attr, value: attr.startswith(value),
'$=': lambda attr, value: attr.endswith(value),
'*=': lambda attr, value: value in attr,
}
str_operator_rex = re.compile(r'''(?x)
\s*(?P<key>ext|acodec|vcodec|container|protocol|format_id)
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?
\s*(?P<value>[a-zA-Z0-9._-]+)
\s*$
''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
m = str_operator_rex.search(filter_spec)
if m:
comparison_value = m.group('value')
op = STR_OPERATORS[m.group('op')]
if not m:
raise ValueError('Invalid filter specification %r' % filter_spec)
def _filter(f):
actual_value = f.get(m.group('key'))
if actual_value is None:
return m.group('none_inclusive')
return op(actual_value, comparison_value)
return _filter
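    # Illustrative sketch (not part of the original source): filter specs the
    # parser above accepts, as they would appear inside '-f' selectors:
    #
    #   'height<=720'    numeric comparison (OPERATORS)
    #   'filesize>100M'  value parsed with parse_filesize
    #   'ext=mp4'        string comparison (STR_OPERATORS)
    #   'vcodec^=avc1'   string prefix match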
def build_format_selector(self, format_spec):
def syntax_error(note, start):
message = (
'Invalid format specification: '
'{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
return SyntaxError(message)
PICKFIRST = 'PICKFIRST'
MERGE = 'MERGE'
SINGLE = 'SINGLE'
GROUP = 'GROUP'
FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
def _parse_filter(tokens):
filter_parts = []
for type, string, start, _, _ in tokens:
if type == tokenize.OP and string == ']':
return ''.join(filter_parts)
else:
filter_parts.append(string)
def _remove_unused_ops(tokens):
# Remove operators that we don't use and join them with the surrounding strings
# for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
ALLOWED_OPS = ('/', '+', ',', '(', ')')
last_string, last_start, last_end, last_line = None, None, None, None
for type, string, start, end, line in tokens:
if type == tokenize.OP and string == '[':
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
last_string = None
yield type, string, start, end, line
# everything inside brackets will be handled by _parse_filter
for type, string, start, end, line in tokens:
yield type, string, start, end, line
if type == tokenize.OP and string == ']':
break
elif type == tokenize.OP and string in ALLOWED_OPS:
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
last_string = None
yield type, string, start, end, line
elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
if not last_string:
last_string = string
last_start = start
last_end = end
else:
last_string += string
if last_string:
yield tokenize.NAME, last_string, last_start, last_end, last_line
def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
selectors = []
current_selector = None
for type, string, start, _, _ in tokens:
# ENCODING is only defined in python 3.x
if type == getattr(tokenize, 'ENCODING', None):
continue
elif type in [tokenize.NAME, tokenize.NUMBER]:
current_selector = FormatSelector(SINGLE, string, [])
elif type == tokenize.OP:
if string == ')':
if not inside_group:
# ')' will be handled by the parentheses group
tokens.restore_last_token()
break
elif inside_merge and string in ['/', ',']:
tokens.restore_last_token()
break
elif inside_choice and string == ',':
tokens.restore_last_token()
break
elif string == ',':
if not current_selector:
raise syntax_error('"," must follow a format selector', start)
selectors.append(current_selector)
current_selector = None
elif string == '/':
if not current_selector:
raise syntax_error('"/" must follow a format selector', start)
first_choice = current_selector
second_choice = _parse_format_selection(tokens, inside_choice=True)
current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
elif string == '[':
if not current_selector:
current_selector = FormatSelector(SINGLE, 'best', [])
format_filter = _parse_filter(tokens)
current_selector.filters.append(format_filter)
elif string == '(':
if current_selector:
raise syntax_error('Unexpected "("', start)
group = _parse_format_selection(tokens, inside_group=True)
current_selector = FormatSelector(GROUP, group, [])
elif string == '+':
video_selector = current_selector
audio_selector = _parse_format_selection(tokens, inside_merge=True)
if not video_selector or not audio_selector:
raise syntax_error('"+" must be between two format selectors', start)
current_selector = FormatSelector(MERGE, (video_selector, audio_selector), [])
else:
raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
elif type == tokenize.ENDMARKER:
break
if current_selector:
selectors.append(current_selector)
return selectors
def _build_selector_function(selector):
if isinstance(selector, list):
fs = [_build_selector_function(s) for s in selector]
def selector_function(ctx):
for f in fs:
for format in f(ctx):
yield format
return selector_function
elif selector.type == GROUP:
selector_function = _build_selector_function(selector.selector)
elif selector.type == PICKFIRST:
fs = [_build_selector_function(s) for s in selector.selector]
def selector_function(ctx):
for f in fs:
picked_formats = list(f(ctx))
if picked_formats:
return picked_formats
return []
elif selector.type == SINGLE:
format_spec = selector.selector
def selector_function(ctx):
formats = list(ctx['formats'])
if not formats:
return
if format_spec == 'all':
for f in formats:
yield f
elif format_spec in ['best', 'worst', None]:
format_idx = 0 if format_spec == 'worst' else -1
audiovideo_formats = [
f for f in formats
if f.get('vcodec') != 'none' and f.get('acodec') != 'none']
if audiovideo_formats:
yield audiovideo_formats[format_idx]
# for extractors with incomplete formats (audio only (soundcloud)
                        # or video only (imgur)) we will fall back to best/worst
# {video,audio}-only format
elif ctx['incomplete_formats']:
yield formats[format_idx]
elif format_spec == 'bestaudio':
audio_formats = [
f for f in formats
if f.get('vcodec') == 'none']
if audio_formats:
yield audio_formats[-1]
elif format_spec == 'worstaudio':
audio_formats = [
f for f in formats
if f.get('vcodec') == 'none']
if audio_formats:
yield audio_formats[0]
elif format_spec == 'bestvideo':
video_formats = [
f for f in formats
if f.get('acodec') == 'none']
if video_formats:
yield video_formats[-1]
elif format_spec == 'worstvideo':
video_formats = [
f for f in formats
if f.get('acodec') == 'none']
if video_formats:
yield video_formats[0]
else:
extensions = ['mp4', 'flv', 'webm', '3gp', 'm4a', 'mp3', 'ogg', 'aac', 'wav']
if format_spec in extensions:
filter_f = lambda f: f['ext'] == format_spec
else:
filter_f = lambda f: f['format_id'] == format_spec
matches = list(filter(filter_f, formats))
if matches:
yield matches[-1]
elif selector.type == MERGE:
def _merge(formats_info):
format_1, format_2 = [f['format_id'] for f in formats_info]
# The first format must contain the video and the
# second the audio
if formats_info[0].get('vcodec') == 'none':
self.report_error('The first format must '
'contain the video, try using '
'"-f %s+%s"' % (format_2, format_1))
return
# Formats must be opposite (video+audio)
if formats_info[0].get('acodec') == 'none' and formats_info[1].get('acodec') == 'none':
self.report_error(
'Both formats %s and %s are video-only, you must specify "-f video+audio"'
% (format_1, format_2))
return
output_ext = (
formats_info[0]['ext']
if self.params.get('merge_output_format') is None
else self.params['merge_output_format'])
return {
'requested_formats': formats_info,
'format': '%s+%s' % (formats_info[0].get('format'),
formats_info[1].get('format')),
'format_id': '%s+%s' % (formats_info[0].get('format_id'),
formats_info[1].get('format_id')),
'width': formats_info[0].get('width'),
'height': formats_info[0].get('height'),
'resolution': formats_info[0].get('resolution'),
'fps': formats_info[0].get('fps'),
'vcodec': formats_info[0].get('vcodec'),
'vbr': formats_info[0].get('vbr'),
'stretched_ratio': formats_info[0].get('stretched_ratio'),
'acodec': formats_info[1].get('acodec'),
'abr': formats_info[1].get('abr'),
'ext': output_ext,
}
video_selector, audio_selector = map(_build_selector_function, selector.selector)
def selector_function(ctx):
for pair in itertools.product(
video_selector(copy.deepcopy(ctx)), audio_selector(copy.deepcopy(ctx))):
yield _merge(pair)
filters = [self._build_format_filter(f) for f in selector.filters]
def final_selector(ctx):
ctx_copy = copy.deepcopy(ctx)
for _filter in filters:
ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
return selector_function(ctx_copy)
return final_selector
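# The raw spec string is now tokenized with Python's own tokenizer;
# _parse_format_selection turns the token stream into the selector tree
# consumed above, and TokenIterator below adds the one-token "undo"
# (restore_last_token) that the parser relies on.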
stream = io.BytesIO(format_spec.encode('utf-8'))
try:
tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
except tokenize.TokenError:
raise syntax_error('Missing closing/opening brackets or parentheses', (0, len(format_spec)))
class TokenIterator(object):
def __init__(self, tokens):
self.tokens = tokens
self.counter = 0
def __iter__(self):
return self
def __next__(self):
if self.counter >= len(self.tokens):
raise StopIteration()
value = self.tokens[self.counter]
self.counter += 1
return value
next = __next__
def restore_last_token(self):
self.counter -= 1
parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
return _build_selector_function(parsed_selector)
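# Example usage sketch (names and values assumed, not from this file):
#   selector = ydl.build_format_selector('bestvideo[height<=720]+bestaudio/best')
#   chosen = list(selector({'formats': formats, 'incomplete_formats': False}))
# 'chosen' then holds the dict(s) describing what to download.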
def _calc_headers(self, info_dict):
res = std_headers.copy()
add_headers = info_dict.get('http_headers')
if add_headers:
res.update(add_headers)
cookies = self._calc_cookies(info_dict)
if cookies:
res['Cookie'] = cookies
if 'X-Forwarded-For' not in res:
x_forwarded_for_ip = info_dict.get('__x_forwarded_for_ip')
if x_forwarded_for_ip:
res['X-Forwarded-For'] = x_forwarded_for_ip
return res
def _calc_cookies(self, info_dict):
pr = sanitized_Request(info_dict['url'])
self.cookiejar.add_cookie_header(pr)
return pr.get_header('Cookie')
def process_video_result(self, info_dict, download=True):
assert info_dict.get('_type', 'video') == 'video'
if 'id' not in info_dict:
raise ExtractorError('Missing "id" field in extractor result')
if 'title' not in info_dict:
raise ExtractorError('Missing "title" field in extractor result')
if not isinstance(info_dict['id'], compat_str):
self.report_warning('"id" field is not a string - forcing string conversion')
info_dict['id'] = compat_str(info_dict['id'])
if 'playlist' not in info_dict:
# It isn't part of a playlist
info_dict['playlist'] = None
info_dict['playlist_index'] = None
thumbnails = info_dict.get('thumbnails')
if thumbnails is None:
thumbnail = info_dict.get('thumbnail')
if thumbnail:
info_dict['thumbnails'] = thumbnails = [{'url': thumbnail}]
if thumbnails:
thumbnails.sort(key=lambda t: (
t.get('preference') if t.get('preference') is not None else -1,
t.get('width') if t.get('width') is not None else -1,
t.get('height') if t.get('height') is not None else -1,
t.get('id') if t.get('id') is not None else '', t.get('url')))
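# The sort is ascending, so after it thumbnails[-1] is the most
# preferred / largest entry; that is what info_dict['thumbnail']
# falls back to further down.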
for i, t in enumerate(thumbnails):
t['url'] = sanitize_url(t['url'])
if t.get('width') and t.get('height'):
t['resolution'] = '%dx%d' % (t['width'], t['height'])
if t.get('id') is None:
t['id'] = '%d' % i
if self.params.get('list_thumbnails'):
self.list_thumbnails(info_dict)
return
thumbnail = info_dict.get('thumbnail')
if thumbnail:
info_dict['thumbnail'] = sanitize_url(thumbnail)
elif thumbnails:
info_dict['thumbnail'] = thumbnails[-1]['url']
if 'display_id' not in info_dict and 'id' in info_dict:
info_dict['display_id'] = info_dict['id']
if info_dict.get('upload_date') is None and info_dict.get('timestamp') is not None:
# Working around out-of-range timestamp values (e.g. negative ones on Windows,
# see http://bugs.python.org/issue1646728)
try:
upload_date = datetime.datetime.utcfromtimestamp(info_dict['timestamp'])
info_dict['upload_date'] = upload_date.strftime('%Y%m%d')
except (ValueError, OverflowError, OSError):
pass
# Auto-generate title fields corresponding to the *_number fields when missing
# in order to always have clean titles. This is very common for TV series.
for field in ('chapter', 'season', 'episode'):
if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])
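# e.g. {'episode_number': 3} with no 'episode' set yields
# info_dict['episode'] = 'Episode 3'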
subtitles = info_dict.get('subtitles')
if subtitles:
for _, subtitle in subtitles.items():
for subtitle_format in subtitle:
if subtitle_format.get('url'):
subtitle_format['url'] = sanitize_url(subtitle_format['url'])
if subtitle_format.get('ext') is None:
subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()
if self.params.get('listsubtitles', False):
if 'automatic_captions' in info_dict:
self.list_subtitles(info_dict['id'], info_dict.get('automatic_captions'), 'automatic captions')
self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
return
info_dict['requested_subtitles'] = self.process_subtitles(
info_dict['id'], subtitles,
info_dict.get('automatic_captions'))
# We now pick which formats have to be downloaded
if info_dict.get('formats') is None:
# There's only one format available
formats = [info_dict]
else:
formats = info_dict['formats']
if not formats:
raise ExtractorError('No video formats found!')
formats_dict = {}
# We check that all the formats have the format and format_id fields
for i, format in enumerate(formats):
if 'url' not in format:
raise ExtractorError('Missing "url" key in result (index %d)' % i)
format['url'] = sanitize_url(format['url'])
if format.get('format_id') is None:
format['format_id'] = compat_str(i)
else:
# Sanitize format_id by replacing characters used in format selector expressions
format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
format_id = format['format_id']
if format_id not in formats_dict:
formats_dict[format_id] = []
formats_dict[format_id].append(format)
# Make sure all formats have unique format_id
for format_id, ambiguous_formats in formats_dict.items():
if len(ambiguous_formats) > 1:
for i, format in enumerate(ambiguous_formats):
format['format_id'] = '%s-%d' % (format_id, i)
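# e.g. two formats that both report format_id 'hls' are renamed to
# 'hls-0' and 'hls-1' so the format selector can address each one.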
for i, format in enumerate(formats):
if format.get('format') is None:
format['format'] = '{id} - {res}{note}'.format(
id=format['format_id'],
res=self.format_resolution(format),
note=' ({0})'.format(format['format_note']) if format.get('format_note') is not None else '',
)
# Automatically determine file extension if missing
if format.get('ext') is None:
format['ext'] = determine_ext(format['url']).lower()
# Automatically determine protocol if missing (useful for format
# selection purposes)
if format.get('protocol') is None:
format['protocol'] = determine_protocol(format)
# Add HTTP headers, so that external programs can use them from the
# json output
full_format_info = info_dict.copy()
full_format_info.update(format)
format['http_headers'] = self._calc_headers(full_format_info)
# Remove private housekeeping stuff
if '__x_forwarded_for_ip' in info_dict:
del info_dict['__x_forwarded_for_ip']
# TODO Central sorting goes here
if formats[0] is not info_dict:
# Only set the 'formats' field if the original info_dict lists them;
# otherwise we end up with a circular reference: the first (and only)
# element of the 'formats' field in info_dict would be info_dict itself,
# which can't be exported to JSON.
info_dict['formats'] = formats
if self.params.get('listformats'):
self.list_formats(info_dict)
return
req_format = self.params.get('format')
if req_format is None:
req_format_list = []
if (self.params.get('outtmpl', DEFAULT_OUTTMPL) != '-' and
not info_dict.get('is_live')):
merger = FFmpegMergerPP(self)
if merger.available and merger.can_merge():
req_format_list.append('bestvideo+bestaudio')
req_format_list.append('best')
req_format = '/'.join(req_format_list)
format_selector = self.build_format_selector(req_format)
# While performing format selection we may need access to the original
# format set in order to calculate some metrics or do some processing.
# For now we need to be able to guess whether the original formats provided
# by the extractor are incomplete or not (i.e. whether the extractor provides
# only video-only or only audio-only formats) for proper format selection for
# extractors with such incomplete formats (see
# https://github.com/rg3/youtube-dl/pull/5556).
# Since formats may be filtered during format selection and may not match
# the original formats, the results may be incorrect. Thus original formats
# or pre-calculated metrics should be passed to format selection routines
# as well.
# We will pass a context object containing all necessary additional data
# instead of just formats.
# This fixes an incorrect format selection issue (see
# https://github.com/rg3/youtube-dl/issues/10083).
incomplete_formats = (
# All formats are video-only or
all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats) or
# all formats are audio-only
all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))
ctx = {
'formats': formats,
'incomplete_formats': incomplete_formats,
}
formats_to_download = list(format_selector(ctx))
if not formats_to_download:
raise ExtractorError('requested format not available',
expected=True)
if download:
if len(formats_to_download) > 1:
self.to_screen('[info] %s: downloading video in %s formats' % (info_dict['id'], len(formats_to_download)))
for format in formats_to_download:
new_info = dict(info_dict)
new_info.update(format)
self.process_info(new_info)
# We update the info dict with the best quality format (backwards compatibility)
info_dict.update(formats_to_download[-1])
return info_dict
def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
"""Select the requested subtitles and their format"""
available_subs = {}
if normal_subtitles and self.params.get('writesubtitles'):
available_subs.update(normal_subtitles)
if automatic_captions and self.params.get('writeautomaticsub'):
for lang, cap_info in automatic_captions.items():
if lang not in available_subs:
available_subs[lang] = cap_info
if ((not self.params.get('writesubtitles') and
not self.params.get('writeautomaticsub')) or
not available_subs):
return None
if self.params.get('allsubtitles', False):
requested_langs = available_subs.keys()
else:
if self.params.get('subtitleslangs', False):
requested_langs = self.params.get('subtitleslangs')
elif 'en' in available_subs:
requested_langs = ['en']
else:
requested_langs = [list(available_subs.keys())[0]]
formats_query = self.params.get('subtitlesformat', 'best')
formats_preference = formats_query.split('/') if formats_query else []
subs = {}
for lang in requested_langs:
formats = available_subs.get(lang)
if formats is None:
self.report_warning('%s subtitles not available for %s' % (lang, video_id))
continue
for ext in formats_preference:
if ext == 'best':
f = formats[-1]
break
matches = list(filter(lambda f: f['ext'] == ext, formats))
if matches:
f = matches[-1]
break
else:
f = formats[-1]
self.report_warning(
'No subtitle format found matching "%s" for language %s, '
'using %s' % (formats_query, lang, f['ext']))
subs[lang] = f
return subs
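# Illustrative call (parameter values assumed): with
#   params = {'writesubtitles': True, 'subtitleslangs': ['en'],
#             'subtitlesformat': 'srt/best'}
# this returns something like {'en': {'ext': 'srt', 'url': ...}} when an
# srt track exists, otherwise the last listed 'en' format plus a warning.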
def process_info(self, info_dict):
"""Process a single resolved IE result."""
assert info_dict.get('_type', 'video') == 'video'
max_downloads = self.params.get('max_downloads')
if max_downloads is not None:
if self._num_downloads >= int(max_downloads):
raise MaxDownloadsReached()
info_dict['fulltitle'] = info_dict['title']
if len(info_dict['title']) > 200:
info_dict['title'] = info_dict['title'][:197] + '...'
if 'format' not in info_dict:
info_dict['format'] = info_dict['ext']
reason = self._match_entry(info_dict, incomplete=False)
if reason is not None:
self.to_screen('[download] ' + reason)
return
self._num_downloads += 1
info_dict['_filename'] = filename = self.prepare_filename(info_dict)
# Forced printings
if self.params.get('forcetitle', False):
self.to_stdout(info_dict['fulltitle'])
if self.params.get('forceid', False):
self.to_stdout(info_dict['id'])
if self.params.get('forceurl', False):
if info_dict.get('requested_formats') is not None:
for f in info_dict['requested_formats']:
self.to_stdout(f['url'] + f.get('play_path', ''))
else:
# For RTMP URLs, also include the playpath
self.to_stdout(info_dict['url'] + info_dict.get('play_path', ''))
if self.params.get('forcethumbnail', False) and info_dict.get('thumbnail') is not None:
self.to_stdout(info_dict['thumbnail'])
if self.params.get('forcedescription', False) and info_dict.get('description') is not None:
self.to_stdout(info_dict['description'])
if self.params.get('forcefilename', False) and filename is not None:
self.to_stdout(filename)
if self.params.get('forceduration', False) and info_dict.get('duration') is not None:
self.to_stdout(formatSeconds(info_dict['duration']))
if self.params.get('forceformat', False):
self.to_stdout(info_dict['format'])
if self.params.get('forcejson', False):
self.to_stdout(json.dumps(info_dict))
# Do nothing else if in simulate mode
if self.params.get('simulate', False):
return
if filename is None:
return
try:
dn = os.path.dirname(sanitize_path(encodeFilename(filename)))
if dn and not os.path.exists(dn):
os.makedirs(dn)
except (OSError, IOError) as err:
self.report_error('unable to create directory ' + error_to_compat_str(err))
return
if self.params.get('writedescription', False):
descfn = replace_extension(filename, 'description', info_dict.get('ext'))
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(descfn)):
self.to_screen('[info] Video description is already present')
elif info_dict.get('description') is None:
self.report_warning('There\'s no description to write.')
else:
try:
self.to_screen('[info] Writing video description to: ' + descfn)
with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
descfile.write(info_dict['description'])
except (OSError, IOError):
self.report_error('Cannot write description file ' + descfn)
return
if self.params.get('writeannotations', False):
annofn = replace_extension(filename, 'annotations.xml', info_dict.get('ext'))
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(annofn)):
self.to_screen('[info] Video annotations are already present')
else:
try:
self.to_screen('[info] Writing video annotations to: ' + annofn)
with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
annofile.write(info_dict['annotations'])
except (KeyError, TypeError):
self.report_warning('There are no annotations to write.')
except (OSError, IOError):
self.report_error('Cannot write annotations file: ' + annofn)
return
subtitles_are_requested = any([self.params.get('writesubtitles', False),
self.params.get('writeautomaticsub')])
if subtitles_are_requested and info_dict.get('requested_subtitles'):
# Subtitle download errors are already handled as problems by the relevant IE;
# that way processing silently goes on when an IE does not support subtitles
subtitles = info_dict['requested_subtitles']
ie = self.get_info_extractor(info_dict['extractor_key'])
for sub_lang, sub_info in subtitles.items():
sub_format = sub_info['ext']
if sub_info.get('data') is not None:
sub_data = sub_info['data']
else:
try:
sub_data = ie._download_webpage(
sub_info['url'], info_dict['id'], note=False)
except ExtractorError as err:
self.report_warning('Unable to download subtitle for "%s": %s' %
(sub_lang, error_to_compat_str(err.cause)))
continue
try:
sub_filename = subtitles_filename(filename, sub_lang, sub_format)
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(sub_filename)):
self.to_screen('[info] Video subtitle %s.%s is already present' % (sub_lang, sub_format))
else:
self.to_screen('[info] Writing video subtitles to: ' + sub_filename)
# Use newline='' to prevent conversion of newline characters
# See https://github.com/rg3/youtube-dl/issues/10268
with io.open(encodeFilename(sub_filename), 'w', encoding='utf-8', newline='') as subfile:
subfile.write(sub_data)
except (OSError, IOError):
self.report_error('Cannot write subtitles file ' + sub_filename)
return
if self.params.get('writeinfojson', False):
infofn = replace_extension(filename, 'info.json', info_dict.get('ext'))
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(infofn)):
self.to_screen('[info] Video description metadata is already present')
else:
self.to_screen('[info] Writing video description metadata as JSON to: ' + infofn)
try:
write_json_file(self.filter_requested_info(info_dict), infofn)
except (OSError, IOError):
self.report_error('Cannot write metadata to JSON file ' + infofn)
return
self._write_thumbnails(info_dict, filename)
if not self.params.get('skip_download', False):
try:
def dl(name, info):
fd = get_suitable_downloader(info, self.params)(self, self.params)
for ph in self._progress_hooks:
fd.add_progress_hook(ph)
if self.params.get('verbose'):
self.to_stdout('[debug] Invoking downloader on %r' % info.get('url'))
return fd.download(name, info)
if info_dict.get('requested_formats') is not None:
downloaded = []
success = True
merger = FFmpegMergerPP(self)
if not merger.available:
postprocessors = []
self.report_warning('You have requested multiple '
'formats but ffmpeg or avconv are not installed.'
' The formats won\'t be merged.')
else:
postprocessors = [merger]
def compatible_formats(formats):
video, audio = formats
# Check extension
video_ext, audio_ext = video.get('ext'), audio.get('ext')
if video_ext and audio_ext:
COMPATIBLE_EXTS = (
('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma'),
('webm',)  # trailing comma keeps this a one-element tuple, not a string
)
for exts in COMPATIBLE_EXTS:
if video_ext in exts and audio_ext in exts:
return True
# TODO: Check acodec/vcodec
return False
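# e.g. video 'mp4' + audio 'm4a' sit in the same tuple and merge as-is,
# while 'webm' video + 'm4a' audio do not, triggering the mkv fallback below.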
filename_real_ext = os.path.splitext(filename)[1][1:]
filename_wo_ext = (
os.path.splitext(filename)[0]
if filename_real_ext == info_dict['ext']
else filename)
requested_formats = info_dict['requested_formats']
if self.params.get('merge_output_format') is None and not compatible_formats(requested_formats):
info_dict['ext'] = 'mkv'
self.report_warning(
'Requested formats are incompatible for merge and will be merged into mkv.')
# Ensure filename always has a correct extension for successful merge
filename = '%s.%s' % (filename_wo_ext, info_dict['ext'])
if os.path.exists(encodeFilename(filename)):
self.to_screen(
'[download] %s has already been downloaded and '
'merged' % filename)
else:
for f in requested_formats:
new_info = dict(info_dict)
new_info.update(f)
fname = self.prepare_filename(new_info)
fname = prepend_extension(fname, 'f%s' % f['format_id'], new_info['ext'])
downloaded.append(fname)
partial_success = dl(fname, new_info)
success = success and partial_success
info_dict['__postprocessors'] = postprocessors
info_dict['__files_to_merge'] = downloaded
else:
# Just a single file
success = dl(filename, info_dict)
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self.report_error('unable to download video data: %s' % error_to_compat_str(err))
return
except (OSError, IOError) as err:
raise UnavailableVideoError(err)
except (ContentTooShortError, ) as err:
self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
return
if success and filename != '-':
# Fixup content
fixup_policy = self.params.get('fixup')
if fixup_policy is None:
fixup_policy = 'detect_or_warn'
INSTALL_FFMPEG_MESSAGE = 'Install ffmpeg or avconv to fix this automatically.'
stretched_ratio = info_dict.get('stretched_ratio')
if stretched_ratio is not None and stretched_ratio != 1:
if fixup_policy == 'warn':
self.report_warning('%s: Non-uniform pixel ratio (%s)' % (
info_dict['id'], stretched_ratio))
elif fixup_policy == 'detect_or_warn':
stretched_pp = FFmpegFixupStretchedPP(self)
if stretched_pp.available:
info_dict.setdefault('__postprocessors', [])
info_dict['__postprocessors'].append(stretched_pp)
else:
self.report_warning(
'%s: Non-uniform pixel ratio (%s). %s'
% (info_dict['id'], stretched_ratio, INSTALL_FFMPEG_MESSAGE))
else:
assert fixup_policy in ('ignore', 'never')
if (info_dict.get('requested_formats') is None and
info_dict.get('container') == 'm4a_dash'):
if fixup_policy == 'warn':
self.report_warning(
'%s: writing DASH m4a. '
'Only some players support this container.'
% info_dict['id'])
elif fixup_policy == 'detect_or_warn':
fixup_pp = FFmpegFixupM4aPP(self)
if fixup_pp.available:
info_dict.setdefault('__postprocessors', [])
info_dict['__postprocessors'].append(fixup_pp)
else:
self.report_warning(
'%s: writing DASH m4a. '
'Only some players support this container. %s'
% (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
else:
assert fixup_policy in ('ignore', 'never')
if (info_dict.get('protocol') == 'm3u8_native' or
(info_dict.get('protocol') == 'm3u8' and
self.params.get('hls_prefer_native'))):
if fixup_policy == 'warn':
self.report_warning('%s: malformed AAC bitstream.' % (
info_dict['id']))
elif fixup_policy == 'detect_or_warn':
fixup_pp = FFmpegFixupM3u8PP(self)
if fixup_pp.available:
info_dict.setdefault('__postprocessors', [])
info_dict['__postprocessors'].append(fixup_pp)
else:
self.report_warning(
'%s: malformed AAC bitstream. %s'
% (info_dict['id'], INSTALL_FFMPEG_MESSAGE))
else:
assert fixup_policy in ('ignore', 'never')
try:
self.post_process(filename, info_dict)
except (PostProcessingError) as err:
self.report_error('postprocessing: %s' % str(err))
return
self.record_download_archive(info_dict)
def download(self, url_list):
"""Download a given list of URLs."""
outtmpl = self.params.get('outtmpl', DEFAULT_OUTTMPL)
if (len(url_list) > 1 and
outtmpl != '-' and
'%' not in outtmpl and
self.params.get('max_downloads') != 1):
raise SameFileError(outtmpl)
for url in url_list:
try:
# It also downloads the videos
res = self.extract_info(
url, force_generic_extractor=self.params.get('force_generic_extractor', False))
except UnavailableVideoError:
self.report_error('unable to download video')
except MaxDownloadsReached:
self.to_screen('[info] Maximum number of downloaded files reached.')
raise
else:
if self.params.get('dump_single_json', False):
self.to_stdout(json.dumps(res))
return self._download_retcode
def download_with_info_file(self, info_filename):
with contextlib.closing(fileinput.FileInput(
[info_filename], mode='r',
openhook=fileinput.hook_encoded('utf-8'))) as f:
# FileInput doesn't have a read method, so we can't call json.load
info = self.filter_requested_info(json.loads('\n'.join(f)))
try:
self.process_ie_result(info, download=True)
except DownloadError:
webpage_url = info.get('webpage_url')
if webpage_url is not None:
self.report_warning('The info failed to download, trying with "%s"' % webpage_url)
return self.download([webpage_url])
else:
raise
return self._download_retcode
@staticmethod
def filter_requested_info(info_dict):
return dict(
(k, v) for k, v in info_dict.items()
if k not in ['requested_formats', 'requested_subtitles'])
def post_process(self, filename, ie_info):
"""Run all the postprocessors on the given file."""
info = dict(ie_info)
info['filepath'] = filename
pps_chain = []
if ie_info.get('__postprocessors') is not None:
pps_chain.extend(ie_info['__postprocessors'])
pps_chain.extend(self._pps)
for pp in pps_chain:
files_to_delete = []
try:
files_to_delete, info = pp.run(info)
except PostProcessingError as e:
self.report_error(e.msg)
if files_to_delete and not self.params.get('keepvideo', False):
for old_filename in files_to_delete:
self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
try:
os.remove(encodeFilename(old_filename))
except (IOError, OSError):
self.report_warning('Unable to remove downloaded original file')
def _make_archive_id(self, info_dict):
# Future-proof against any change in case,
# and for backwards compatibility with prior versions
extractor = info_dict.get('extractor_key')
if extractor is None:
if 'id' in info_dict:
extractor = info_dict.get('ie_key') # key in a playlist
if extractor is None:
return None # Incomplete video information
return extractor.lower() + ' ' + info_dict['id']
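# e.g. {'extractor_key': 'Youtube', 'id': 'dQw4w9WgXcQ'} yields
# 'youtube dQw4w9WgXcQ' (the id value here is only an example)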
def in_download_archive(self, info_dict):
fn = self.params.get('download_archive')
if fn is None:
return False
vid_id = self._make_archive_id(info_dict)
if vid_id is None:
return False # Incomplete video information
try:
with locked_file(fn, 'r', encoding='utf-8') as archive_file:
for line in archive_file:
if line.strip() == vid_id:
return True
except IOError as ioe:
if ioe.errno != errno.ENOENT:
raise
return False
def record_download_archive(self, info_dict):
fn = self.params.get('download_archive')
if fn is None:
return
vid_id = self._make_archive_id(info_dict)
assert vid_id
with locked_file(fn, 'a', encoding='utf-8') as archive_file:
archive_file.write(vid_id + '\n')
@staticmethod
def format_resolution(format, default='unknown'):
if format.get('vcodec') == 'none':
return 'audio only'
if format.get('resolution') is not None:
return format['resolution']
if format.get('height') is not None:
if format.get('width') is not None:
res = '%sx%s' % (format['width'], format['height'])
else:
res = '%sp' % format['height']
elif format.get('width') is not None:
res = '%dx?' % format['width']
else:
res = default
return res
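# e.g. width=1280/height=720 -> '1280x720', height only -> '720p',
# width only -> '1280x?', and audio-only formats -> 'audio only'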
def _format_note(self, fdict):
res = ''
if fdict.get('ext') in ['f4f', 'f4m']:
res += '(unsupported) '
if fdict.get('language'):
if res:
res += ' '
res += '[%s] ' % fdict['language']
if fdict.get('format_note') is not None:
res += fdict['format_note'] + ' '
if fdict.get('tbr') is not None:
res += '%4dk ' % fdict['tbr']
if fdict.get('container') is not None:
if res:
res += ', '
res += '%s container' % fdict['container']
if (fdict.get('vcodec') is not None and
fdict.get('vcodec') != 'none'):
if res:
res += ', '
res += fdict['vcodec']
if fdict.get('vbr') is not None:
res += '@'
elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
res += 'video@'
if fdict.get('vbr') is not None:
res += '%4dk' % fdict['vbr']
if fdict.get('fps') is not None:
if res:
res += ', '
res += '%sfps' % fdict['fps']
if fdict.get('acodec') is not None:
if res:
res += ', '
if fdict['acodec'] == 'none':
res += 'video only'
else:
res += '%-5s' % fdict['acodec']
elif fdict.get('abr') is not None:
if res:
res += ', '
res += 'audio'
if fdict.get('abr') is not None:
res += '@%3dk' % fdict['abr']
if fdict.get('asr') is not None:
res += ' (%5dHz)' % fdict['asr']
if fdict.get('filesize') is not None:
if res:
res += ', '
res += format_bytes(fdict['filesize'])
elif fdict.get('filesize_approx') is not None:
if res:
res += ', '
res += '~' + format_bytes(fdict['filesize_approx'])
return res
def list_formats(self, info_dict):
formats = info_dict.get('formats', [info_dict])
table = [
[f['format_id'], f['ext'], self.format_resolution(f), self._format_note(f)]
for f in formats
if f.get('preference') is None or f['preference'] >= -1000]
if len(formats) > 1:
table[-1][-1] += (' ' if table[-1][-1] else '') + '(best)'
header_line = ['format code', 'extension', 'resolution', 'note']
self.to_screen(
'[info] Available formats for %s:\n%s' %
(info_dict['id'], render_table(header_line, table)))
def list_thumbnails(self, info_dict):
thumbnails = info_dict.get('thumbnails')
if not thumbnails:
self.to_screen('[info] No thumbnails present for %s' % info_dict['id'])
return
self.to_screen(
'[info] Thumbnails for %s:' % info_dict['id'])
self.to_screen(render_table(
['ID', 'width', 'height', 'URL'],
[[t['id'], t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails]))
def list_subtitles(self, video_id, subtitles, name='subtitles'):
if not subtitles:
self.to_screen('%s has no %s' % (video_id, name))
return
self.to_screen(
'Available %s for %s:' % (name, video_id))
self.to_screen(render_table(
['Language', 'formats'],
[[lang, ', '.join(f['ext'] for f in reversed(formats))]
for lang, formats in subtitles.items()]))
def urlopen(self, req):
""" Start an HTTP download """
if isinstance(req, compat_basestring):
req = sanitized_Request(req)
return self._opener.open(req, timeout=self._socket_timeout)
def print_debug_header(self):
if not self.params.get('verbose'):
return
if type('') is not compat_str:
# Python 2.6 on SLES11 SP1 (https://github.com/rg3/youtube-dl/issues/3326)
self.report_warning(
'Your Python is broken! Update to a newer and supported version')
stdout_encoding = getattr(
sys.stdout, 'encoding', 'missing (%s)' % type(sys.stdout).__name__)
encoding_str = (
'[debug] Encodings: locale %s, fs %s, out %s, pref %s\n' % (
locale.getpreferredencoding(),
sys.getfilesystemencoding(),
stdout_encoding,
self.get_encoding()))
write_string(encoding_str, encoding=None)
self._write_string('[debug] youtube-dl version ' + __version__ + '\n')
if _LAZY_LOADER:
self._write_string('[debug] Lazy loading extractors enabled' + '\n')
try:
sp = subprocess.Popen(
['git', 'rev-parse', '--short', 'HEAD'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE,
cwd=os.path.dirname(os.path.abspath(__file__)))
out, err = sp.communicate()
out = out.decode().strip()
if re.match('[0-9a-f]+', out):
self._write_string('[debug] Git HEAD: ' + out + '\n')
except Exception:
try:
sys.exc_clear()
except Exception:
pass
self._write_string('[debug] Python version %s - %s\n' % (
platform.python_version(), platform_name()))
exe_versions = FFmpegPostProcessor.get_versions(self)
exe_versions['rtmpdump'] = rtmpdump_version()
exe_str = ', '.join(
'%s %s' % (exe, v)
for exe, v in sorted(exe_versions.items())
if v
)
if not exe_str:
exe_str = 'none'
self._write_string('[debug] exe versions: %s\n' % exe_str)
proxy_map = {}
for handler in self._opener.handlers:
if hasattr(handler, 'proxies'):
proxy_map.update(handler.proxies)
self._write_string('[debug] Proxy map: ' + compat_str(proxy_map) + '\n')
if self.params.get('call_home', False):
ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
self._write_string('[debug] Public IP address: %s\n' % ipaddr)
latest_version = self.urlopen(
'https://yt-dl.org/latest/version').read().decode('utf-8')
if version_tuple(latest_version) > version_tuple(__version__):
self.report_warning(
'You are using an outdated version (newest version: %s)! '
'See https://yt-dl.org/update if you need help updating.' %
latest_version)
def _setup_opener(self):
timeout_val = self.params.get('socket_timeout')
self._socket_timeout = 600 if timeout_val is None else float(timeout_val)
opts_cookiefile = self.params.get('cookiefile')
opts_proxy = self.params.get('proxy')
if opts_cookiefile is None:
self.cookiejar = compat_cookiejar.CookieJar()
else:
opts_cookiefile = expand_path(opts_cookiefile)
self.cookiejar = compat_cookiejar.MozillaCookieJar(
opts_cookiefile)
if os.access(opts_cookiefile, os.R_OK):
self.cookiejar.load()
cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
if opts_proxy is not None:
if opts_proxy == '':
proxies = {}
else:
proxies = {'http': opts_proxy, 'https': opts_proxy}
else:
proxies = compat_urllib_request.getproxies()
# Set HTTPS proxy to HTTP one if given (https://github.com/rg3/youtube-dl/issues/805)
if 'http' in proxies and 'https' not in proxies:
proxies['https'] = proxies['http']
proxy_handler = PerRequestProxyHandler(proxies)
debuglevel = 1 if self.params.get('debug_printtraffic') else 0
https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
data_handler = compat_urllib_request_DataHandler()
# When passing our own FileHandler instance, build_opener won't add the
# default FileHandler and allows us to disable the file protocol, which
# can be used for malicious purposes (see
# https://github.com/rg3/youtube-dl/issues/8227)
file_handler = compat_urllib_request.FileHandler()
def file_open(*args, **kwargs):
raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in youtube-dl for security reasons')
file_handler.file_open = file_open
opener = compat_urllib_request.build_opener(
proxy_handler, https_handler, cookie_processor, ydlh, data_handler, file_handler)
# Delete the default user-agent header, which would otherwise apply in
# cases where our custom HTTP handler doesn't come into play
# (See https://github.com/rg3/youtube-dl/issues/1309 for details)
opener.addheaders = []
self._opener = opener
def encode(self, s):
if isinstance(s, bytes):
return s # Already encoded
try:
return s.encode(self.get_encoding())
except UnicodeEncodeError as err:
err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
raise
def get_encoding(self):
encoding = self.params.get('encoding')
if encoding is None:
encoding = preferredencoding()
return encoding
def _write_thumbnails(self, info_dict, filename):
if self.params.get('writethumbnail', False):
thumbnails = info_dict.get('thumbnails')
if thumbnails:
thumbnails = [thumbnails[-1]]
elif self.params.get('write_all_thumbnails', False):
thumbnails = info_dict.get('thumbnails')
else:
return
if not thumbnails:
# No thumbnails present, so return immediately
return
for t in thumbnails:
thumb_ext = determine_ext(t['url'], 'jpg')
suffix = '_%s' % t['id'] if len(thumbnails) > 1 else ''
thumb_display_id = '%s ' % t['id'] if len(thumbnails) > 1 else ''
t['filename'] = thumb_filename = os.path.splitext(filename)[0] + suffix + '.' + thumb_ext
if self.params.get('nooverwrites', False) and os.path.exists(encodeFilename(thumb_filename)):
self.to_screen('[%s] %s: Thumbnail %sis already present' %
(info_dict['extractor'], info_dict['id'], thumb_display_id))
else:
self.to_screen('[%s] %s: Downloading thumbnail %s...' %
(info_dict['extractor'], info_dict['id'], thumb_display_id))
try:
uf = self.urlopen(t['url'])
with open(encodeFilename(thumb_filename), 'wb') as thumbf:
shutil.copyfileobj(uf, thumbf)
self.to_screen('[%s] %s: Writing thumbnail %sto: %s' %
(info_dict['extractor'], info_dict['id'], thumb_display_id, thumb_filename))
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self.report_warning('Unable to download thumbnail "%s": %s' %
(t['url'], error_to_compat_str(err)))
| unlicense |
kalxas/geonode | geonode/maps/management/commands/fix_baselayers.py | 24 | 1412 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.core.management.base import BaseCommand
from geonode.maps.models import Map
from geonode.maps.utils import fix_baselayers
class Command(BaseCommand):
help = ('Fix base layers for all of the GeoNode maps or for a given map.\n\n'
'Arguments:\n'
'map_id - numeric map ID (optional)\n')
args = 'map_id'
def handle(self, *args, **options):
if len(args) == 1:
map_id = args[0]
fix_baselayers(map_id)
else:
for map in Map.objects.all():
fix_baselayers(map.id)
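# Example invocation (paths assumed):
#   python manage.py fix_baselayers 42   # fix a single map by id
#   python manage.py fix_baselayers      # fix every map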
| gpl-3.0 |
TangHao1987/intellij-community | python/helpers/pydev/third_party/pep8/lib2to3/lib2to3/btm_matcher.py | 326 | 6834 | """A bottom-up tree matching algorithm implementation meant to speed
up 2to3's matching process. After the tree patterns are reduced to
their rarest linear path, a linear Aho-Corasick automaton is
created. The linear automaton traverses the linear paths from the
leaves to the root of the AST and returns a set of nodes for further
matching. This reduces significantly the number of candidate nodes."""
__author__ = "George Boutsioukis <[email protected]>"
import logging
import itertools
from collections import defaultdict
from . import pytree
from .btm_utils import reduce_tree
class BMNode(object):
"""Class for a node of the Aho-Corasick automaton used in matching"""
count = itertools.count()
def __init__(self):
self.transition_table = {}
self.fixers = []
self.id = next(BMNode.count)
self.content = ''
class BottomMatcher(object):
"""The main matcher class. After instantiating the patterns should
be added using the add_fixer method"""
def __init__(self):
self.match = set()
self.root = BMNode()
self.nodes = [self.root]
self.fixers = []
self.logger = logging.getLogger("RefactoringTool")
def add_fixer(self, fixer):
"""Reduces a fixer's pattern tree to a linear path and adds it
to the matcher(a common Aho-Corasick automaton). The fixer is
appended on the matching states and called when they are
reached"""
self.fixers.append(fixer)
tree = reduce_tree(fixer.pattern_tree)
linear = tree.get_linear_subpattern()
match_nodes = self.add(linear, start=self.root)
for match_node in match_nodes:
match_node.fixers.append(fixer)
def add(self, pattern, start):
"Recursively adds a linear pattern to the AC automaton"
#print("adding pattern", pattern, "to", start)
if not pattern:
#print("empty pattern")
return [start]
if isinstance(pattern[0], tuple):
#alternatives
#print("alternatives")
match_nodes = []
for alternative in pattern[0]:
#add all alternatives, and add the rest of the pattern
#to each end node
end_nodes = self.add(alternative, start=start)
for end in end_nodes:
match_nodes.extend(self.add(pattern[1:], end))
return match_nodes
else:
#single token
#not last
if pattern[0] not in start.transition_table:
#transition did not exist, create new
next_node = BMNode()
start.transition_table[pattern[0]] = next_node
else:
#transition exists already, follow
next_node = start.transition_table[pattern[0]]
if pattern[1:]:
end_nodes = self.add(pattern[1:], start=next_node)
else:
end_nodes = [next_node]
return end_nodes
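# e.g. adding the linear pattern [NAME, '('] from the root creates
# root --NAME--> n1 --'('--> n2 and returns [n2]; a tuple of
# alternatives branches here, and the tail is appended to every end node.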
def run(self, leaves):
"""The main interface with the bottom matcher. The tree is
traversed from the bottom using the constructed
automaton. Nodes are only checked once as the tree is
retraversed. When the automaton fails, we give it one more
shot(in case the above tree matches as a whole with the
rejected leaf), then we break for the next leaf. There is the
special case of multiple arguments(see code comments) where we
recheck the nodes
Args:
The leaves of the AST tree to be matched
Returns:
A dictionary of node matches with fixers as the keys
"""
current_ac_node = self.root
results = defaultdict(list)
for leaf in leaves:
current_ast_node = leaf
while current_ast_node:
current_ast_node.was_checked = True
for child in current_ast_node.children:
# multiple statements, recheck
if isinstance(child, pytree.Leaf) and child.value == u";":
current_ast_node.was_checked = False
break
if current_ast_node.type == 1:
#name
node_token = current_ast_node.value
else:
node_token = current_ast_node.type
if node_token in current_ac_node.transition_table:
#token matches
current_ac_node = current_ac_node.transition_table[node_token]
for fixer in current_ac_node.fixers:
if fixer not in results:
results[fixer] = []
results[fixer].append(current_ast_node)
else:
#matching failed, reset automaton
current_ac_node = self.root
if (current_ast_node.parent is not None
and current_ast_node.parent.was_checked):
#the rest of the tree upwards has been checked, next leaf
break
#recheck the rejected node once from the root
if node_token in current_ac_node.transition_table:
#token matches
current_ac_node = current_ac_node.transition_table[node_token]
for fixer in current_ac_node.fixers:
if fixer not in results:
results[fixer] = []
results[fixer].append(current_ast_node)
current_ast_node = current_ast_node.parent
return results
def print_ac(self):
"Prints a graphviz diagram of the BM automaton(for debugging)"
print("digraph g{")
def print_node(node):
for subnode_key in node.transition_table.keys():
subnode = node.transition_table[subnode_key]
print("%d -> %d [label=%s] //%s" %
(node.id, subnode.id, type_repr(subnode_key), str(subnode.fixers)))
if subnode_key == 1:
print(subnode.content)
print_node(subnode)
print_node(self.root)
print("}")
# taken from pytree.py for debugging; only used by print_ac
_type_reprs = {}
def type_repr(type_num):
global _type_reprs
if not _type_reprs:
from .pygram import python_symbols
# printing tokens is possible but not as useful;
# that would use: from .pgen2 import token, then token.__dict__.items()
for name, val in python_symbols.__dict__.items():
if type(val) == int: _type_reprs[val] = name
return _type_reprs.setdefault(type_num, type_num)
| apache-2.0 |
BobCromwell/gyp | test/win/gyptest-link-opt-ref.py | 344 | 1145 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure reference optimization setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'linker-flags'
test.run_gyp('opt-ref.gyp', chdir=CHDIR)
test.build('opt-ref.gyp', chdir=CHDIR)
# We're specifying /DEBUG so the default is to not remove unused functions.
output = test.run_dumpbin(
'/disasm', test.built_file_path('test_optref_default.exe', chdir=CHDIR))
if 'unused_function' not in output:
test.fail_test()
# Explicitly off, unused_function preserved.
output = test.run_dumpbin(
'/disasm', test.built_file_path('test_optref_no.exe', chdir=CHDIR))
if 'unused_function' not in output:
test.fail_test()
# Explicitly on, should be removed.
output = test.run_dumpbin(
'/disasm', test.built_file_path('test_optref_yes.exe', chdir=CHDIR))
if 'unused_function' in output:
test.fail_test()
test.pass_test()
| bsd-3-clause |
tlatzko/spmcluster | .tox/docs/lib/python2.7/site-packages/pygments/lexers/ambient.py | 72 | 2557 | # -*- coding: utf-8 -*-
"""
pygments.lexers.ambient
~~~~~~~~~~~~~~~~~~~~~~~
Lexers for AmbientTalk language.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, include, words
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['AmbientTalkLexer']
class AmbientTalkLexer(RegexLexer):
"""
Lexer for `AmbientTalk <https://code.google.com/p/ambienttalk>`_ source code.
.. versionadded:: 2.0
"""
name = 'AmbientTalk'
filenames = ['*.at']
aliases = ['at', 'ambienttalk', 'ambienttalk/2']
mimetypes = ['text/x-ambienttalk']
flags = re.MULTILINE | re.DOTALL
builtin = words(('if:', 'then:', 'else:', 'when:', 'whenever:', 'discovered:',
'disconnected:', 'reconnected:', 'takenOffline:', 'becomes:',
'export:', 'as:', 'object:', 'actor:', 'mirror:', 'taggedAs:',
'mirroredBy:', 'is:'))
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'(def|deftype|import|alias|exclude)\b', Keyword),
(builtin, Name.Builtin),
(r'(true|false|nil)\b', Keyword.Constant),
(r'(~|lobby|jlobby|/)\.', Keyword.Constant, 'namespace'),
(r'"(\\\\|\\"|[^"])*"', String),
(r'\|', Punctuation, 'arglist'),
(r'<:|[*^!%&<>+=,./?-]|:=', Operator),
(r"`[a-zA-Z_]\w*", String.Symbol),
(r"[a-zA-Z_]\w*:", Name.Function),
(r"[{}()\[\];`]", Punctuation),
(r'(self|super)\b', Name.Variable.Instance),
(r"[a-zA-Z_]\w*", Name.Variable),
(r"@[a-zA-Z_]\w*", Name.Class),
(r"@\[", Name.Class, 'annotations'),
include('numbers'),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer)
],
'namespace': [
(r'[a-zA-Z_]\w*\.', Name.Namespace),
(r'[a-zA-Z_]\w*:', Name.Function, '#pop'),
(r'[a-zA-Z_]\w*(?!\.)', Name.Function, '#pop')
],
'annotations': [
(r"(.*?)\]", Name.Class, '#pop')
],
'arglist': [
(r'\|', Punctuation, '#pop'),
(r'\s*(,)\s*', Punctuation),
(r'[a-zA-Z_]\w*', Name.Variable),
],
}
| bsd-2-clause |
marma/rdflib | setup.py | 5 | 5640 | #!/usr/bin/env python
import sys
import os
import re
def setup_python3():
# Taken from "distribute" setup.py
from distutils.filelist import FileList
from distutils import dir_util, file_util, util, log
from os.path import join, exists
tmp_src = join("build", "src")
# Not covered by "setup.py clean --all", so explicit deletion required.
if exists(tmp_src):
dir_util.remove_tree(tmp_src)
log.set_verbosity(1)
fl = FileList()
for line in open("MANIFEST.in"):
if not line.strip():
continue
fl.process_template_line(line)
dir_util.create_tree(tmp_src, fl.files)
outfiles_2to3 = []
for f in fl.files:
outf, copied = file_util.copy_file(f, join(tmp_src, f), update=1)
if copied and outf.endswith(".py"):
outfiles_2to3.append(outf)
util.run_2to3(outfiles_2to3)
# arrange setup to use the copy
sys.path.insert(0, tmp_src)
return tmp_src
kwargs = {}
if sys.version_info[0] >= 3:
from setuptools import setup
# kwargs['use_2to3'] = True # is done in setup_python3 above already
kwargs['install_requires'] = ['isodate', 'pyparsing']
kwargs['tests_require'] = ['html5lib', 'networkx']
kwargs['requires'] = [
'isodate', 'pyparsing',
'SPARQLWrapper']
kwargs['src_root'] = setup_python3()
assert setup
else:
try:
from setuptools import setup
assert setup
kwargs['test_suite'] = "nose.collector"
kwargs['install_requires'] = [
'isodate',
'pyparsing', 'SPARQLWrapper']
kwargs['tests_require'] = ['networkx']
if sys.version_info[1]<7: # Python 2.6
kwargs['install_requires'].append('ordereddict')
if sys.version_info[1]<6: # Python 2.5
kwargs['install_requires'].append('pyparsing<=1.5.7')
kwargs['install_requires'].append('simplejson')
kwargs['install_requires'].append('html5lib==0.95')
else:
kwargs['install_requires'].append('html5lib')
except ImportError:
from distutils.core import setup
# Find version. We have to do this because we can't import it in Python 3 until
# it's been automatically converted in the setup process.
def find_version(filename):
_version_re = re.compile(r'__version__ = "(.*)"')
for line in open(filename):
version_match = _version_re.match(line)
if version_match:
return version_match.group(1)
version = find_version('rdflib/__init__.py')
packages = ['rdflib',
'rdflib/extras',
'rdflib/plugins',
'rdflib/plugins/parsers',
'rdflib/plugins/parsers/pyRdfa',
'rdflib/plugins/parsers/pyRdfa/transform',
'rdflib/plugins/parsers/pyRdfa/extras',
'rdflib/plugins/parsers/pyRdfa/host',
'rdflib/plugins/parsers/pyRdfa/rdfs',
'rdflib/plugins/parsers/pyMicrodata',
'rdflib/plugins/serializers',
'rdflib/plugins/sparql',
'rdflib/plugins/sparql/results',
'rdflib/plugins/stores',
'rdflib/tools'
]
if os.environ.get('READTHEDOCS', None):
# if building docs for RTD
# install examples, to get docstrings
packages.append("examples")
setup(
name='rdflib',
version=version,
description="RDFLib is a Python library for working with RDF, a " + \
"simple yet powerful language for representing information.",
author="Daniel 'eikeon' Krech",
author_email="[email protected]",
maintainer="RDFLib Team",
maintainer_email="[email protected]",
url="https://github.com/RDFLib/rdflib",
license="https://raw.github.com/RDFLib/rdflib/master/LICENSE",
platforms=["any"],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"License :: OSI Approved :: BSD License",
"Topic :: Software Development :: Libraries :: Python Modules",
"Operating System :: OS Independent",
"Natural Language :: English",
],
long_description="""\
RDFLib is a Python library for working with
RDF, a simple yet powerful language for representing information.
The library contains parsers and serializers for RDF/XML, N3,
NTriples, Turtle, TriX, RDFa and Microdata. The library presents
a Graph interface which can be backed by any one of a number of
Store implementations. The core rdflib includes store
implementations for in-memory storage, persistent storage on top
of the Berkeley DB, and a wrapper for remote SPARQL endpoints.
A SPARQL 1.1 engine is also included.
If you have recently reported a bug marked as fixed, or have a craving for
the very latest, you may want the development version instead:
easy_install https://github.com/RDFLib/rdflib/tarball/master
Read the docs at:
http://rdflib.readthedocs.org
""",
packages = packages,
entry_points = {
'console_scripts': [
'rdfpipe = rdflib.tools.rdfpipe:main',
'csv2rdf = rdflib.tools.csv2rdf:main',
'rdf2dot = rdflib.tools.rdf2dot:main',
'rdfs2dot = rdflib.tools.rdfs2dot:main',
'rdfgraphisomorphism = rdflib.tools.graphisomorphism:main',
],
},
**kwargs
)
| bsd-3-clause |
vrsource/mapproxy | mapproxy/util/yaml.py | 2 | 1593 | # This file is part of the MapProxy project.
# Copyright (C) 2011 Omniscale <http://omniscale.de>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from mapproxy.compat import string_type
import yaml
class YAMLError(Exception):
pass
def load_yaml_file(file_or_filename):
"""
Load yaml from file object or filename.
"""
if isinstance(file_or_filename, string_type):
with open(file_or_filename, 'rb') as f:
return load_yaml(f)
return load_yaml(file_or_filename)
def load_yaml(doc):
"""
Load yaml from file object or string.
"""
try:
if getattr(yaml, '__with_libyaml__', False):
try:
return yaml.load(doc, Loader=yaml.CLoader)
except AttributeError:
# handle cases where __with_libyaml__ is True but
# CLoader doesn't work (missing .dispose())
return yaml.load(doc)
return yaml.load(doc)
except (yaml.scanner.ScannerError, yaml.parser.ParserError) as ex:
raise YAMLError(str(ex))
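# Example (values assumed):
#   load_yaml('key: [1, 2]') -> {'key': [1, 2]}
#   load_yaml('key: [1, 2') -> raises YAMLError instead of a raw yaml exception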
| apache-2.0 |
grevutiu-gabriel/phantomjs | src/qt/qtwebkit/Source/ThirdParty/gtest/scripts/upload_gtest.py | 1963 | 2851 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
[email protected] to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = '[email protected]'
def main():
# Finds the path to upload.py, assuming it is in the same directory
# as this file.
my_dir = os.path.dirname(os.path.abspath(__file__))
upload_py_path = os.path.join(my_dir, 'upload.py')
# Adds Google Test discussion group to the cc line if it's not there
# already.
upload_py_argv = [upload_py_path]
found_cc_flag = False
for arg in sys.argv[1:]:
if arg.startswith(CC_FLAG):
found_cc_flag = True
cc_line = arg[len(CC_FLAG):]
cc_list = [addr for addr in cc_line.split(',') if addr]
if GTEST_GROUP not in cc_list:
cc_list.append(GTEST_GROUP)
upload_py_argv.append(CC_FLAG + ','.join(cc_list))
else:
upload_py_argv.append(arg)
if not found_cc_flag:
upload_py_argv.append(CC_FLAG + GTEST_GROUP)
# Invokes upload.py with the modified command line flags.
os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
| bsd-3-clause |
coordcn/LuaIO | tools/gyp/pylib/gyp/input_test.py | 1841 | 3207 | #!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the input.py file."""
import gyp.input
import unittest
import sys
class TestFindCycles(unittest.TestCase):
def setUp(self):
self.nodes = {}
for x in ('a', 'b', 'c', 'd', 'e'):
self.nodes[x] = gyp.input.DependencyGraphNode(x)
def _create_dependency(self, dependent, dependency):
dependent.dependencies.append(dependency)
dependency.dependents.append(dependent)
def test_no_cycle_empty_graph(self):
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_line(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_dag(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['a'], self.nodes['c'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_cycle_self_reference(self):
self._create_dependency(self.nodes['a'], self.nodes['a'])
self.assertEquals([[self.nodes['a'], self.nodes['a']]],
self.nodes['a'].FindCycles())
def test_cycle_two_nodes(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self.assertEquals([[self.nodes['a'], self.nodes['b'], self.nodes['a']]],
self.nodes['a'].FindCycles())
self.assertEquals([[self.nodes['b'], self.nodes['a'], self.nodes['b']]],
self.nodes['b'].FindCycles())
def test_two_cycles(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['b'])
cycles = self.nodes['a'].FindCycles()
self.assertTrue(
[self.nodes['a'], self.nodes['b'], self.nodes['a']] in cycles)
self.assertTrue(
[self.nodes['b'], self.nodes['c'], self.nodes['b']] in cycles)
self.assertEquals(2, len(cycles))
def test_big_cycle(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
self._create_dependency(self.nodes['d'], self.nodes['e'])
self._create_dependency(self.nodes['e'], self.nodes['a'])
self.assertEquals([[self.nodes['a'],
self.nodes['b'],
self.nodes['c'],
self.nodes['d'],
self.nodes['e'],
self.nodes['a']]],
self.nodes['a'].FindCycles())
if __name__ == '__main__':
unittest.main()
| mit |
sjbog/ajenti | plugins/samba/main.py | 17 | 11071 | from ajenti.api import *
from ajenti.ui import *
from ajenti.utils import *
from ajenti import apis
import backend
class SambaPlugin(apis.services.ServiceControlPlugin):
text = 'Samba'
icon = '/dl/samba/icon.png'
folder = 'servers'
service_name = 'smbd'
def on_session_start(self):
self._tab = 0
self._cfg = backend.SambaConfig(self.app)
self._cfg.load()
self._editing_share = None
self._editing_user = None
self._editing = None
self._adding_user = False
def get_main_ui(self):
ui = self.app.inflate('samba:main')
ui.find('tabs').set('active', self._tab)
# Shares
for h in self._cfg.get_shares():
r = UI.DTR(
UI.Image(file='/dl/core/ui/stock/folder.png'),
UI.Label(text=h),
UI.Label(text=self._cfg.shares[h]['path']),
UI.HContainer(
UI.TipIcon(icon='/dl/core/ui/stock/edit.png',
text='Edit', id='editshare/' + h),
UI.TipIcon(
icon='/dl/core/ui/stock/delete.png',
text='Delete', id='delshare/' + h, warning='Delete share %s'%h)
),
)
ui.append('shares', r)
if not self._editing_share is None:
if self._editing_share == '':
ui.append('main', self.get_ui_edit_share())
else:
ui.append('main', self.get_ui_edit_share(
self._cfg.shares[self._editing_share]
))
# Users
for h in sorted(self._cfg.users.keys()):
r = UI.DTR(
UI.Image(file='/dl/core/ui/stock/user.png'),
UI.Label(text=h),
UI.HContainer(
UI.TipIcon(icon='/dl/core/ui/stock/edit.png',
text='Edit', id='edituser/' + h),
UI.TipIcon(
icon='/dl/core/ui/stock/delete.png',
text='Delete', id='deluser/' + h, warning='Delete user %s'%h)
),
)
ui.append('users', r)
if not self._editing_user is None:
if self._editing_user == '':
ui.append('main', self.get_ui_edit_user())
else:
                if self._editing_user not in self._cfg.users:
self.put_message('err', 'User not found')
self._editing_user = None
else:
ui.append('main', self.get_ui_edit_user(
self._cfg.users[self._editing_user]
))
        if self._editing is not None:
ui.append('main', UI.InputBox(
title=self._editing,
value=self._cfg.users[self._editing_user][self._editing],
id='dlgEdit'
))
if self._adding_user:
ui.append('main', UI.InputBox(
title='New user',
text='Unix login:',
id='dlgAddUser'
))
# Config
ui.append('tab2', self.get_ui_general())
return ui
def get_ui_edit_share(self, s=None):
if s is None or s == '':
s = self._cfg.new_share()
dlg = UI.DialogBox(
UI.Container(
UI.Formline(
UI.TextInput(name='name', value='new'),
text='Name',
) if self._editing_share == '' else None,
UI.Formline(
UI.TextInput(name='path', value=s['path']),
text='Path',
),
UI.Formline(
UI.TextInput(name='valid users', value=s['valid users']),
text='Valid users',
),
UI.Formline(
UI.Checkbox( name='available', checked=s['available']=='yes'),
text='Available',
),
UI.Formline(
UI.Checkbox(name='browseable', checked=s['browseable']=='yes'),
text='Browseable',
),
UI.Formline(
UI.Checkbox(name='read only', checked=s['read only']=='yes'),
text='Read only',
),
UI.Formline(
UI.Checkbox(name='guest ok', checked=s['guest ok']=='yes'),
text='Guest access'
),
UI.Formline(
UI.Checkbox(name='guest only', checked=s['guest only']=='yes'),
text='Force guest',
)
),
id='dlgEditShare',
title='Edit share'
)
return dlg
def get_ui_edit_user(self, u=None):
t = UI.Container()
for k in self._cfg.fields:
            if u is not None and k in u:
t.append(
UI.Formline(
UI.Label(text=u[k]),
UI.Button(design='mini',
text='Change', id='chuser/'+k) if k in self._cfg.editable else None,
text=k
)
)
dlg = UI.DialogBox(
t,
title='Edit user',
id='dlgEditUser'
)
return dlg
def get_ui_general(self):
dlg = UI.FormBox(
UI.Formline(
UI.TextInput(name='server string', value=self._cfg.general['server string']),
text='Machine description',
),
UI.Formline(
UI.TextInput(name='workgroup', value=self._cfg.general['workgroup']),
text='Workgroup',
),
UI.Formline(
UI.TextInput(name='interfaces', value=self._cfg.general['interfaces']),
text='Listen on interfaces',
),
UI.Formline(
UI.TextInput(name='socket options', value=self._cfg.general['socket options']),
text='Socket options',
),
UI.Formline(
UI.SelectInput(
UI.SelectOption(text='Share', value='share',
selected=self._cfg.general['security']=='share'),
UI.SelectOption(text='User', value='user',
selected=self._cfg.general['security']=='user'),
UI.SelectOption(text='Password', value='password',
selected=self._cfg.general['security']=='password'),
UI.SelectOption(text='Other server', value='server',
selected=self._cfg.general['security']=='server'),
UI.SelectOption(text='Active Directory', value='ads',
selected=self._cfg.general['security']=='ads'),
name='security'
),
text='Security',
),
UI.Formline(
UI.TextInput(name='password server', value=self._cfg.general['password server']),
text='Password server',
),
id='frmGeneral'
)
return dlg
@event('button/click')
def on_click(self, event, params, vars=None):
if params[0] == 'restart':
backend.restart()
if params[0] == 'editshare':
self._editing_share = params[1]
self._tab = 0
if params[0] == 'delshare':
if params[1] in self._cfg.shares.keys():
del self._cfg.shares[params[1]]
self._cfg.save()
self._tab = 0
if params[0] == 'newshare':
self._editing_share = ''
self._tab = 0
if params[0] == 'edituser':
self._editing_user = params[1]
self._tab = 1
if params[0] == 'newuser':
self._adding_user = True
self._tab = 1
if params[0] == 'deluser':
self._cfg.del_user(params[1])
self._cfg.load()
self._tab = 1
if params[0] == 'chuser':
self._tab = 1
self._editing = params[1]
@event('dialog/submit')
@event('form/submit')
def on_submit(self, event, params, vars=None):
if params[0] == 'dlgEditShare':
if vars.getvalue('action', '') == 'OK':
es = self._editing_share
if es == '':
es = vars.getvalue('name', 'new')
self._cfg.shares[es] = self._cfg.new_share()
self._cfg.set_param_from_vars(es, 'path', vars)
self._cfg.set_param_from_vars(es, 'valid users', vars)
self._cfg.set_param_from_vars_yn(es, 'available', vars)
self._cfg.set_param_from_vars_yn(es, 'browseable', vars)
self._cfg.set_param_from_vars_yn(es, 'read only', vars)
self._cfg.set_param_from_vars_yn(es, 'guest ok', vars)
self._cfg.set_param_from_vars_yn(es, 'guest only', vars)
self._cfg.save()
self._editing_share = None
if params[0] == 'frmGeneral':
if vars.getvalue('action', '') == 'OK':
self._cfg.set_param_from_vars('general', 'server string', vars)
self._cfg.set_param_from_vars('general', 'workgroup', vars)
self._cfg.set_param_from_vars('general', 'interfaces', vars)
self._cfg.set_param_from_vars('general', 'socket options', vars)
self._cfg.set_param_from_vars('general', 'security', vars)
self._cfg.set_param_from_vars('general', 'password server', vars)
self._cfg.save()
self._tab = 2
if params[0] == 'dlgEditUser':
self._editing_user = None
if params[0] == 'dlgAddUser':
v = vars.getvalue('value', '')
if vars.getvalue('action', '') == 'OK':
if v != '':
self._cfg.add_user(v)
self._cfg.load()
self._editing_user = v
self._adding_user = False
if params[0] == 'dlgEdit':
if vars.getvalue('action', '') == 'OK':
self._cfg.modify_user(self._editing_user, self._editing, vars.getvalue('value', ''))
self._cfg.load()
self._editing = None | lgpl-3.0 |
tenvick/hugular_cstolua | Client/tools/site-packages/PIL/ImageDraw.py | 13 | 11655 | #
# The Python Imaging Library
# $Id: ImageDraw.py 2817 2006-10-07 15:34:03Z fredrik $
#
# drawing interface operations
#
# History:
# 1996-04-13 fl Created (experimental)
# 1996-08-07 fl Filled polygons, ellipses.
# 1996-08-13 fl Added text support
# 1998-06-28 fl Handle I and F images
# 1998-12-29 fl Added arc; use arc primitive to draw ellipses
# 1999-01-10 fl Added shape stuff (experimental)
# 1999-02-06 fl Added bitmap support
# 1999-02-11 fl Changed all primitives to take options
# 1999-02-20 fl Fixed backwards compatibility
# 2000-10-12 fl Copy on write, when necessary
# 2001-02-18 fl Use default ink for bitmap/text also in fill mode
# 2002-10-24 fl Added support for CSS-style color strings
# 2002-12-10 fl Added experimental support for RGBA-on-RGB drawing
# 2002-12-11 fl Refactored low-level drawing API (work in progress)
# 2004-08-26 fl Made Draw() a factory function, added getdraw() support
# 2004-09-04 fl Added width support to line primitive
# 2004-09-10 fl Added font mode handling
# 2006-06-19 fl Added font bearing support (getmask2)
#
# Copyright (c) 1997-2006 by Secret Labs AB
# Copyright (c) 1996-2006 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image, ImageColor
try:
import warnings
except ImportError:
warnings = None
##
# A simple 2D drawing interface for PIL images.
# <p>
# Application code should use the <b>Draw</b> factory, instead of
# instantiating this class directly.
class ImageDraw:
##
# Create a drawing instance.
#
# @param im The image to draw in.
# @param mode Optional mode to use for color values. For RGB
# images, this argument can be RGB or RGBA (to blend the
# drawing into the image). For all other modes, this argument
# must be the same as the image mode. If omitted, the mode
# defaults to the mode of the image.
def __init__(self, im, mode=None):
im.load()
if im.readonly:
im._copy() # make it writable
blend = 0
if mode is None:
mode = im.mode
if mode != im.mode:
if mode == "RGBA" and im.mode == "RGB":
blend = 1
else:
raise ValueError("mode mismatch")
if mode == "P":
self.palette = im.palette
else:
self.palette = None
self.im = im.im
self.draw = Image.core.draw(self.im, blend)
self.mode = mode
if mode in ("I", "F"):
self.ink = self.draw.draw_ink(1, mode)
else:
self.ink = self.draw.draw_ink(-1, mode)
if mode in ("1", "P", "I", "F"):
# FIXME: fix Fill2 to properly support matte for I+F images
self.fontmode = "1"
else:
self.fontmode = "L" # aliasing is okay for other modes
self.fill = 0
self.font = None
##
# Set the default pen color.
def setink(self, ink):
# compatibility
if warnings:
warnings.warn(
"'setink' is deprecated; use keyword arguments instead",
DeprecationWarning, stacklevel=2
)
if Image.isStringType(ink):
ink = ImageColor.getcolor(ink, self.mode)
if self.palette and not Image.isNumberType(ink):
ink = self.palette.getcolor(ink)
self.ink = self.draw.draw_ink(ink, self.mode)
##
# Set the default background color.
def setfill(self, onoff):
# compatibility
if warnings:
warnings.warn(
"'setfill' is deprecated; use keyword arguments instead",
DeprecationWarning, stacklevel=2
)
self.fill = onoff
##
# Set the default font.
def setfont(self, font):
# compatibility
self.font = font
##
# Get the current default font.
def getfont(self):
if not self.font:
# FIXME: should add a font repository
import ImageFont
self.font = ImageFont.load_default()
return self.font
def _getink(self, ink, fill=None):
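        # Resolve the ink/fill arguments into low-level ink objects; when
        # neither is given, fall back to the instance defaults configured
        # via setink()/setfill().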
if ink is None and fill is None:
if self.fill:
fill = self.ink
else:
ink = self.ink
else:
if ink is not None:
if Image.isStringType(ink):
ink = ImageColor.getcolor(ink, self.mode)
if self.palette and not Image.isNumberType(ink):
ink = self.palette.getcolor(ink)
ink = self.draw.draw_ink(ink, self.mode)
if fill is not None:
if Image.isStringType(fill):
fill = ImageColor.getcolor(fill, self.mode)
if self.palette and not Image.isNumberType(fill):
fill = self.palette.getcolor(fill)
fill = self.draw.draw_ink(fill, self.mode)
return ink, fill
##
# Draw an arc.
def arc(self, xy, start, end, fill=None):
ink, fill = self._getink(fill)
if ink is not None:
self.draw.draw_arc(xy, start, end, ink)
##
# Draw a bitmap.
def bitmap(self, xy, bitmap, fill=None):
bitmap.load()
ink, fill = self._getink(fill)
if ink is None:
ink = fill
if ink is not None:
self.draw.draw_bitmap(xy, bitmap.im, ink)
##
# Draw a chord.
def chord(self, xy, start, end, fill=None, outline=None):
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_chord(xy, start, end, fill, 1)
if ink is not None:
self.draw.draw_chord(xy, start, end, ink, 0)
##
# Draw an ellipse.
def ellipse(self, xy, fill=None, outline=None):
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_ellipse(xy, fill, 1)
if ink is not None:
self.draw.draw_ellipse(xy, ink, 0)
##
# Draw a line, or a connected sequence of line segments.
def line(self, xy, fill=None, width=0):
ink, fill = self._getink(fill)
if ink is not None:
self.draw.draw_lines(xy, ink, width)
##
# (Experimental) Draw a shape.
def shape(self, shape, fill=None, outline=None):
# experimental
shape.close()
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_outline(shape, fill, 1)
if ink is not None:
self.draw.draw_outline(shape, ink, 0)
##
# Draw a pieslice.
def pieslice(self, xy, start, end, fill=None, outline=None):
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_pieslice(xy, start, end, fill, 1)
if ink is not None:
self.draw.draw_pieslice(xy, start, end, ink, 0)
##
# Draw one or more individual pixels.
def point(self, xy, fill=None):
ink, fill = self._getink(fill)
if ink is not None:
self.draw.draw_points(xy, ink)
##
# Draw a polygon.
def polygon(self, xy, fill=None, outline=None):
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_polygon(xy, fill, 1)
if ink is not None:
self.draw.draw_polygon(xy, ink, 0)
##
# Draw a rectangle.
def rectangle(self, xy, fill=None, outline=None):
ink, fill = self._getink(outline, fill)
if fill is not None:
self.draw.draw_rectangle(xy, fill, 1)
if ink is not None:
self.draw.draw_rectangle(xy, ink, 0)
##
# Draw text.
def text(self, xy, text, fill=None, font=None, anchor=None):
ink, fill = self._getink(fill)
if font is None:
font = self.getfont()
if ink is None:
ink = fill
if ink is not None:
try:
mask, offset = font.getmask2(text, self.fontmode)
xy = xy[0] + offset[0], xy[1] + offset[1]
except AttributeError:
try:
mask = font.getmask(text, self.fontmode)
except TypeError:
mask = font.getmask(text)
self.draw.draw_bitmap(xy, mask, ink)
##
# Get the size of a given string, in pixels.
def textsize(self, text, font=None):
if font is None:
font = self.getfont()
return font.getsize(text)
##
# A simple 2D drawing interface for PIL images.
#
# @param im The image to draw in.
# @param mode Optional mode to use for color values. For RGB
# images, this argument can be RGB or RGBA (to blend the
# drawing into the image). For all other modes, this argument
# must be the same as the image mode. If omitted, the mode
# defaults to the mode of the image.
def Draw(im, mode=None):
try:
return im.getdraw(mode)
except AttributeError:
return ImageDraw(im, mode)
# experimental access to the outline API
try:
Outline = Image.core.outline
except AttributeError:
Outline = None
##
# (Experimental) A more advanced 2D drawing interface for PIL images,
# based on the WCK interface.
#
# @param im The image to draw in.
# @param hints An optional list of hints.
# @return A (drawing context, drawing resource factory) tuple.
def getdraw(im=None, hints=None):
# FIXME: this needs more work!
# FIXME: come up with a better 'hints' scheme.
handler = None
if not hints or "nicest" in hints:
try:
import _imagingagg
handler = _imagingagg
except ImportError:
pass
if handler is None:
import ImageDraw2
handler = ImageDraw2
if im:
im = handler.Draw(im)
return im, handler
##
# (experimental) Fills a bounded region with a given color.
#
# @param image Target image.
# @param xy Seed position (a 2-item coordinate tuple).
# @param value Fill color.
# @param border Optional border value. If given, the region consists of
# pixels with a color different from the border color. If not given,
# the region consists of pixels having the same color as the seed
# pixel.
def floodfill(image, xy, value, border=None):
"Fill bounded region."
# based on an implementation by Eric S. Raymond
pixel = image.load()
x, y = xy
try:
background = pixel[x, y]
if background == value:
return # seed point already has fill color
pixel[x, y] = value
except IndexError:
return # seed point outside image
edge = [(x, y)]
if border is None:
while edge:
newedge = []
for (x, y) in edge:
for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
try:
p = pixel[s, t]
except IndexError:
pass
else:
if p == background:
pixel[s, t] = value
newedge.append((s, t))
edge = newedge
else:
while edge:
newedge = []
for (x, y) in edge:
for (s, t) in ((x+1, y), (x-1, y), (x, y+1), (x, y-1)):
try:
p = pixel[s, t]
except IndexError:
pass
else:
if p != value and p != border:
pixel[s, t] = value
newedge.append((s, t))
edge = newedge
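# Minimal usage sketch (illustrative, not part of the original module):
# draw a black box on a white canvas, then flood-fill the outside region
# with red, starting from the top-left corner.
#
#   import Image, ImageDraw
#   im = Image.new("RGB", (64, 64), (255, 255, 255))
#   draw = ImageDraw.Draw(im)
#   draw.rectangle((16, 16, 48, 48), outline=(0, 0, 0))
#   ImageDraw.floodfill(im, (0, 0), (255, 0, 0))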
| mit |
epsilonorion/python-xbee | scripts/shell.py | 48 | 1646 | #!/usr/bin/env python
"""
shell.py
Amit Snyderman, 2009
<[email protected]>
Updated by Paul Malmsten, 2010
[email protected]
Provides a simple shell for testing XBee devices. Currently, the shell
only allows one to parse and print received data; sending is not
supported.
"""
# $Id: xbee-serial-terminal.py 7 2009-12-30 16:25:08Z amitsnyderman $
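# Example session (illustrative; the port name below is a placeholder):
#
#   xbee% serial /dev/ttyUSB0
#   Opening serial port: /dev/ttyUSB0
#   xbee% baudrate 9600
#   xbee% +++
#   OK
#   xbee% exit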
import sys, time, cmd, serial, binascii
from xbee import XBee1
class XBeeShell(cmd.Cmd):
def __init__(self):
cmd.Cmd.__init__(self)
self.prompt = "xbee% "
self.serial = serial.Serial()
def default(self, p):
if not self.serial.isOpen():
print "You must set a serial port first."
else:
if p == '+++':
self.serial.write(p)
time.sleep(2)
else:
self.serial.write('%s\r' % p)
time.sleep(0.5)
output = ''
while self.serial.inWaiting():
output += self.serial.read()
print output.replace('\r', '\n').rstrip()
def do_serial(self, p):
"""Set the serial port, e.g.: /dev/tty.usbserial-A4001ib8"""
try:
self.serial.port = p
self.serial.open()
print 'Opening serial port: %s' % p
        except Exception, e:
            print 'Unable to open serial port: %s (%s)' % (p, e)
    def do_baudrate(self, p):
        """Set the serial port's baud rate, e.g.: 19200"""
        self.serial.baudrate = int(p)
    def do_watch(self, p):
        """Continuously read and print incoming XBee frames."""
        if not self.serial.isOpen():
            print "You must set a serial port first."
        else:
            xbee = XBee1(self.serial)
            while True:
                packet = xbee.wait_read_frame()
                print packet
def do_exit(self, p):
"""Exits from the XBee serial console"""
self.serial.close()
return 1
if __name__ == '__main__':
shell = XBeeShell()
shell.cmdloop()
| mit |
yg257/Pangea | lib/boto-2.34.0/tests/mturk/selenium_support.py | 115 | 1741 | from __future__ import absolute_import
from boto.mturk.test.support import unittest
sel_args = ('localhost', 4444, '*chrome', 'https://workersandbox.mturk.com')
class SeleniumFailed(object):
def __init__(self, message):
self.message = message
def __nonzero__(self):
return False
def has_selenium():
try:
from selenium import selenium
globals().update(selenium=selenium)
sel = selenium(*sel_args)
# a little trick to see if the server is responding
try:
sel.do_command('shutdown', '')
except Exception, e:
if not 'Server Exception' in str(e):
raise
result = True
except ImportError:
result = SeleniumFailed('selenium RC not installed')
    except Exception, e:
msg = 'Error occurred initializing selenium: %s' % e
result = SeleniumFailed(msg)
# overwrite has_selenium, so the same result is returned every time
globals().update(has_selenium=lambda: result)
return result
identity = lambda x: x
def skip_unless_has_selenium():
res = has_selenium()
if not res:
return unittest.skip(res.message)
return identity
def complete_hit(hit_type_id, response='Some Response'):
verificationErrors = []
sel = selenium(*sel_args)
sel.start()
sel.open("/mturk/welcome")
sel.click("lnkWorkerSignin")
sel.wait_for_page_to_load("30000")
sel.type("email", "[email protected]")
sel.type("password", "BotoTest")
sel.click("Continue")
sel.wait_for_page_to_load("30000")
sel.open("/mturk/preview?groupId={hit_type_id}".format(**vars()))
sel.click("/accept")
sel.wait_for_page_to_load("30000")
sel.type("Answer_1_FreeText", response)
sel.click("//div[5]/table/tbody/tr[2]/td[1]/input")
sel.wait_for_page_to_load("30000")
sel.click("link=Sign Out")
sel.wait_for_page_to_load("30000")
sel.stop()
| apache-2.0 |
sumedhasingla/VTK | Imaging/Core/Testing/Python/TestAllMathematics.py | 26 | 3800 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
=========================================================================
Program: Visualization Toolkit
Module: TestNamedColorsIntegration.py
Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
All rights reserved.
See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
This software is distributed WITHOUT ANY WARRANTY; without even
the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
PURPOSE. See the above copyright notice for more information.
=========================================================================
'''
import vtk
import vtk.test.Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
class TestAllMathematics(vtk.test.Testing.vtkTest):
def testAllMathematics(self):
# append multiple displaced spheres into an RGB image.
# Image pipeline
renWin = vtk.vtkRenderWindow()
sphere1 = vtk.vtkImageEllipsoidSource()
sphere1.SetCenter(40, 20, 0)
sphere1.SetRadius(30, 30, 0)
sphere1.SetInValue(.75)
sphere1.SetOutValue(.3)
sphere1.SetOutputScalarTypeToFloat()
sphere1.SetWholeExtent(0, 99, 0, 74, 0, 0)
sphere1.Update()
sphere2 = vtk.vtkImageEllipsoidSource()
sphere2.SetCenter(60, 30, 0)
sphere2.SetRadius(20, 20, 20)
sphere2.SetInValue(.2)
sphere2.SetOutValue(.5)
sphere2.SetOutputScalarTypeToFloat()
sphere2.SetWholeExtent(0, 99, 0, 74, 0, 0)
sphere2.Update()
mathematics = [ "Add", "Subtract", "Multiply", "Divide", "Invert", "Sin", "Cos",
"Exp", "Log", "AbsoluteValue", "Square", "SquareRoot", "Min",
"Max", "ATAN", "ATAN2", "MultiplyByK", "ReplaceCByK", "AddConstant"]
mathematic = list()
mapper = list()
actor = list()
imager = list()
for idx, operator in enumerate(mathematics):
mathematic.append(vtk.vtkImageMathematics())
mathematic[idx].SetInput1Data(sphere1.GetOutput())
mathematic[idx].SetInput2Data(sphere2.GetOutput())
            getattr(mathematic[idx], 'SetOperationTo' + operator)()
mathematic[idx].SetConstantK(.3)
mathematic[idx].SetConstantC(.75)
mapper.append(vtk.vtkImageMapper())
mapper[idx].SetInputConnection(mathematic[idx].GetOutputPort())
mapper[idx].SetColorWindow(2.0)
mapper[idx].SetColorLevel(.75)
actor.append(vtk.vtkActor2D())
actor[idx].SetMapper(mapper[idx])
imager.append(vtk.vtkRenderer())
imager[idx].AddActor2D(actor[idx])
renWin.AddRenderer(imager[idx])
column = 1
row = 1
deltaX = 1.0 / 6.0
deltaY = 1.0 / 4.0
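        # Tile the per-operator renderers into a 6-column by 4-row grid of
        # normalized viewport coordinates.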
for idx, operator in enumerate(mathematics):
imager[idx].SetViewport((column - 1) * deltaX, (row - 1) * deltaY, column * deltaX, row * deltaY)
column += 1
if column > 6:
column = 1
row += 1
# Make the last operator finish the row
vp = imager[len(mathematics) - 1].GetViewport()
imager[len(mathematics) - 1].SetViewport(vp[0], vp[1], 1, 1)
renWin.SetSize(600, 300)
# render and interact with data
iRen = vtk.vtkRenderWindowInteractor()
iRen.SetRenderWindow(renWin);
renWin.Render()
img_file = "TestAllMathematics.png"
vtk.test.Testing.compareImage(iRen.GetRenderWindow(), vtk.test.Testing.getAbsImagePath(img_file), threshold=25)
vtk.test.Testing.interact()
if __name__ == "__main__":
vtk.test.Testing.main([(TestAllMathematics, 'test')])
| bsd-3-clause |
aktech/sympy | sympy/polys/fields.py | 29 | 19557 | """Sparse rational function fields. """
from __future__ import print_function, division
from operator import add, mul, lt, le, gt, ge
from sympy.core.compatibility import is_sequence, reduce, string_types
from sympy.core.expr import Expr
from sympy.core.symbol import Symbol
from sympy.core.sympify import CantSympify, sympify
from sympy.polys.rings import PolyElement
from sympy.polys.orderings import lex
from sympy.polys.polyerrors import CoercionFailed
from sympy.polys.polyoptions import build_options
from sympy.polys.polyutils import _parallel_dict_from_expr
from sympy.polys.domains.domainelement import DomainElement
from sympy.polys.domains.polynomialring import PolynomialRing
from sympy.polys.domains.fractionfield import FractionField
from sympy.polys.constructor import construct_domain
from sympy.printing.defaults import DefaultPrinting
from sympy.utilities import public
from sympy.utilities.magic import pollute
@public
def field(symbols, domain, order=lex):
"""Construct new rational function field returning (field, x1, ..., xn). """
_field = FracField(symbols, domain, order)
return (_field,) + _field.gens
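# Illustrative usage (not part of the original docstring; printed output
# is indicative):
#
#   >>> from sympy.polys.fields import field
#   >>> from sympy.polys.domains import ZZ
#   >>> K, x, y = field("x,y", ZZ)
#   >>> (x + y)/x
#   (x + y)/x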
@public
def xfield(symbols, domain, order=lex):
"""Construct new rational function field returning (field, (x1, ..., xn)). """
_field = FracField(symbols, domain, order)
return (_field, _field.gens)
@public
def vfield(symbols, domain, order=lex):
"""Construct new rational function field and inject generators into global namespace. """
_field = FracField(symbols, domain, order)
pollute([ sym.name for sym in _field.symbols ], _field.gens)
return _field
@public
def sfield(exprs, *symbols, **options):
"""Construct a field deriving generators and domain
from options and input expressions.
Parameters
----------
exprs : :class:`Expr` or sequence of :class:`Expr` (sympifiable)
symbols : sequence of :class:`Symbol`/:class:`Expr`
options : keyword arguments understood by :class:`Options`
Examples
========
>>> from sympy.core import symbols
>>> from sympy.functions import exp, log
>>> from sympy.polys.fields import sfield
>>> x = symbols("x")
>>> K, f = sfield((x*log(x) + 4*x**2)*exp(1/x + log(x)/3)/x**2)
>>> K
Rational function field in x, exp(1/x), log(x), x**(1/3) over ZZ with lex order
>>> f
(4*x**2*(exp(1/x)) + x*(exp(1/x))*(log(x)))/((x**(1/3))**5)
"""
single = False
if not is_sequence(exprs):
exprs, single = [exprs], True
exprs = list(map(sympify, exprs))
opt = build_options(symbols, options)
numdens = []
for expr in exprs:
numdens.extend(expr.as_numer_denom())
reps, opt = _parallel_dict_from_expr(numdens, opt)
if opt.domain is None:
# NOTE: this is inefficient because construct_domain() automatically
# performs conversion to the target domain. It shouldn't do this.
coeffs = sum([list(rep.values()) for rep in reps], [])
opt.domain, _ = construct_domain(coeffs, opt=opt)
_field = FracField(opt.gens, opt.domain, opt.order)
fracs = []
for i in range(0, len(reps), 2):
fracs.append(_field(tuple(reps[i:i+2])))
if single:
return (_field, fracs[0])
else:
return (_field, fracs)
_field_cache = {}
class FracField(DefaultPrinting):
"""Multivariate distributed rational function field. """
def __new__(cls, symbols, domain, order=lex):
from sympy.polys.rings import PolyRing
ring = PolyRing(symbols, domain, order)
symbols = ring.symbols
ngens = ring.ngens
domain = ring.domain
order = ring.order
_hash = hash((cls.__name__, symbols, ngens, domain, order))
obj = _field_cache.get(_hash)
if obj is None:
obj = object.__new__(cls)
obj._hash = _hash
obj.ring = ring
obj.dtype = type("FracElement", (FracElement,), {"field": obj})
obj.symbols = symbols
obj.ngens = ngens
obj.domain = domain
obj.order = order
obj.zero = obj.dtype(ring.zero)
obj.one = obj.dtype(ring.one)
obj.gens = obj._gens()
for symbol, generator in zip(obj.symbols, obj.gens):
if isinstance(symbol, Symbol):
name = symbol.name
if not hasattr(obj, name):
setattr(obj, name, generator)
_field_cache[_hash] = obj
return obj
def _gens(self):
"""Return a list of polynomial generators. """
return tuple([ self.dtype(gen) for gen in self.ring.gens ])
def __getnewargs__(self):
return (self.symbols, self.domain, self.order)
def __hash__(self):
return self._hash
def __eq__(self, other):
return self is other
def __ne__(self, other):
return self is not other
def raw_new(self, numer, denom=None):
return self.dtype(numer, denom)
def new(self, numer, denom=None):
if denom is None: denom = self.ring.one
numer, denom = numer.cancel(denom)
return self.raw_new(numer, denom)
def domain_new(self, element):
return self.domain.convert(element)
def ground_new(self, element):
try:
return self.new(self.ring.ground_new(element))
except CoercionFailed:
domain = self.domain
if not domain.has_Field and domain.has_assoc_Field:
ring = self.ring
ground_field = domain.get_field()
element = ground_field.convert(element)
numer = ring.ground_new(ground_field.numer(element))
denom = ring.ground_new(ground_field.denom(element))
return self.raw_new(numer, denom)
else:
raise
def field_new(self, element):
if isinstance(element, FracElement):
if self == element.field:
return element
else:
raise NotImplementedError("conversion")
elif isinstance(element, PolyElement):
denom, numer = element.clear_denoms()
numer = numer.set_ring(self.ring)
denom = self.ring.ground_new(denom)
return self.raw_new(numer, denom)
elif isinstance(element, tuple) and len(element) == 2:
numer, denom = list(map(self.ring.ring_new, element))
return self.new(numer, denom)
elif isinstance(element, string_types):
raise NotImplementedError("parsing")
elif isinstance(element, Expr):
return self.from_expr(element)
else:
return self.ground_new(element)
__call__ = field_new
def _rebuild_expr(self, expr, mapping):
domain = self.domain
def _rebuild(expr):
generator = mapping.get(expr)
if generator is not None:
return generator
elif expr.is_Add:
return reduce(add, list(map(_rebuild, expr.args)))
elif expr.is_Mul:
return reduce(mul, list(map(_rebuild, expr.args)))
elif expr.is_Pow and expr.exp.is_Integer:
return _rebuild(expr.base)**int(expr.exp)
else:
try:
return domain.convert(expr)
except CoercionFailed:
if not domain.has_Field and domain.has_assoc_Field:
return domain.get_field().convert(expr)
else:
raise
return _rebuild(sympify(expr))
def from_expr(self, expr):
mapping = dict(list(zip(self.symbols, self.gens)))
try:
frac = self._rebuild_expr(expr, mapping)
except CoercionFailed:
raise ValueError("expected an expression convertible to a rational function in %s, got %s" % (self, expr))
else:
return self.field_new(frac)
def to_domain(self):
return FractionField(self)
def to_ring(self):
from sympy.polys.rings import PolyRing
return PolyRing(self.symbols, self.domain, self.order)
class FracElement(DomainElement, DefaultPrinting, CantSympify):
"""Element of multivariate distributed rational function field. """
def __init__(self, numer, denom=None):
if denom is None:
denom = self.field.ring.one
elif not denom:
raise ZeroDivisionError("zero denominator")
self.numer = numer
self.denom = denom
def raw_new(f, numer, denom):
return f.__class__(numer, denom)
def new(f, numer, denom):
return f.raw_new(*numer.cancel(denom))
def to_poly(f):
if f.denom != 1:
raise ValueError("f.denom should be 1")
return f.numer
def parent(self):
return self.field.to_domain()
def __getnewargs__(self):
return (self.field, self.numer, self.denom)
_hash = None
def __hash__(self):
_hash = self._hash
if _hash is None:
self._hash = _hash = hash((self.field, self.numer, self.denom))
return _hash
def copy(self):
return self.raw_new(self.numer.copy(), self.denom.copy())
def set_field(self, new_field):
if self.field == new_field:
return self
else:
new_ring = new_field.ring
numer = self.numer.set_ring(new_ring)
denom = self.denom.set_ring(new_ring)
return new_field.new(numer, denom)
def as_expr(self, *symbols):
return self.numer.as_expr(*symbols)/self.denom.as_expr(*symbols)
def __eq__(f, g):
if isinstance(g, f.field.dtype):
return f.numer == g.numer and f.denom == g.denom
else:
return f.numer == g and f.denom == f.field.ring.one
def __ne__(f, g):
return not f.__eq__(g)
def __nonzero__(f):
return bool(f.numer)
__bool__ = __nonzero__
def sort_key(self):
return (self.denom.sort_key(), self.numer.sort_key())
def _cmp(f1, f2, op):
if isinstance(f2, f1.field.dtype):
return op(f1.sort_key(), f2.sort_key())
else:
return NotImplemented
def __lt__(f1, f2):
return f1._cmp(f2, lt)
def __le__(f1, f2):
return f1._cmp(f2, le)
def __gt__(f1, f2):
return f1._cmp(f2, gt)
def __ge__(f1, f2):
return f1._cmp(f2, ge)
def __pos__(f):
"""Negate all coefficients in ``f``. """
return f.raw_new(f.numer, f.denom)
def __neg__(f):
"""Negate all coefficients in ``f``. """
return f.raw_new(-f.numer, f.denom)
def _extract_ground(self, element):
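        # Attempt to coerce ``element`` into the ground domain.  Returns a
        # status flag plus components: (1, elem, None) for a direct domain
        # coercion, (-1, numer, denom) when only the associated fraction
        # field accepts it, and (0, None, None) on failure.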
domain = self.field.domain
try:
element = domain.convert(element)
except CoercionFailed:
if not domain.has_Field and domain.has_assoc_Field:
ground_field = domain.get_field()
try:
element = ground_field.convert(element)
except CoercionFailed:
pass
else:
return -1, ground_field.numer(element), ground_field.denom(element)
return 0, None, None
else:
return 1, element, None
def __add__(f, g):
"""Add rational functions ``f`` and ``g``. """
field = f.field
if not g:
return f
elif not f:
return g
elif isinstance(g, field.dtype):
if f.denom == g.denom:
return f.new(f.numer + g.numer, f.denom)
else:
return f.new(f.numer*g.denom + f.denom*g.numer, f.denom*g.denom)
elif isinstance(g, field.ring.dtype):
return f.new(f.numer + f.denom*g, f.denom)
else:
if isinstance(g, FracElement):
if isinstance(field.domain, FractionField) and field.domain.field == g.field:
pass
elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
return g.__radd__(f)
else:
return NotImplemented
elif isinstance(g, PolyElement):
if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
pass
else:
return g.__radd__(f)
return f.__radd__(g)
def __radd__(f, c):
if isinstance(c, f.field.ring.dtype):
return f.new(f.numer + f.denom*c, f.denom)
op, g_numer, g_denom = f._extract_ground(c)
if op == 1:
return f.new(f.numer + f.denom*g_numer, f.denom)
elif not op:
return NotImplemented
else:
return f.new(f.numer*g_denom + f.denom*g_numer, f.denom*g_denom)
def __sub__(f, g):
"""Subtract rational functions ``f`` and ``g``. """
field = f.field
if not g:
return f
elif not f:
return -g
elif isinstance(g, field.dtype):
if f.denom == g.denom:
return f.new(f.numer - g.numer, f.denom)
else:
return f.new(f.numer*g.denom - f.denom*g.numer, f.denom*g.denom)
elif isinstance(g, field.ring.dtype):
return f.new(f.numer - f.denom*g, f.denom)
else:
if isinstance(g, FracElement):
if isinstance(field.domain, FractionField) and field.domain.field == g.field:
pass
elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
return g.__rsub__(f)
else:
return NotImplemented
elif isinstance(g, PolyElement):
if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
pass
else:
return g.__rsub__(f)
op, g_numer, g_denom = f._extract_ground(g)
if op == 1:
return f.new(f.numer - f.denom*g_numer, f.denom)
elif not op:
return NotImplemented
else:
return f.new(f.numer*g_denom - f.denom*g_numer, f.denom*g_denom)
def __rsub__(f, c):
if isinstance(c, f.field.ring.dtype):
return f.new(-f.numer + f.denom*c, f.denom)
op, g_numer, g_denom = f._extract_ground(c)
if op == 1:
return f.new(-f.numer + f.denom*g_numer, f.denom)
elif not op:
return NotImplemented
else:
return f.new(-f.numer*g_denom + f.denom*g_numer, f.denom*g_denom)
def __mul__(f, g):
"""Multiply rational functions ``f`` and ``g``. """
field = f.field
if not f or not g:
return field.zero
elif isinstance(g, field.dtype):
return f.new(f.numer*g.numer, f.denom*g.denom)
elif isinstance(g, field.ring.dtype):
return f.new(f.numer*g, f.denom)
else:
if isinstance(g, FracElement):
if isinstance(field.domain, FractionField) and field.domain.field == g.field:
pass
elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
return g.__rmul__(f)
else:
return NotImplemented
elif isinstance(g, PolyElement):
if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
pass
else:
return g.__rmul__(f)
return f.__rmul__(g)
def __rmul__(f, c):
if isinstance(c, f.field.ring.dtype):
return f.new(f.numer*c, f.denom)
op, g_numer, g_denom = f._extract_ground(c)
if op == 1:
return f.new(f.numer*g_numer, f.denom)
elif not op:
return NotImplemented
else:
return f.new(f.numer*g_numer, f.denom*g_denom)
def __truediv__(f, g):
"""Computes quotient of fractions ``f`` and ``g``. """
field = f.field
if not g:
raise ZeroDivisionError
elif isinstance(g, field.dtype):
return f.new(f.numer*g.denom, f.denom*g.numer)
elif isinstance(g, field.ring.dtype):
return f.new(f.numer, f.denom*g)
else:
if isinstance(g, FracElement):
if isinstance(field.domain, FractionField) and field.domain.field == g.field:
pass
elif isinstance(g.field.domain, FractionField) and g.field.domain.field == field:
return g.__rtruediv__(f)
else:
return NotImplemented
elif isinstance(g, PolyElement):
if isinstance(field.domain, PolynomialRing) and field.domain.ring == g.ring:
pass
else:
return g.__rtruediv__(f)
op, g_numer, g_denom = f._extract_ground(g)
if op == 1:
return f.new(f.numer, f.denom*g_numer)
elif not op:
return NotImplemented
else:
return f.new(f.numer*g_denom, f.denom*g_numer)
__div__ = __truediv__
def __rtruediv__(f, c):
if not f:
raise ZeroDivisionError
elif isinstance(c, f.field.ring.dtype):
return f.new(f.denom*c, f.numer)
op, g_numer, g_denom = f._extract_ground(c)
if op == 1:
return f.new(f.denom*g_numer, f.numer)
elif not op:
return NotImplemented
else:
return f.new(f.denom*g_numer, f.numer*g_denom)
__rdiv__ = __rtruediv__
def __pow__(f, n):
"""Raise ``f`` to a non-negative power ``n``. """
if n >= 0:
return f.raw_new(f.numer**n, f.denom**n)
elif not f:
raise ZeroDivisionError
else:
return f.raw_new(f.denom**-n, f.numer**-n)
def diff(f, x):
"""Computes partial derivative in ``x``.
Examples
========
>>> from sympy.polys.fields import field
>>> from sympy.polys.domains import ZZ
>>> _, x, y, z = field("x,y,z", ZZ)
>>> ((x**2 + y)/(z + 1)).diff(x)
2*x/(z + 1)
"""
x = x.to_poly()
return f.new(f.numer.diff(x)*f.denom - f.numer*f.denom.diff(x), f.denom**2)
def __call__(f, *values):
if 0 < len(values) <= f.field.ngens:
return f.evaluate(list(zip(f.field.gens, values)))
else:
raise ValueError("expected at least 1 and at most %s values, got %s" % (f.field.ngens, len(values)))
def evaluate(f, x, a=None):
if isinstance(x, list) and a is None:
x = [ (X.to_poly(), a) for X, a in x ]
numer, denom = f.numer.evaluate(x), f.denom.evaluate(x)
else:
x = x.to_poly()
numer, denom = f.numer.evaluate(x, a), f.denom.evaluate(x, a)
field = numer.ring.to_field()
return field.new(numer, denom)
def subs(f, x, a=None):
if isinstance(x, list) and a is None:
x = [ (X.to_poly(), a) for X, a in x ]
numer, denom = f.numer.subs(x), f.denom.subs(x)
else:
x = x.to_poly()
numer, denom = f.numer.subs(x, a), f.denom.subs(x, a)
return f.new(numer, denom)
def compose(f, x, a=None):
raise NotImplementedError
| bsd-3-clause |
lichong012245/django-lfs-0.7.8 | lfs/portlet/models/recent_products.py | 1 | 1892 | # django imports
from django import forms
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.template import RequestContext
from django.template.loader import render_to_string
# portlets imports
from portlets.models import Portlet
# lfs imports
from lfs.catalog.models import Product
from lfs.caching.utils import lfs_get_object
class RecentProductsPortlet(Portlet):
"""Portlet to display recent visited products.
"""
class Meta:
app_label = 'portlet'
def __unicode__(self):
return u"%s" % self.id
def render(self, context):
"""Renders the portlet as html.
"""
object = context.get("product")
slug_not_to_display = ""
limit = settings.LFS_RECENT_PRODUCTS_LIMIT
if object:
ctype = ContentType.objects.get_for_model(object)
if ctype.name == u"product":
slug_not_to_display = object.slug
limit = settings.LFS_RECENT_PRODUCTS_LIMIT + 1
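                # Fetch one extra slug: the product currently being viewed
                # is skipped in the loop below.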
request = context.get("request")
products = []
for slug in request.session.get("RECENT_PRODUCTS", [])[:limit]:
if slug == slug_not_to_display:
continue
product = lfs_get_object(Product, slug=slug)
if product and product.is_product_with_variants() and product.has_variants():
product = product.get_default_variant()
products.append(product)
return render_to_string("lfs/portlets/recent_products.html", RequestContext(request, {
"title": self.title,
"products": products,
}))
def form(self, **kwargs):
return RecentProductsForm(instance=self, **kwargs)
class RecentProductsForm(forms.ModelForm):
"""Form for the RecentProductsPortlet.
"""
class Meta:
model = RecentProductsPortlet
| bsd-3-clause |
ColinIanKing/autotest | cli/action_common_unittest.py | 4 | 23407 | #!/usr/bin/python
#
# Copyright 2008 Google Inc. All Rights Reserved.
"""Tests for action_common."""
import unittest, sys, copy
try:
import autotest.common as common
except ImportError:
import common
from autotest.cli import cli_mock, action_common, rpc
#
# List action
#
class atest_list_unittest(cli_mock.cli_unittest):
    def test_check_for_wildcard_none(self):
orig_filters = {'name__in': ['item0', 'item1']}
orig_checks = {'name__in': ['item0', 'item1']}
mytest = action_common.atest_list()
filters = copy.deepcopy(orig_filters)
checks = copy.deepcopy(orig_checks)
mytest.check_for_wildcard(filters, checks)
self.assertEqual(filters, orig_filters)
self.assertEqual(checks, orig_checks)
    def test_check_for_wildcard_none_list(self):
orig_filters = {'name__in': ['item0']}
orig_checks = {'name__in': ['item0']}
mytest = action_common.atest_list()
filters = copy.deepcopy(orig_filters)
checks = copy.deepcopy(orig_checks)
mytest.check_for_wildcard(filters, checks)
self.assertEqual(filters, orig_filters)
self.assertEqual(checks, orig_checks)
    def test_check_for_wildcard_one_list(self):
filters = {'something__in': ['item*']}
checks = {'something__in': ['item*']}
mytest = action_common.atest_list()
mytest.check_for_wildcard(filters, checks)
self.assertEqual(filters, {'something__startswith': 'item'})
self.assertEqual(checks, {'something__startswith': None})
    def test_check_for_wildcard_one_string(self):
filters = {'something__name': 'item*'}
checks = {'something__name': 'item*'}
mytest = action_common.atest_list()
mytest.check_for_wildcard(filters, checks)
self.assertEqual(filters, {'something__name__startswith': 'item'})
self.assertEqual(checks, {'something__name__startswith': None})
    def test_check_for_wildcard_one_string_login(self):
filters = {'something__login': 'item*'}
checks = {'something__login': 'item*'}
mytest = action_common.atest_list()
mytest.check_for_wildcard(filters, checks)
self.assertEqual(filters, {'something__login__startswith': 'item'})
self.assertEqual(checks, {'something__login__startswith': None})
    def test_check_for_wildcard_two(self):
orig_filters = {'something__in': ['item0*', 'item1*']}
orig_checks = {'something__in': ['item0*', 'item1*']}
mytest = action_common.atest_list()
filters = copy.deepcopy(orig_filters)
checks = copy.deepcopy(orig_checks)
self.god.stub_function(sys, 'exit')
sys.exit.expect_call(1).and_raises(cli_mock.ExitException)
self.god.mock_io()
self.assertRaises(cli_mock.ExitException,
mytest.check_for_wildcard, filters, checks)
(out, err) = self.god.unmock_io()
self.god.check_playback()
self.assertEqual(filters, orig_filters)
self.assertEqual(checks, orig_checks)
def _atest_list_execute(self, filters={}, check_results={}):
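        # Helper: stub the get_labels RPC with canned label records, run
        # atest_list.execute(), and return the captured (stdout, stderr)
        # output together with any failures the command recorded.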
values = [{u'id': 180,
u'platform': 0,
u'name': u'label0',
u'invalid': 0,
u'kernel_config': u''},
{u'id': 338,
u'platform': 0,
u'name': u'label1',
u'invalid': 0,
u'kernel_config': u''}]
mytest = action_common.atest_list()
mytest.afe = rpc.afe_comm()
self.mock_rpcs([('get_labels',
filters,
True,
values)])
self.god.mock_io()
self.assertEqual(values,
mytest.execute(op='get_labels',
filters=filters,
check_results=check_results))
errors = mytest.failed
(out, err) = self.god.unmock_io()
self.god.check_playback()
return (out, err, errors)
def test_atest_list_execute_no_filters(self):
self._atest_list_execute()
def test_atest_list_execute_filters_all_good(self):
filters = {}
check_results = {}
filters['name__in'] = ['label0', 'label1']
check_results['name__in'] = 'name'
(out, err, errors) = self._atest_list_execute(filters, check_results)
self.assertEqual(err, '')
def test_atest_list_execute_filters_good_and_bad(self):
filters = {}
check_results = {}
filters['name__in'] = ['label0', 'label1', 'label2']
check_results['name__in'] = 'name'
(out, err, errors) = self._atest_list_execute(filters, check_results)
K = errors.keys()[0]
V = errors.values()[0].keys()[0]
self.assertTrue('Unknown' in K)
self.assertTrue('label2' in V)
def test_atest_list_execute_items_good_and_bad_no_check(self):
filters = {}
check_results = {}
filters['name__in'] = ['label0', 'label1', 'label2']
check_results['name__in'] = None
(out, err, errors) = self._atest_list_execute(filters, check_results)
self.assertEqual(err, '')
def test_atest_list_execute_filters_wildcard(self):
filters = {}
check_results = {}
filters['name__in'] = ['label*']
check_results['name__in'] = 'name'
values = [{u'id': 180,
u'platform': False,
u'name': u'label0',
u'invalid': False,
u'kernel_config': u''},
{u'id': 338,
u'platform': False,
u'name': u'label1',
u'invalid': False,
u'kernel_config': u''}]
mytest = action_common.atest_list()
mytest.afe = rpc.afe_comm()
self.mock_rpcs([('get_labels', {'name__startswith': 'label'},
True, values)])
self.god.mock_io()
self.assertEqual(values,
mytest.execute(op='get_labels',
filters=filters,
check_results=check_results))
(out, err) = self.god.unmock_io()
self.god.check_playback()
self.assertEqual(err, '')
#
# Creation & Deletion of a topic (ACL, label, user)
#
class atest_create_or_delete_unittest(cli_mock.cli_unittest):
def _create_cr_del(self, items):
def _items():
return items
crdel = action_common.atest_create_or_delete()
crdel.afe = rpc.afe_comm()
crdel.topic = crdel.usage_topic = 'label'
crdel.op_action = 'add'
crdel.get_items = _items
crdel.data['platform'] = False
crdel.data_item_key = 'name'
return crdel
def test_execute_create_one_topic(self):
acr = self._create_cr_del(['label0'])
self.mock_rpcs([('add_label',
{'name': 'label0', 'platform': False},
True, 42)])
ret = acr.execute()
self.god.check_playback()
self.assert_(['label0'], ret)
def test_execute_create_two_topics(self):
acr = self._create_cr_del(['label0', 'label1'])
self.mock_rpcs([('add_label',
{'name': 'label0', 'platform': False},
True, 42),
('add_label',
{'name': 'label1', 'platform': False},
True, 43)])
ret = acr.execute()
self.god.check_playback()
self.assertEqualNoOrder(['label0', 'label1'], ret)
def test_execute_create_error(self):
acr = self._create_cr_del(['label0'])
self.mock_rpcs([('add_label',
{'name': 'label0', 'platform': False},
False,
'''ValidationError:
{'name': 'This value must be unique (label0)'}''')])
ret = acr.execute()
self.god.check_playback()
self.assertEqualNoOrder([], ret)
#
# Adding or Removing users or hosts from a topic(ACL or label)
#
class atest_add_or_remove_unittest(cli_mock.cli_unittest):
def _create_add_remove(self, items, users=None, hosts=None):
def _items():
return [items]
addrm = action_common.atest_add_or_remove()
addrm.afe = rpc.afe_comm()
if users:
addrm.users = users
if hosts:
addrm.hosts = hosts
addrm.topic = 'acl_group'
addrm.msg_topic = 'ACL'
addrm.op_action = 'add'
addrm.msg_done = 'Added to'
addrm.get_items = _items
return addrm
def test__add_remove_uh_to_topic(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
True,
None)])
acl_addrm._add_remove_uh_to_topic('acl0', 'users')
self.god.check_playback()
def test__add_remove_uh_to_topic_raise(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'])
self.assertRaises(AttributeError,
acl_addrm._add_remove_uh_to_topic,
'acl0', 'hosts')
def test_execute_add_or_remove_uh_to_topic_acl_users(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
True,
None)])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqualNoOrder(['acl0'], execute_result['users'])
self.assertEqual([], execute_result['hosts'])
def test_execute_add_or_remove_uh_to_topic_acl_users_hosts(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'],
hosts=['host0', 'host1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
True,
None),
('acl_group_add_hosts',
{'id': 'acl0',
'hosts': ['host0', 'host1']},
True,
None)])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqualNoOrder(['acl0'], execute_result['users'])
self.assertEqualNoOrder(['acl0'], execute_result['hosts'])
def test_execute_add_or_remove_uh_to_topic_acl_bad_users(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
False,
'DoesNotExist: The following users do not exist: '
'user0, user1')])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqual([], execute_result['users'])
self.assertEqual([], execute_result['hosts'])
self.assertOutput(acl_addrm, execute_result,
err_words_ok=['DoesNotExist',
'acl_group_add_users',
'user0', 'user1'],
err_words_no = ['acl_group_add_hosts'])
def test_execute_add_or_remove_uh_to_topic_acl_bad_users_partial(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
False,
'DoesNotExist: The following users do not exist: '
'user0'),
('acl_group_add_users',
{'id': 'acl0',
'users': ['user1']},
True,
None)])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqual(['acl0'], execute_result['users'])
self.assertEqual([], execute_result['hosts'])
self.assertOutput(acl_addrm, execute_result,
out_words_ok=["Added to ACL 'acl0'", 'user1'],
err_words_ok=['DoesNotExist',
'acl_group_add_users',
'user0'],
err_words_no = ['acl_group_add_hosts'])
def test_execute_add_or_remove_uh_to_topic_acl_bad_u_partial_kill(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'])
acl_addrm.kill_on_failure = True
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
False,
'DoesNotExist: The following users do not exist: '
'user0')])
sys.exit.expect_call(1).and_raises(cli_mock.ExitException)
self.god.mock_io()
self.assertRaises(cli_mock.ExitException, acl_addrm.execute)
(out, err) = self.god.unmock_io()
self.god.check_playback()
self._check_output(out=out, err=err,
err_words_ok=['DoesNotExist',
'acl_group_add_users',
'user0'],
err_words_no = ['acl_group_add_hosts'])
def test_execute_add_or_remove_uh_to_topic_acl_bad_users_good_hosts(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'],
hosts=['host0', 'host1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
False,
'DoesNotExist: The following users do not exist: '
'user0, user1'),
('acl_group_add_hosts',
{'id': 'acl0',
'hosts': ['host0', 'host1']},
True,
None)])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqual([], execute_result['users'])
self.assertEqual(['acl0'], execute_result['hosts'])
self.assertOutput(acl_addrm, execute_result,
out_words_ok=["Added to ACL 'acl0' hosts:",
"host0", "host1"],
err_words_ok=['DoesNotExist',
'acl_group_add_users',
'user0', 'user1'],
err_words_no = ['acl_group_add_hosts'])
def test_execute_add_or_remove_uh_to_topic_acl_good_users_bad_hosts(self):
acl_addrm = self._create_add_remove('acl0 with space',
users=['user0', 'user1'],
hosts=['host0', 'host1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0 with space',
'users': ['user0', 'user1']},
True,
None),
('acl_group_add_hosts',
{'id': 'acl0 with space',
'hosts': ['host0', 'host1']},
False,
'DoesNotExist: The following hosts do not exist: '
'host0, host1')])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqual(['acl0 with space'], execute_result['users'])
self.assertEqual([], execute_result['hosts'])
self.assertOutput(acl_addrm, execute_result,
out_words_ok=["Added to ACL 'acl0 with space' users:",
"user0", "user1"],
err_words_ok=['DoesNotExist',
'acl_group_add_hosts',
'host0', 'host1'],
err_words_no = ['acl_group_add_users'])
def test_exe_add_or_remove_uh_to_topic_acl_good_u_bad_hosts_partial(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'],
hosts=['host0', 'host1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
True,
None),
('acl_group_add_hosts',
{'id': 'acl0',
'hosts': ['host0', 'host1']},
False,
'DoesNotExist: The following hosts do not exist: '
'host1'),
('acl_group_add_hosts',
{'id': 'acl0',
'hosts': ['host0']},
True,
None)])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqual(['acl0'], execute_result['users'])
self.assertEqual(['acl0'], execute_result['hosts'])
self.assertOutput(acl_addrm, execute_result,
out_words_ok=["Added to ACL 'acl0' users:",
"user0", "user1", "host0"],
err_words_ok=['DoesNotExist',
'acl_group_add_hosts',
'host1'],
err_words_no = ['acl_group_add_users'])
def test_execute_add_or_remove_uh_to_topic_acl_bad_users_bad_hosts(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'],
hosts=['host0', 'host1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
False,
'DoesNotExist: The following users do not exist: '
'user0, user1'),
('acl_group_add_hosts',
{'id': 'acl0',
'hosts': ['host0', 'host1']},
False,
'DoesNotExist: The following hosts do not exist: '
'host0, host1')])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqual([], execute_result['users'])
self.assertEqual([], execute_result['hosts'])
self.assertOutput(acl_addrm, execute_result,
err_words_ok=['DoesNotExist',
'acl_group_add_hosts',
'host0', 'host1',
'acl_group_add_users',
'user0', 'user1'])
def test_execute_add_or_remove_uh_to_topic_acl_bad_u_bad_h_partial(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'],
hosts=['host0', 'host1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
False,
'DoesNotExist: The following users do not exist: '
'user0'),
('acl_group_add_users',
{'id': 'acl0',
'users': ['user1']},
True,
None),
('acl_group_add_hosts',
{'id': 'acl0',
'hosts': ['host0', 'host1']},
False,
'DoesNotExist: The following hosts do not exist: '
'host1'),
('acl_group_add_hosts',
{'id': 'acl0',
'hosts': ['host0']},
True,
None)])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqual(['acl0'], execute_result['users'])
self.assertEqual(['acl0'], execute_result['hosts'])
self.assertOutput(acl_addrm, execute_result,
out_words_ok=["Added to ACL 'acl0' user:",
"Added to ACL 'acl0' host:",
'user1', 'host0'],
err_words_ok=['DoesNotExist',
'acl_group_add_hosts',
'host1',
'acl_group_add_users',
'user0'])
def test_execute_add_or_remove_to_topic_bad_acl_uh(self):
acl_addrm = self._create_add_remove('acl0',
users=['user0', 'user1'],
hosts=['host0', 'host1'])
self.mock_rpcs([('acl_group_add_users',
{'id': 'acl0',
'users': ['user0', 'user1']},
False,
'DoesNotExist: acl_group matching '
'query does not exist.'),
('acl_group_add_hosts',
{'id': 'acl0',
'hosts': ['host0', 'host1']},
False,
'DoesNotExist: acl_group matching '
'query does not exist.')])
execute_result = acl_addrm.execute()
self.god.check_playback()
self.assertEqual([], execute_result['users'])
self.assertEqual([], execute_result['hosts'])
self.assertOutput(acl_addrm, execute_result,
err_words_ok=['DoesNotExist',
'acl_group_add_hosts',
'acl_group_add_users'])
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
eoinmurray/icarus | notebooks/fid_v_dephase_helpers.py | 1 | 3271 | import os
import sys
import numpy as np
import matplotlib.pyplot as plt
def calculate_g2(delay_peak, hold_int):
if np.array(hold_int).mean() > 0:
g2 = delay_peak/np.array(hold_int).mean()
g2 = np.around(g2, decimals=3)
return g2
else:
return 0
def normalize(arr):
x = arr[:,0]
y = arr[:,1]
hold_max = []
hold_int = []
pulse_width = 25
for j in xrange(int(x.max()/pulse_width)):
minIdx = np.abs(x - pulse_width*j).argmin()
maxIdx = np.abs(x - pulse_width*(j+1)).argmin()
peakX = x[minIdx: maxIdx]
peakY = y[minIdx:maxIdx]
if j != 6:
hold_max.append( np.max(peakY) )
hold_int.append( np.sum(peakY) )
else:
delay_peak = np.sum(peakY)
y = y/np.mean(hold_max)
return np.array(np.vstack((x,y)).T), calculate_g2(delay_peak, hold_int)
def get_deg_of_corr(arr11, arr12, arr21, arr22, arr31, arr32):
delay = 180 - 20
x_min = - 100
x_max = 100
arr11, g2_11 = normalize(arr11)
arr12, g2_12 = normalize(arr12)
arr21, g2_21 = normalize(arr21)
arr22, g2_22 = normalize(arr22)
arr31, g2_31 = normalize(arr31)
arr32, g2_32 = normalize(arr32)
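    # Degree of correlation in each measurement basis: the normalized
    # contrast of the co- and cross-polarized g2 values,
    # (g2_co - g2_cross) / (g2_co + g2_cross).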
grect = (g2_11 - g2_12) / (g2_11 + g2_12)
gdiag = (g2_21 - g2_22) / (g2_21 + g2_22)
gcirc = (g2_31 - g2_32) / (g2_31 + g2_32)
return [grect, gdiag, gcirc]
def get_corr_by_folder(full_name):
return get_deg_of_corr(
np.loadtxt(full_name + '/linear D1D3.txt', delimiter=','),
np.loadtxt(full_name + '/linear D2D3.txt', delimiter=','),
np.loadtxt(full_name + '/diag D1D3.txt', delimiter=','),
np.loadtxt(full_name + '/diag D2D3.txt', delimiter=','),
np.loadtxt(full_name + '/circ D1D3.txt', delimiter=','),
np.loadtxt(full_name + '/circ D2D3.txt', delimiter=','),
)
def get_data(rootdir, param_name):
hold_outfile = []
hold_crosstau = []
for root, subFolders, files in os.walk(rootdir):
outfileName = os.path.join(root, "params.txt")
if os.path.exists(outfileName):
params = np.genfromtxt(outfileName, dtype=str, delimiter=',')
crosstau = float(params[:,1][params[:,0]==param_name][0])
hold_crosstau.append(crosstau)
hold_outfile.append(root)
data = np.vstack((hold_crosstau, hold_outfile)).T
idx = np.argsort(data[:,0].astype(float))
data = data[idx]
degree_of_corrs = np.array([get_corr_by_folder(d[1]) for d in data])
    hold_crosstau = data[:,0].astype(float)
grect = degree_of_corrs[:,0]
gdiag = degree_of_corrs[:,1]
gcirc = degree_of_corrs[:,2]
return [hold_crosstau, [grect, gdiag, gcirc]]
def plot_deg_of_corr(dat):
hold_crosstau = dat[0]
grect = dat[1][0]
gdiag = dat[1][1]
gcirc = dat[1][2]
plt.figure(figsize = (16/1.5, 9/1.5))
plt.plot(hold_crosstau, grect)
plt.plot(hold_crosstau, gdiag)
plt.plot(hold_crosstau, - gcirc)
    plt.xlabel('$\\tau_{HV} (ns)$') ; plt.ylabel('Degree of correlation')
plt.ylim([0,1.1])
plt.legend(['Rect', 'Diag', '- Circ']) | mit |
cherrypy/cherrypy | cherrypy/tutorial/tut10_http_errors.py | 6 | 2706 | """
Tutorial: HTTP errors
HTTPError is used to return an error response to the client.
CherryPy has lots of options regarding how such errors are
logged, displayed, and formatted.
"""
import os
import os.path
import cherrypy
localDir = os.path.dirname(__file__)
curpath = os.path.normpath(os.path.join(os.getcwd(), localDir))
class HTTPErrorDemo(object):
# Set a custom response for 403 errors.
_cp_config = {'error_page.403':
os.path.join(curpath, 'custom_error.html')}
@cherrypy.expose
def index(self):
# display some links that will result in errors
tracebacks = cherrypy.request.show_tracebacks
if tracebacks:
trace = 'off'
else:
trace = 'on'
return """
<html><body>
<p>Toggle tracebacks <a href="toggleTracebacks">%s</a></p>
<p><a href="/doesNotExist">Click me; I'm a broken link!</a></p>
<p>
<a href="/error?code=403">
Use a custom error page from a file.
</a>
</p>
<p>These errors are explicitly raised by the application:</p>
<ul>
<li><a href="/error?code=400">400</a></li>
<li><a href="/error?code=401">401</a></li>
<li><a href="/error?code=402">402</a></li>
<li><a href="/error?code=500">500</a></li>
</ul>
<p><a href="/messageArg">You can also set the response body
when you raise an error.</a></p>
</body></html>
""" % trace
@cherrypy.expose
def toggleTracebacks(self):
# simple function to toggle tracebacks on and off
tracebacks = cherrypy.request.show_tracebacks
cherrypy.config.update({'request.show_tracebacks': not tracebacks})
# redirect back to the index
raise cherrypy.HTTPRedirect('/')
@cherrypy.expose
def error(self, code):
# raise an error based on the get query
raise cherrypy.HTTPError(status=code)
@cherrypy.expose
def messageArg(self):
message = ("If you construct an HTTPError with a 'message' "
                   'argument, it will be placed on the error page '
'(underneath the status line by default).')
raise cherrypy.HTTPError(500, message=message)
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
    # CherryPy always starts with app.root when trying to map request URIs
    # to objects, so we need to mount a request handler root. A request
    # to '/' will be mapped to HTTPErrorDemo().index().
cherrypy.quickstart(HTTPErrorDemo(), config=tutconf)
| bsd-3-clause |
NunoEdgarGub1/nupic | tests/swarming/nupic/swarming/experiments/field_contrib_temporal/description.py | 8 | 15369 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Template file used by the OPF Experiment Generator to generate the actual
description.py file by replacing $XXXXXXXX tokens with desired values.
This description.py file was generated by:
'/Users/ronmarianetti/nupic/eng/lib/python2.6/site-packages/nupic/frameworks/opf/expGenerator/ExpGenerator.py'
"""
from nupic.frameworks.opf.expdescriptionapi import ExperimentDescriptionAPI
from nupic.frameworks.opf.expdescriptionhelpers import (
updateConfigFromSubConfig,
applyValueGettersToContainer,
DeferredDictLookup)
from nupic.frameworks.opf.clamodelcallbacks import *
from nupic.frameworks.opf.metrics import MetricSpec
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement)
from nupic.support import aggregationDivide
from nupic.frameworks.opf.opftaskdriver import (
IterationPhaseSpecLearnOnly,
IterationPhaseSpecInferOnly,
IterationPhaseSpecLearnAndInfer)
# Model Configuration Dictionary:
#
# Define the model parameters and adjust for any modifications if imported
# from a sub-experiment.
#
# These fields might be modified by a sub-experiment; this dict is passed
# between the sub-experiment and base experiment
#
#
# NOTE: Use of DEFERRED VALUE-GETTERs: dictionary fields and list elements
# within the config dictionary may be assigned futures derived from the
# ValueGetterBase class, such as DeferredDictLookup.
# This facility is particularly handy for enabling substitution of values in
# the config dictionary from other values in the config dictionary, which is
# needed by permutation.py-based experiments. These values will be resolved
# during the call to applyValueGettersToContainer(),
# which we call after the base experiment's config dictionary is updated from
# the sub-experiment. See ValueGetterBase and
# DeferredDictLookup for more details about value-getters.
#
# For each custom encoder parameter to be exposed to the sub-experiment/
# permutation overrides, define a variable in this section, using key names
# beginning with a single underscore character to avoid collisions with
# pre-defined keys (e.g., _dsEncoderFieldName2_N).
#
# Example:
# config = dict(
# _dsEncoderFieldName2_N = 70,
# _dsEncoderFieldName2_W = 5,
# dsEncoderSchema = [
# base=dict(
# fieldname='Name2', type='ScalarEncoder',
# name='Name2', minval=0, maxval=270, clipInput=True,
# n=DeferredDictLookup('_dsEncoderFieldName2_N'),
# w=DeferredDictLookup('_dsEncoderFieldName2_W')),
# ],
# )
# updateConfigFromSubConfig(config)
# applyValueGettersToContainer(config)
config = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [ (u'timestamp', 'first'),
(u'gym', 'first'),
(u'consumption', 'mean'),
(u'address', 'first')],
'hours': 0,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalNextStep',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
            # at each step. 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': { 'address': { 'fieldname': u'address',
'n': 300,
'name': u'address',
'type': 'SDRCategoryEncoder',
'w': 21},
'consumption': { 'clipInput': True,
'fieldname': u'consumption',
'maxval': 200,
'minval': 0,
'n': 1500,
'name': u'consumption',
'type': 'ScalarEncoder',
'w': 21},
'gym': { 'fieldname': u'gym',
'n': 300,
'name': u'gym',
'type': 'SDRCategoryEncoder',
'w': 21},
'timestamp_dayOfWeek': { 'dayOfWeek': (7, 3),
'fieldname': u'timestamp',
'name': u'timestamp_dayOfWeek',
'type': 'DateEncoder'},
'timestamp_timeOfDay': { 'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (7, 8),
'type': 'DateEncoder'}},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
            # Valid keys are any combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
#
# (value generated from SENSOR_AUTO_RESET)
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
'globalInhibition': 1,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
            # What percent of the column's receptive field is available
            # for potential synapses. At initialization time, we will
            # choose potentialPct * (2*potentialRadius+1)^2 potential
            # synapses per column.
'potentialPct': 0.5,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10. Cells whose activity
# level before inhibition falls below minDutyCycleBeforeInh
# will have their own internal synPermConnectedCell
# threshold set below this default value.
# (This concept applies to both SP and TP and so 'cells'
# is correct here as opposed to 'columns')
'synPermConnected': 0.1,
'synPermActiveInc': 0.1,
'synPermInactiveDec': 0.01,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 15,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 12,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 16,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 1,
},
'clParams': {
'regionName' : 'CLAClassifierRegion',
# Classifier diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
'clVerbosity' : 0,
# This controls how fast the classifier learns/forgets. Higher values
# make it adapt faster and forget older patterns faster.
'alpha': 0.001,
# This is set after the call to updateConfigFromSubConfig and is
# computed from the aggregationInfo and predictAheadTime.
'steps': '1',
},
'trainSPNetOnlyIfRequested': False,
},
}
# end of config dictionary
# Adjust base config dictionary for any modifications if imported from a
# sub-experiment
updateConfigFromSubConfig(config)
# Compute predictionSteps based on the predictAheadTime and the aggregation
# period, which may be permuted over.
if config['predictAheadTime'] is not None:
predictionSteps = int(round(aggregationDivide(
config['predictAheadTime'], config['aggregationInfo'])))
assert (predictionSteps >= 1)
config['modelParams']['clParams']['steps'] = str(predictionSteps)
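# Illustrative example (values are hypothetical): with
# predictAheadTime = dict(hours=1) and an aggregation period of
# dict(minutes=15), aggregationDivide returns 4.0, so the classifier is
# configured to predict 4 steps ahead ('steps' = '4').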
# Adjust config by applying ValueGetterBase-derived
# futures. NOTE: this MUST be called after updateConfigFromSubConfig() in order
# to support value-getter-based substitutions from the sub-experiment (if any)
applyValueGettersToContainer(config)
control = {
# The environment that the current model is being run in
"environment": 'nupic',
# Input stream specification per py/nupicengine/cluster/database/StreamDef.json.
#
'dataset' : { u'info': u'test_NoProviders',
u'streams': [ { u'columns': [u'*'],
u'info': u'test data',
u'source': u'file://swarming/test_data.csv'}],
u'version': 1},
# Iteration count: maximum number of iterations. Each iteration corresponds
# to one record from the (possibly aggregated) dataset. The task is
# terminated when either number of iterations reaches iterationCount or
# all records in the (possibly aggregated) database have been processed,
# whichever occurs first.
#
# iterationCount of -1 = iterate over the entire dataset
#'iterationCount' : ITERATION_COUNT,
# Metrics: A list of MetricSpecs that instantiate the metrics that are
# computed for this experiment
'metrics':[
MetricSpec(field=u'consumption',
inferenceElement=InferenceElement.prediction,
metric='rmse'),
],
# Logged Metrics: A sequence of regular expressions that specify which of
# the metrics from the Inference Specifications section MUST be logged for
# every prediction. The regex's correspond to the automatically generated
# metric labels. This is similar to the way the optimization metric is
# specified in permutations.py.
'loggedMetrics': ['.*nupicScore.*'],
}
descriptionInterface = ExperimentDescriptionAPI(modelConfig=config,
control=control)
| gpl-3.0 |
SlimRemix/android_external_chromium_org | third_party/cython/src/Cython/Debugger/libcython.py | 101 | 44489 | """
GDB extension that adds Cython support.
"""
from __future__ import with_statement
import sys
import textwrap
import traceback
import functools
import itertools
import collections
import gdb
try:
from lxml import etree
have_lxml = True
except ImportError:
have_lxml = False
try:
# Python 2.5
from xml.etree import cElementTree as etree
except ImportError:
try:
# Python 2.5
from xml.etree import ElementTree as etree
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree
except ImportError:
# normal ElementTree install
import elementtree.ElementTree as etree
try:
import pygments.lexers
import pygments.formatters
except ImportError:
pygments = None
sys.stderr.write("Install pygments for colorized source code.\n")
if hasattr(gdb, 'string_to_argv'):
from gdb import string_to_argv
else:
from shlex import split as string_to_argv
from Cython.Debugger import libpython
# C or Python type
CObject = 'CObject'
PythonObject = 'PythonObject'
_data_types = dict(CObject=CObject, PythonObject=PythonObject)
_filesystemencoding = sys.getfilesystemencoding() or 'UTF-8'
# decorators
def dont_suppress_errors(function):
"*sigh*, readline"
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except Exception:
traceback.print_exc()
raise
return wrapper
def default_selected_gdb_frame(err=True):
def decorator(function):
@functools.wraps(function)
def wrapper(self, frame=None, *args, **kwargs):
try:
frame = frame or gdb.selected_frame()
except RuntimeError:
raise gdb.GdbError("No frame is currently selected.")
if err and frame.name() is None:
raise NoFunctionNameInFrameError()
return function(self, frame, *args, **kwargs)
return wrapper
return decorator
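# For reference, this factory is applied further down in this file, e.g.:
#
#     @default_selected_gdb_frame(err=False)
#     def is_cython_function(self, frame): ...
#
# so wrapped methods can be called with or without an explicit frame; when the
# frame is omitted, gdb.selected_frame() is substituted.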
def require_cython_frame(function):
@functools.wraps(function)
@require_running_program
def wrapper(self, *args, **kwargs):
frame = kwargs.get('frame') or gdb.selected_frame()
if not self.is_cython_function(frame):
raise gdb.GdbError('Selected frame does not correspond with a '
'Cython function we know about.')
return function(self, *args, **kwargs)
return wrapper
def dispatch_on_frame(c_command, python_command=None):
def decorator(function):
@functools.wraps(function)
def wrapper(self, *args, **kwargs):
is_cy = self.is_cython_function()
is_py = self.is_python_function()
if is_cy or (is_py and not python_command):
function(self, *args, **kwargs)
elif is_py:
gdb.execute(python_command)
elif self.is_relevant_function():
gdb.execute(c_command)
else:
raise gdb.GdbError("Not a function cygdb knows about. "
"Use the normal GDB commands instead.")
return wrapper
return decorator
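# For reference, this decorator is used further down in this file, e.g.:
#
#     @dispatch_on_frame(c_command='info locals', python_command='py-locals')
#     def invoke(self, args, from_tty): ...
#
# The wrapped body runs for Cython frames (and for Python frames when no
# python_command is given); otherwise the matching GDB command is executed.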
def require_running_program(function):
@functools.wraps(function)
def wrapper(*args, **kwargs):
try:
gdb.selected_frame()
except RuntimeError:
raise gdb.GdbError("No frame is currently selected.")
return function(*args, **kwargs)
return wrapper
def gdb_function_value_to_unicode(function):
@functools.wraps(function)
def wrapper(self, string, *args, **kwargs):
if isinstance(string, gdb.Value):
string = string.string()
return function(self, string, *args, **kwargs)
return wrapper
# Classes that represent the debug information
# Don't rename the parameters of these classes, they come directly from the XML
class CythonModule(object):
def __init__(self, module_name, filename, c_filename):
self.name = module_name
self.filename = filename
self.c_filename = c_filename
self.globals = {}
# {cython_lineno: min(c_linenos)}
self.lineno_cy2c = {}
# {c_lineno: cython_lineno}
self.lineno_c2cy = {}
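        # Purely illustrative example: lineno_cy2c == {12: 3040} maps .pyx
        # line 12 to the lowest-numbered generated C line 3040, while
        # lineno_c2cy maps each generated C line back to its .pyx line.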
self.functions = {}
class CythonVariable(object):
def __init__(self, name, cname, qualified_name, type, lineno):
self.name = name
self.cname = cname
self.qualified_name = qualified_name
self.type = type
self.lineno = int(lineno)
class CythonFunction(CythonVariable):
def __init__(self,
module,
name,
cname,
pf_cname,
qualified_name,
lineno,
type=CObject,
is_initmodule_function="False"):
super(CythonFunction, self).__init__(name,
cname,
qualified_name,
type,
lineno)
self.module = module
self.pf_cname = pf_cname
self.is_initmodule_function = is_initmodule_function == "True"
self.locals = {}
self.arguments = []
self.step_into_functions = set()
# General purpose classes
class CythonBase(object):
@default_selected_gdb_frame(err=False)
def is_cython_function(self, frame):
return frame.name() in self.cy.functions_by_cname
@default_selected_gdb_frame(err=False)
def is_python_function(self, frame):
"""
Tells if a frame is associated with a Python function.
If we can't read the Python frame information, don't regard it as such.
"""
if frame.name() == 'PyEval_EvalFrameEx':
pyframe = libpython.Frame(frame).get_pyop()
return pyframe and not pyframe.is_optimized_out()
return False
@default_selected_gdb_frame()
def get_c_function_name(self, frame):
return frame.name()
@default_selected_gdb_frame()
def get_c_lineno(self, frame):
return frame.find_sal().line
@default_selected_gdb_frame()
def get_cython_function(self, frame):
result = self.cy.functions_by_cname.get(frame.name())
if result is None:
raise NoCythonFunctionInFrameError()
return result
@default_selected_gdb_frame()
def get_cython_lineno(self, frame):
"""
Get the current Cython line number. Returns 0 if there is no
correspondence between the C and Cython code.
"""
cyfunc = self.get_cython_function(frame)
return cyfunc.module.lineno_c2cy.get(self.get_c_lineno(frame), 0)
@default_selected_gdb_frame()
def get_source_desc(self, frame):
filename = lineno = lexer = None
if self.is_cython_function(frame):
filename = self.get_cython_function(frame).module.filename
lineno = self.get_cython_lineno(frame)
if pygments:
lexer = pygments.lexers.CythonLexer(stripall=False)
elif self.is_python_function(frame):
pyframeobject = libpython.Frame(frame).get_pyop()
if not pyframeobject:
raise gdb.GdbError(
'Unable to read information on python frame')
filename = pyframeobject.filename()
lineno = pyframeobject.current_line_num()
if pygments:
lexer = pygments.lexers.PythonLexer(stripall=False)
else:
symbol_and_line_obj = frame.find_sal()
if not symbol_and_line_obj or not symbol_and_line_obj.symtab:
filename = None
lineno = 0
else:
filename = symbol_and_line_obj.symtab.fullname()
lineno = symbol_and_line_obj.line
if pygments:
lexer = pygments.lexers.CLexer(stripall=False)
return SourceFileDescriptor(filename, lexer), lineno
@default_selected_gdb_frame()
def get_source_line(self, frame):
source_desc, lineno = self.get_source_desc()
return source_desc.get_source(lineno)
@default_selected_gdb_frame()
def is_relevant_function(self, frame):
"""
returns whether we care about a frame on the user-level when debugging
Cython code
"""
name = frame.name()
older_frame = frame.older()
if self.is_cython_function(frame) or self.is_python_function(frame):
return True
elif older_frame and self.is_cython_function(older_frame):
# check for direct C function call from a Cython function
cython_func = self.get_cython_function(older_frame)
return name in cython_func.step_into_functions
return False
@default_selected_gdb_frame(err=False)
def print_stackframe(self, frame, index, is_c=False):
"""
Print a C, Cython or Python stack frame and the line of source code
if available.
"""
# do this to prevent the require_cython_frame decorator from
# raising GdbError when calling self.cy.cy_cvalue.invoke()
selected_frame = gdb.selected_frame()
frame.select()
try:
source_desc, lineno = self.get_source_desc(frame)
except NoFunctionNameInFrameError:
print '#%-2d Unknown Frame (compile with -g)' % index
return
if not is_c and self.is_python_function(frame):
pyframe = libpython.Frame(frame).get_pyop()
if pyframe is None or pyframe.is_optimized_out():
# print this python function as a C function
return self.print_stackframe(frame, index, is_c=True)
func_name = pyframe.co_name
func_cname = 'PyEval_EvalFrameEx'
func_args = []
elif self.is_cython_function(frame):
cyfunc = self.get_cython_function(frame)
f = lambda arg: self.cy.cy_cvalue.invoke(arg, frame=frame)
func_name = cyfunc.name
func_cname = cyfunc.cname
func_args = [] # [(arg, f(arg)) for arg in cyfunc.arguments]
else:
source_desc, lineno = self.get_source_desc(frame)
func_name = frame.name()
func_cname = func_name
func_args = []
try:
gdb_value = gdb.parse_and_eval(func_cname)
except RuntimeError:
func_address = 0
else:
# Seriously? Why is the address not an int?
func_address = int(str(gdb_value.address).split()[0], 0)
a = ', '.join('%s=%s' % (name, val) for name, val in func_args)
print '#%-2d 0x%016x in %s(%s)' % (index, func_address, func_name, a),
if source_desc.filename is not None:
print 'at %s:%s' % (source_desc.filename, lineno),
print
try:
print ' ' + source_desc.get_source(lineno)
except gdb.GdbError:
pass
selected_frame.select()
def get_remote_cython_globals_dict(self):
m = gdb.parse_and_eval('__pyx_m')
try:
PyModuleObject = gdb.lookup_type('PyModuleObject')
except RuntimeError:
raise gdb.GdbError(textwrap.dedent("""\
Unable to lookup type PyModuleObject, did you compile python
with debugging support (-g)?"""))
m = m.cast(PyModuleObject.pointer())
return m['md_dict']
def get_cython_globals_dict(self):
"""
Get the Cython globals dict where the remote names are turned into
local strings.
"""
remote_dict = self.get_remote_cython_globals_dict()
pyobject_dict = libpython.PyObjectPtr.from_pyobject_ptr(remote_dict)
result = {}
seen = set()
for k, v in pyobject_dict.iteritems():
result[k.proxyval(seen)] = v
return result
def print_gdb_value(self, name, value, max_name_length=None, prefix=''):
if libpython.pretty_printer_lookup(value):
typename = ''
else:
typename = '(%s) ' % (value.type,)
if max_name_length is None:
print '%s%s = %s%s' % (prefix, name, typename, value)
else:
print '%s%-*s = %s%s' % (prefix, max_name_length, name, typename,
value)
def is_initialized(self, cython_func, local_name):
cyvar = cython_func.locals[local_name]
cur_lineno = self.get_cython_lineno()
if '->' in cyvar.cname:
# Closed over free variable
if cur_lineno > cython_func.lineno:
if cyvar.type == PythonObject:
return long(gdb.parse_and_eval(cyvar.cname))
return True
return False
return cur_lineno > cyvar.lineno
class SourceFileDescriptor(object):
def __init__(self, filename, lexer, formatter=None):
self.filename = filename
self.lexer = lexer
self.formatter = formatter
def valid(self):
return self.filename is not None
def lex(self, code):
if pygments and self.lexer and parameters.colorize_code:
bg = parameters.terminal_background.value
if self.formatter is None:
formatter = pygments.formatters.TerminalFormatter(bg=bg)
else:
formatter = self.formatter
return pygments.highlight(code, self.lexer, formatter)
return code
def _get_source(self, start, stop, lex_source, mark_line, lex_entire):
with open(self.filename) as f:
# to provide "correct" colouring, the entire code needs to be
# lexed. However, this makes a lot of things terribly slow, so
# we decide not to. Besides, it's unlikely to matter.
if lex_source and lex_entire:
f = self.lex(f.read()).splitlines()
slice = itertools.islice(f, start - 1, stop - 1)
for idx, line in enumerate(slice):
if start + idx == mark_line:
prefix = '>'
else:
prefix = ' '
if lex_source and not lex_entire:
line = self.lex(line)
yield '%s %4d %s' % (prefix, start + idx, line.rstrip())
def get_source(self, start, stop=None, lex_source=True, mark_line=0,
lex_entire=False):
exc = gdb.GdbError('Unable to retrieve source code')
if not self.filename:
raise exc
start = max(start, 1)
if stop is None:
stop = start + 1
try:
return '\n'.join(
self._get_source(start, stop, lex_source, mark_line, lex_entire))
except IOError:
raise exc
# Errors
class CyGDBError(gdb.GdbError):
"""
    Base class for Cython-command related errors
"""
def __init__(self, *args):
args = args or (self.msg,)
super(CyGDBError, self).__init__(*args)
class NoCythonFunctionInFrameError(CyGDBError):
"""
raised when the user requests the current cython function, which is
unavailable
"""
msg = "Current function is a function cygdb doesn't know about"
class NoFunctionNameInFrameError(NoCythonFunctionInFrameError):
"""
raised when the name of the C function could not be determined
in the current C stack frame
"""
msg = ('C function name could not be determined in the current C stack '
'frame')
# Parameters
class CythonParameter(gdb.Parameter):
"""
Base class for cython parameters
"""
def __init__(self, name, command_class, parameter_class, default=None):
self.show_doc = self.set_doc = self.__class__.__doc__
super(CythonParameter, self).__init__(name, command_class,
parameter_class)
if default is not None:
self.value = default
def __nonzero__(self):
return bool(self.value)
__bool__ = __nonzero__ # python 3
class CompleteUnqualifiedFunctionNames(CythonParameter):
"""
Have 'cy break' complete unqualified function or method names.
"""
class ColorizeSourceCode(CythonParameter):
"""
Tell cygdb whether to colorize source code.
"""
class TerminalBackground(CythonParameter):
"""
Tell cygdb about the user's terminal background (light or dark).
"""
class CythonParameters(object):
"""
Simple container class that might get more functionality in the distant
future (mostly to remind us that we're dealing with parameters).
"""
def __init__(self):
self.complete_unqualified = CompleteUnqualifiedFunctionNames(
'cy_complete_unqualified',
gdb.COMMAND_BREAKPOINTS,
gdb.PARAM_BOOLEAN,
True)
self.colorize_code = ColorizeSourceCode(
'cy_colorize_code',
gdb.COMMAND_FILES,
gdb.PARAM_BOOLEAN,
True)
self.terminal_background = TerminalBackground(
'cy_terminal_background_color',
gdb.COMMAND_FILES,
gdb.PARAM_STRING,
"dark")
parameters = CythonParameters()
# Commands
class CythonCommand(gdb.Command, CythonBase):
"""
Base class for Cython commands
"""
command_class = gdb.COMMAND_NONE
@classmethod
def _register(cls, clsname, args, kwargs):
if not hasattr(cls, 'completer_class'):
return cls(clsname, cls.command_class, *args, **kwargs)
else:
return cls(clsname, cls.command_class, cls.completer_class,
*args, **kwargs)
@classmethod
def register(cls, *args, **kwargs):
alias = getattr(cls, 'alias', None)
if alias:
cls._register(cls.alias, args, kwargs)
return cls._register(cls.name, args, kwargs)
class CyCy(CythonCommand):
"""
Invoke a Cython command. Available commands are:
cy import
cy break
cy step
cy next
cy run
cy cont
cy finish
cy up
cy down
cy select
cy bt / cy backtrace
cy list
cy print
cy set
cy locals
cy globals
cy exec
"""
name = 'cy'
command_class = gdb.COMMAND_NONE
completer_class = gdb.COMPLETE_COMMAND
def __init__(self, name, command_class, completer_class):
        # keep the signature 2.5 compatible (i.e. do not use f(*a, k=v))
super(CythonCommand, self).__init__(name, command_class,
completer_class, prefix=True)
commands = dict(
# GDB commands
import_ = CyImport.register(),
break_ = CyBreak.register(),
step = CyStep.register(),
next = CyNext.register(),
run = CyRun.register(),
cont = CyCont.register(),
finish = CyFinish.register(),
up = CyUp.register(),
down = CyDown.register(),
select = CySelect.register(),
bt = CyBacktrace.register(),
list = CyList.register(),
print_ = CyPrint.register(),
locals = CyLocals.register(),
globals = CyGlobals.register(),
exec_ = libpython.FixGdbCommand('cy exec', '-cy-exec'),
_exec = CyExec.register(),
set = CySet.register(),
# GDB functions
cy_cname = CyCName('cy_cname'),
cy_cvalue = CyCValue('cy_cvalue'),
cy_lineno = CyLine('cy_lineno'),
cy_eval = CyEval('cy_eval'),
)
for command_name, command in commands.iteritems():
command.cy = self
setattr(self, command_name, command)
self.cy = self
# Cython module namespace
self.cython_namespace = {}
# maps (unique) qualified function names (e.g.
# cythonmodule.ClassName.method_name) to the CythonFunction object
self.functions_by_qualified_name = {}
# unique cnames of Cython functions
self.functions_by_cname = {}
# map function names like method_name to a list of all such
# CythonFunction objects
self.functions_by_name = collections.defaultdict(list)
class CyImport(CythonCommand):
"""
Import debug information outputted by the Cython compiler
Example: cy import FILE...
"""
name = 'cy import'
command_class = gdb.COMMAND_STATUS
completer_class = gdb.COMPLETE_FILENAME
def invoke(self, args, from_tty):
args = args.encode(_filesystemencoding)
for arg in string_to_argv(args):
try:
f = open(arg)
except OSError, e:
                raise gdb.GdbError('Unable to open file %r: %s' %
                                   (arg, e.args[1]))
t = etree.parse(f)
for module in t.getroot():
cython_module = CythonModule(**module.attrib)
self.cy.cython_namespace[cython_module.name] = cython_module
for variable in module.find('Globals'):
d = variable.attrib
cython_module.globals[d['name']] = CythonVariable(**d)
for function in module.find('Functions'):
cython_function = CythonFunction(module=cython_module,
**function.attrib)
# update the global function mappings
name = cython_function.name
qname = cython_function.qualified_name
self.cy.functions_by_name[name].append(cython_function)
self.cy.functions_by_qualified_name[
cython_function.qualified_name] = cython_function
self.cy.functions_by_cname[
cython_function.cname] = cython_function
d = cython_module.functions[qname] = cython_function
for local in function.find('Locals'):
d = local.attrib
cython_function.locals[d['name']] = CythonVariable(**d)
for step_into_func in function.find('StepIntoFunctions'):
d = step_into_func.attrib
cython_function.step_into_functions.add(d['name'])
cython_function.arguments.extend(
funcarg.tag for funcarg in function.find('Arguments'))
for marker in module.find('LineNumberMapping'):
cython_lineno = int(marker.attrib['cython_lineno'])
c_linenos = map(int, marker.attrib['c_linenos'].split())
cython_module.lineno_cy2c[cython_lineno] = min(c_linenos)
for c_lineno in c_linenos:
cython_module.lineno_c2cy[c_lineno] = cython_lineno
class CyBreak(CythonCommand):
"""
Set a breakpoint for Cython code using Cython qualified name notation, e.g.:
cy break cython_modulename.ClassName.method_name...
or normal notation:
cy break function_or_method_name...
or for a line number:
cy break cython_module:lineno...
Set a Python breakpoint:
Break on any function or method named 'func' in module 'modname'
cy break -p modname.func...
Break on any function or method named 'func'
cy break -p func...
"""
name = 'cy break'
command_class = gdb.COMMAND_BREAKPOINTS
def _break_pyx(self, name):
modulename, _, lineno = name.partition(':')
lineno = int(lineno)
if modulename:
cython_module = self.cy.cython_namespace[modulename]
else:
cython_module = self.get_cython_function().module
if lineno in cython_module.lineno_cy2c:
c_lineno = cython_module.lineno_cy2c[lineno]
breakpoint = '%s:%s' % (cython_module.c_filename, c_lineno)
gdb.execute('break ' + breakpoint)
else:
raise gdb.GdbError("Not a valid line number. "
"Does it contain actual code?")
def _break_funcname(self, funcname):
func = self.cy.functions_by_qualified_name.get(funcname)
if func and func.is_initmodule_function:
func = None
break_funcs = [func]
if not func:
funcs = self.cy.functions_by_name.get(funcname) or []
funcs = [f for f in funcs if not f.is_initmodule_function]
if not funcs:
gdb.execute('break ' + funcname)
return
if len(funcs) > 1:
# multiple functions, let the user pick one
print 'There are multiple such functions:'
for idx, func in enumerate(funcs):
print '%3d) %s' % (idx, func.qualified_name)
while True:
try:
result = raw_input(
"Select a function, press 'a' for all "
"functions or press 'q' or '^D' to quit: ")
except EOFError:
return
else:
if result.lower() == 'q':
return
elif result.lower() == 'a':
break_funcs = funcs
break
elif (result.isdigit() and
0 <= int(result) < len(funcs)):
break_funcs = [funcs[int(result)]]
break
else:
print 'Not understood...'
else:
break_funcs = [funcs[0]]
for func in break_funcs:
gdb.execute('break %s' % func.cname)
if func.pf_cname:
gdb.execute('break %s' % func.pf_cname)
def invoke(self, function_names, from_tty):
argv = string_to_argv(function_names.encode('UTF-8'))
if function_names.startswith('-p'):
argv = argv[1:]
python_breakpoints = True
else:
python_breakpoints = False
for funcname in argv:
if python_breakpoints:
gdb.execute('py-break %s' % funcname)
elif ':' in funcname:
self._break_pyx(funcname)
else:
self._break_funcname(funcname)
@dont_suppress_errors
def complete(self, text, word):
# Filter init-module functions (breakpoints can be set using
# modulename:linenumber).
names = [n for n, L in self.cy.functions_by_name.iteritems()
if any(not f.is_initmodule_function for f in L)]
qnames = [n for n, f in self.cy.functions_by_qualified_name.iteritems()
if not f.is_initmodule_function]
if parameters.complete_unqualified:
all_names = itertools.chain(qnames, names)
else:
all_names = qnames
words = text.strip().split()
if not words or '.' not in words[-1]:
# complete unqualified
seen = set(text[:-len(word)].split())
return [n for n in all_names
if n.startswith(word) and n not in seen]
# complete qualified name
lastword = words[-1]
compl = [n for n in qnames if n.startswith(lastword)]
if len(lastword) > len(word):
# readline sees something (e.g. a '.') as a word boundary, so don't
# "recomplete" this prefix
strip_prefix_length = len(lastword) - len(word)
compl = [n[strip_prefix_length:] for n in compl]
return compl
class CythonInfo(CythonBase, libpython.PythonInfo):
"""
Implementation of the interface dictated by libpython.LanguageInfo.
"""
def lineno(self, frame):
# Take care of the Python and Cython levels. We need to care for both
        # as we can't simply dispatch to 'py-step', since that would work for
# stepping through Python code, but it would not step back into Cython-
# related code. The C level should be dispatched to the 'step' command.
if self.is_cython_function(frame):
return self.get_cython_lineno(frame)
return super(CythonInfo, self).lineno(frame)
def get_source_line(self, frame):
try:
line = super(CythonInfo, self).get_source_line(frame)
except gdb.GdbError:
return None
else:
return line.strip() or None
def exc_info(self, frame):
if self.is_python_function:
return super(CythonInfo, self).exc_info(frame)
def runtime_break_functions(self):
if self.is_cython_function():
return self.get_cython_function().step_into_functions
return ()
def static_break_functions(self):
result = ['PyEval_EvalFrameEx']
result.extend(self.cy.functions_by_cname)
return result
class CythonExecutionControlCommand(CythonCommand,
libpython.ExecutionControlCommandBase):
@classmethod
def register(cls):
return cls(cls.name, cython_info)
class CyStep(CythonExecutionControlCommand, libpython.PythonStepperMixin):
"Step through Cython, Python or C code."
name = 'cy -step'
stepinto = True
def invoke(self, args, from_tty):
if self.is_python_function():
self.python_step(self.stepinto)
elif not self.is_cython_function():
if self.stepinto:
command = 'step'
else:
command = 'next'
self.finish_executing(gdb.execute(command, to_string=True))
else:
self.step(stepinto=self.stepinto)
class CyNext(CyStep):
"Step-over Cython, Python or C code."
name = 'cy -next'
stepinto = False
class CyRun(CythonExecutionControlCommand):
"""
Run a Cython program. This is like the 'run' command, except that it
displays Cython or Python source lines as well
"""
name = 'cy run'
invoke = CythonExecutionControlCommand.run
class CyCont(CythonExecutionControlCommand):
"""
Continue a Cython program. This is like the 'run' command, except that it
displays Cython or Python source lines as well.
"""
name = 'cy cont'
invoke = CythonExecutionControlCommand.cont
class CyFinish(CythonExecutionControlCommand):
"""
Execute until the function returns.
"""
name = 'cy finish'
invoke = CythonExecutionControlCommand.finish
class CyUp(CythonCommand):
"""
Go up a Cython, Python or relevant C frame.
"""
name = 'cy up'
_command = 'up'
def invoke(self, *args):
try:
gdb.execute(self._command, to_string=True)
while not self.is_relevant_function(gdb.selected_frame()):
gdb.execute(self._command, to_string=True)
except RuntimeError, e:
raise gdb.GdbError(*e.args)
frame = gdb.selected_frame()
index = 0
while frame:
frame = frame.older()
index += 1
self.print_stackframe(index=index - 1)
class CyDown(CyUp):
"""
Go down a Cython, Python or relevant C frame.
"""
name = 'cy down'
_command = 'down'
class CySelect(CythonCommand):
"""
Select a frame. Use frame numbers as listed in `cy backtrace`.
This command is useful because `cy backtrace` prints a reversed backtrace.
"""
name = 'cy select'
def invoke(self, stackno, from_tty):
try:
stackno = int(stackno)
except ValueError:
raise gdb.GdbError("Not a valid number: %r" % (stackno,))
frame = gdb.selected_frame()
while frame.newer():
frame = frame.newer()
stackdepth = libpython.stackdepth(frame)
try:
gdb.execute('select %d' % (stackdepth - stackno - 1,))
except RuntimeError, e:
raise gdb.GdbError(*e.args)
class CyBacktrace(CythonCommand):
'Print the Cython stack'
name = 'cy bt'
alias = 'cy backtrace'
command_class = gdb.COMMAND_STACK
completer_class = gdb.COMPLETE_NONE
@require_running_program
def invoke(self, args, from_tty):
# get the first frame
frame = gdb.selected_frame()
while frame.older():
frame = frame.older()
print_all = args == '-a'
index = 0
while frame:
try:
is_relevant = self.is_relevant_function(frame)
except CyGDBError:
is_relevant = False
if print_all or is_relevant:
self.print_stackframe(frame, index)
index += 1
frame = frame.newer()
class CyList(CythonCommand):
"""
    List Cython source code. To disable or customize colouring, see the cy_*
    parameters.
"""
name = 'cy list'
command_class = gdb.COMMAND_FILES
completer_class = gdb.COMPLETE_NONE
# @dispatch_on_frame(c_command='list')
def invoke(self, _, from_tty):
sd, lineno = self.get_source_desc()
source = sd.get_source(lineno - 5, lineno + 5, mark_line=lineno,
lex_entire=True)
print source
class CyPrint(CythonCommand):
"""
Print a Cython variable using 'cy-print x' or 'cy-print module.function.x'
"""
name = 'cy print'
command_class = gdb.COMMAND_DATA
def invoke(self, name, from_tty, max_name_length=None):
if self.is_python_function():
return gdb.execute('py-print ' + name)
elif self.is_cython_function():
value = self.cy.cy_cvalue.invoke(name.lstrip('*'))
for c in name:
if c == '*':
value = value.dereference()
else:
break
self.print_gdb_value(name, value, max_name_length)
else:
gdb.execute('print ' + name)
def complete(self):
if self.is_cython_function():
f = self.get_cython_function()
return list(itertools.chain(f.locals, f.globals))
else:
return []
sortkey = lambda (name, value): name.lower()
class CyLocals(CythonCommand):
"""
List the locals from the current Cython frame.
"""
name = 'cy locals'
command_class = gdb.COMMAND_STACK
completer_class = gdb.COMPLETE_NONE
@dispatch_on_frame(c_command='info locals', python_command='py-locals')
def invoke(self, args, from_tty):
cython_function = self.get_cython_function()
if cython_function.is_initmodule_function:
self.cy.globals.invoke(args, from_tty)
return
local_cython_vars = cython_function.locals
max_name_length = len(max(local_cython_vars, key=len))
for name, cyvar in sorted(local_cython_vars.iteritems(), key=sortkey):
if self.is_initialized(self.get_cython_function(), cyvar.name):
value = gdb.parse_and_eval(cyvar.cname)
if not value.is_optimized_out:
self.print_gdb_value(cyvar.name, value,
max_name_length, '')
class CyGlobals(CyLocals):
"""
List the globals from the current Cython module.
"""
name = 'cy globals'
command_class = gdb.COMMAND_STACK
completer_class = gdb.COMPLETE_NONE
@dispatch_on_frame(c_command='info variables', python_command='py-globals')
def invoke(self, args, from_tty):
global_python_dict = self.get_cython_globals_dict()
module_globals = self.get_cython_function().module.globals
max_globals_len = 0
max_globals_dict_len = 0
if module_globals:
max_globals_len = len(max(module_globals, key=len))
if global_python_dict:
            max_globals_dict_len = len(max(global_python_dict, key=len))
max_name_length = max(max_globals_len, max_globals_dict_len)
seen = set()
print 'Python globals:'
for k, v in sorted(global_python_dict.iteritems(), key=sortkey):
v = v.get_truncated_repr(libpython.MAX_OUTPUT_LEN)
seen.add(k)
print ' %-*s = %s' % (max_name_length, k, v)
print 'C globals:'
for name, cyvar in sorted(module_globals.iteritems(), key=sortkey):
if name not in seen:
try:
value = gdb.parse_and_eval(cyvar.cname)
except RuntimeError:
pass
else:
if not value.is_optimized_out:
self.print_gdb_value(cyvar.name, value,
max_name_length, ' ')
class EvaluateOrExecuteCodeMixin(object):
"""
Evaluate or execute Python code in a Cython or Python frame. The 'evalcode'
    method evaluates Python code, prints a traceback if an exception went
uncaught, and returns any return value as a gdb.Value (NULL on exception).
"""
def _fill_locals_dict(self, executor, local_dict_pointer):
"Fill a remotely allocated dict with values from the Cython C stack"
cython_func = self.get_cython_function()
for name, cyvar in cython_func.locals.iteritems():
if (cyvar.type == PythonObject and
self.is_initialized(cython_func, name)):
try:
val = gdb.parse_and_eval(cyvar.cname)
except RuntimeError:
continue
else:
if val.is_optimized_out:
continue
pystringp = executor.alloc_pystring(name)
code = '''
(PyObject *) PyDict_SetItem(
(PyObject *) %d,
(PyObject *) %d,
(PyObject *) %s)
''' % (local_dict_pointer, pystringp, cyvar.cname)
try:
if gdb.parse_and_eval(code) < 0:
gdb.parse_and_eval('PyErr_Print()')
raise gdb.GdbError("Unable to execute Python code.")
finally:
# PyDict_SetItem doesn't steal our reference
executor.xdecref(pystringp)
def _find_first_cython_or_python_frame(self):
frame = gdb.selected_frame()
while frame:
if (self.is_cython_function(frame) or
self.is_python_function(frame)):
frame.select()
return frame
frame = frame.older()
raise gdb.GdbError("There is no Cython or Python frame on the stack.")
def _evalcode_cython(self, executor, code, input_type):
with libpython.FetchAndRestoreError():
# get the dict of Cython globals and construct a dict in the
# inferior with Cython locals
global_dict = gdb.parse_and_eval(
'(PyObject *) PyModule_GetDict(__pyx_m)')
local_dict = gdb.parse_and_eval('(PyObject *) PyDict_New()')
try:
self._fill_locals_dict(executor,
libpython.pointervalue(local_dict))
result = executor.evalcode(code, input_type, global_dict,
local_dict)
finally:
executor.xdecref(libpython.pointervalue(local_dict))
return result
def evalcode(self, code, input_type):
"""
Evaluate `code` in a Python or Cython stack frame using the given
`input_type`.
"""
frame = self._find_first_cython_or_python_frame()
executor = libpython.PythonCodeExecutor()
if self.is_python_function(frame):
return libpython._evalcode_python(executor, code, input_type)
return self._evalcode_cython(executor, code, input_type)
class CyExec(CythonCommand, libpython.PyExec, EvaluateOrExecuteCodeMixin):
"""
Execute Python code in the nearest Python or Cython frame.
"""
name = '-cy-exec'
command_class = gdb.COMMAND_STACK
completer_class = gdb.COMPLETE_NONE
def invoke(self, expr, from_tty):
expr, input_type = self.readcode(expr)
executor = libpython.PythonCodeExecutor()
executor.xdecref(self.evalcode(expr, executor.Py_single_input))
class CySet(CythonCommand):
"""
Set a Cython variable to a certain value
cy set my_cython_c_variable = 10
cy set my_cython_py_variable = $cy_eval("{'doner': 'kebab'}")
This is equivalent to
set $cy_value("my_cython_variable") = 10
"""
name = 'cy set'
command_class = gdb.COMMAND_DATA
completer_class = gdb.COMPLETE_NONE
@require_cython_frame
def invoke(self, expr, from_tty):
name_and_expr = expr.split('=', 1)
if len(name_and_expr) != 2:
raise gdb.GdbError("Invalid expression. Use 'cy set var = expr'.")
varname, expr = name_and_expr
cname = self.cy.cy_cname.invoke(varname.strip())
gdb.execute("set %s = %s" % (cname, expr))
# Functions
class CyCName(gdb.Function, CythonBase):
"""
Get the C name of a Cython variable in the current context.
Examples:
print $cy_cname("function")
print $cy_cname("Class.method")
print $cy_cname("module.function")
"""
@require_cython_frame
@gdb_function_value_to_unicode
def invoke(self, cyname, frame=None):
frame = frame or gdb.selected_frame()
cname = None
if self.is_cython_function(frame):
cython_function = self.get_cython_function(frame)
if cyname in cython_function.locals:
cname = cython_function.locals[cyname].cname
elif cyname in cython_function.module.globals:
cname = cython_function.module.globals[cyname].cname
else:
qname = '%s.%s' % (cython_function.module.name, cyname)
if qname in cython_function.module.functions:
cname = cython_function.module.functions[qname].cname
if not cname:
cname = self.cy.functions_by_qualified_name.get(cyname)
if not cname:
raise gdb.GdbError('No such Cython variable: %s' % cyname)
return cname
class CyCValue(CyCName):
"""
Get the value of a Cython variable.
"""
@require_cython_frame
@gdb_function_value_to_unicode
def invoke(self, cyname, frame=None):
globals_dict = self.get_cython_globals_dict()
cython_function = self.get_cython_function(frame)
if self.is_initialized(cython_function, cyname):
cname = super(CyCValue, self).invoke(cyname, frame=frame)
return gdb.parse_and_eval(cname)
elif cyname in globals_dict:
return globals_dict[cyname]._gdbval
else:
raise gdb.GdbError("Variable %s is not initialized." % cyname)
class CyLine(gdb.Function, CythonBase):
"""
Get the current Cython line.
"""
@require_cython_frame
def invoke(self):
return self.get_cython_lineno()
class CyEval(gdb.Function, CythonBase, EvaluateOrExecuteCodeMixin):
"""
    Evaluate Python code in the nearest Python or Cython frame and return
    the result.
"""
@gdb_function_value_to_unicode
def invoke(self, python_expression):
input_type = libpython.PythonCodeExecutor.Py_eval_input
return self.evalcode(python_expression, input_type)
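# Usage sketch from the GDB prompt (variable names are hypothetical); this is
# the convenience function that the 'cy set' examples above rely on:
#
#   print $cy_eval("len(some_list)")
#   cy set my_py_variable = $cy_eval("[1, 2, 3]")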
cython_info = CythonInfo()
cy = CyCy.register()
cython_info.cy = cy
def register_defines():
libpython.source_gdb_script(textwrap.dedent("""\
define cy step
cy -step
end
define cy next
cy -next
end
document cy step
%s
end
document cy next
%s
end
""") % (CyStep.__doc__, CyNext.__doc__))
register_defines()
| bsd-3-clause |