repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 classes) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 classes)
---|---|---|---|---|---
hynnet/openwrt-mt7620 | staging_dir/target-mipsel_r2_uClibc-0.9.33.2/root-ralink/usr/lib/python2.7/encodings/unicode_escape.py | 852 | 1184 | """ Python 'unicode-escape' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.unicode_escape_encode
decode = codecs.unicode_escape_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.unicode_escape_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.unicode_escape_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='unicode-escape',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
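# A hedged, minimal self-test of the codec defined above (not part of the
# original module; the sample string is an arbitrary assumption):
if __name__ == '__main__':
    text = u"caf\xe9\n"
    encoded, consumed = codecs.unicode_escape_encode(text)
    assert encoded == 'caf\\xe9\\n' and consumed == len(text)
    decoded, _ = codecs.unicode_escape_decode(encoded)
    assert decoded == text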
| gpl-2.0 |
quanghieu/linux-DFI | tools/perf/scripts/python/sctop.py | 1996 | 2102 | # system call top
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";
for_comm = None
default_interval = 3
interval = default_interval
if len(sys.argv) > 3:
sys.exit(usage)
if len(sys.argv) > 2:
for_comm = sys.argv[1]
interval = int(sys.argv[2])
elif len(sys.argv) > 1:
try:
interval = int(sys.argv[1])
except ValueError:
for_comm = sys.argv[1]
interval = default_interval
syscalls = autodict()
def trace_begin():
thread.start_new_thread(print_syscall_totals, (interval,))
pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
raw_syscalls__sys_enter(**locals())
def print_syscall_totals(interval):
while 1:
clear_term()
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
try:
print "%-40s %10d\n" % (syscall_name(id), val),
except TypeError:
pass
syscalls.clear()
time.sleep(interval)
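# For reference, a stand-in sketch of the counting idiom used in
# raw_syscalls__sys_enter() above. autodict comes from perf's Util module;
# collections.defaultdict approximates it here (an assumption):
#
#   import collections
#   def autodict_sketch():
#       return collections.defaultdict(autodict_sketch)
#   counts = autodict_sketch()
#   try:
#       counts[42] += 1    # first access yields an empty dict -> TypeError
#   except TypeError:
#       counts[42] = 1     # so the slot is seeded with 1
#   counts[42] += 1        # subsequent hits increment normally -> 2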
| gpl-2.0 |
orymeyer/Flask-Python-GAE-Login-Registration | lib/Werkzeug-0.10.4.dist-info/werkzeug/_reloader.py | 116 | 7938 | import os
import sys
import time
import subprocess
import threading
from itertools import chain
from werkzeug._internal import _log
from werkzeug._compat import PY2, iteritems, text_type
def _iter_module_files():
"""This iterates over all relevant Python files. It goes through all
loaded files from modules, all files in folders of already loaded modules
as well as all files reachable through a package.
"""
# The list call is necessary on Python 3 in case the module
# dictionary modifies during iteration.
for module in list(sys.modules.values()):
if module is None:
continue
filename = getattr(module, '__file__', None)
if filename:
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in ('.pyc', '.pyo'):
filename = filename[:-1]
yield filename
def _find_observable_paths(extra_files=None):
"""Finds all paths that should be observed."""
rv = set(os.path.abspath(x) for x in sys.path)
for filename in extra_files or ():
rv.add(os.path.dirname(os.path.abspath(filename)))
for module in list(sys.modules.values()):
fn = getattr(module, '__file__', None)
if fn is None:
continue
fn = os.path.abspath(fn)
rv.add(os.path.dirname(fn))
return _find_common_roots(rv)
def _find_common_roots(paths):
"""Out of some paths it finds the common roots that need monitoring."""
paths = [x.split(os.path.sep) for x in paths]
root = {}
for chunks in sorted(paths, key=len, reverse=True):
node = root
for chunk in chunks:
node = node.setdefault(chunk, {})
node.clear()
rv = set()
def _walk(node, path):
for prefix, child in iteritems(node):
_walk(child, path + (prefix,))
if not node:
rv.add('/'.join(path))
_walk(root, ())
return rv
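# Illustration (not in the original source): assuming os.path.sep == '/',
# _find_common_roots({'/a/b/c', '/a/b', '/a/d', '/x/y'}) collapses '/a/b/c'
# into '/a/b' and returns {'/a/b', '/a/d', '/x/y'}, so the reloader watches
# three directories instead of four.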
class ReloaderLoop(object):
name = None
# monkeypatched by testsuite. wrapping with `staticmethod` is required in
# case time.sleep has been replaced by a non-c function (e.g. by
# `eventlet.monkey_patch`) before we get here
_sleep = staticmethod(time.sleep)
def __init__(self, extra_files=None, interval=1):
self.extra_files = set(os.path.abspath(x)
for x in extra_files or ())
self.interval = interval
def run(self):
pass
def restart_with_reloader(self):
"""Spawn a new Python interpreter with the same arguments as this one,
but running the reloader thread.
"""
while 1:
_log('info', ' * Restarting with %s' % self.name)
args = [sys.executable] + sys.argv
new_environ = os.environ.copy()
new_environ['WERKZEUG_RUN_MAIN'] = 'true'
# a weird bug on windows. sometimes unicode strings end up in the
# environment and subprocess.call does not like this, encode them
# to latin1 and continue.
if os.name == 'nt' and PY2:
for key, value in iteritems(new_environ):
if isinstance(value, text_type):
new_environ[key] = value.encode('iso-8859-1')
exit_code = subprocess.call(args, env=new_environ)
if exit_code != 3:
return exit_code
def trigger_reload(self, filename):
filename = os.path.abspath(filename)
_log('info', ' * Detected change in %r, reloading' % filename)
sys.exit(3)
class StatReloaderLoop(ReloaderLoop):
name = 'stat'
def run(self):
mtimes = {}
while 1:
for filename in chain(_iter_module_files(), self.extra_files):
try:
mtime = os.stat(filename).st_mtime
except OSError:
continue
old_time = mtimes.get(filename)
if old_time is None:
mtimes[filename] = mtime
continue
elif mtime > old_time:
self.trigger_reload(filename)
self._sleep(self.interval)
class WatchdogReloaderLoop(ReloaderLoop):
def __init__(self, *args, **kwargs):
ReloaderLoop.__init__(self, *args, **kwargs)
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
self.observable_paths = set()
def _check_modification(filename):
if filename in self.extra_files:
self.trigger_reload(filename)
dirname = os.path.dirname(filename)
if dirname.startswith(tuple(self.observable_paths)):
if filename.endswith(('.pyc', '.pyo')):
self.trigger_reload(filename[:-1])
elif filename.endswith('.py'):
self.trigger_reload(filename)
class _CustomHandler(FileSystemEventHandler):
def on_created(self, event):
_check_modification(event.src_path)
def on_modified(self, event):
_check_modification(event.src_path)
reloader_name = Observer.__name__.lower()
if reloader_name.endswith('observer'):
reloader_name = reloader_name[:-8]
reloader_name += ' reloader'
self.name = reloader_name
self.observer_class = Observer
self.event_handler = _CustomHandler()
self.should_reload = False
def trigger_reload(self, filename):
# This is called inside an event handler, which means we can't throw
# SystemExit here. https://github.com/gorakhargosh/watchdog/issues/294
self.should_reload = True
ReloaderLoop.trigger_reload(self, filename)
def run(self):
watches = {}
observer = self.observer_class()
observer.start()
while not self.should_reload:
to_delete = set(watches)
paths = _find_observable_paths(self.extra_files)
for path in paths:
if path not in watches:
try:
watches[path] = observer.schedule(
self.event_handler, path, recursive=True)
except OSError:
# "Path is not a directory". We could filter out
# those paths beforehand, but that would cause
# additional stat calls.
watches[path] = None
to_delete.discard(path)
for path in to_delete:
watch = watches.pop(path, None)
if watch is not None:
observer.unschedule(watch)
self.observable_paths = paths
self._sleep(self.interval)
sys.exit(3)
reloader_loops = {
'stat': StatReloaderLoop,
'watchdog': WatchdogReloaderLoop,
}
try:
__import__('watchdog.observers')
except ImportError:
reloader_loops['auto'] = reloader_loops['stat']
else:
reloader_loops['auto'] = reloader_loops['watchdog']
def run_with_reloader(main_func, extra_files=None, interval=1,
reloader_type='auto'):
"""Run the given function in an independent python interpreter."""
import signal
reloader = reloader_loops[reloader_type](extra_files, interval)
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
try:
if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
t = threading.Thread(target=main_func, args=())
t.setDaemon(True)
t.start()
reloader.run()
else:
sys.exit(reloader.restart_with_reloader())
except KeyboardInterrupt:
pass
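# A hypothetical entry point showing how this module is meant to be driven;
# serve_forever and 'config.ini' are assumed placeholders, not part of
# werkzeug:
if __name__ == '__main__':
    def serve_forever():
        while True:
            time.sleep(60)  # stand-in for a real WSGI serving loop
    # Re-executes this script in a child interpreter and restarts it
    # whenever a watched file changes ('auto' prefers watchdog over stat).
    run_with_reloader(serve_forever, extra_files=['config.ini'], interval=1)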
| apache-2.0 |
bslatkin/8-bits | appengine-mapreduce/python/src/mapreduce/lib/pipeline/simplejson/scanner.py | 43 | 2596 | #!/usr/bin/env python
"""JSON token scanner
"""
import re
def _import_c_make_scanner():
try:
from mapreduce.lib.simplejson._speedups import make_scanner
return make_scanner
except ImportError:
return None
c_make_scanner = _import_c_make_scanner()
__all__ = ['make_scanner']
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL))
def py_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
object_pairs_hook = context.object_pairs_hook
memo = context.memo
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
return parse_string(string, idx + 1, encoding, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), encoding, strict,
_scan_once, object_hook, object_pairs_hook, memo)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
def scan_once(string, idx):
try:
return _scan_once(string, idx)
finally:
memo.clear()
return scan_once
make_scanner = c_make_scanner or py_make_scanner
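# Hedged illustration of the number branch above, exercising NUMBER_RE
# directly (the input string is an arbitrary example):
if __name__ == '__main__':
    m = NUMBER_RE.match('-12.5e3,"rest"', 0)
    integer, frac, exp = m.groups()           # ('-12', '.5', 'e3')
    assert float(integer + frac + exp) == -12500.0
    assert m.end() == 7                       # scanning resumes at the ','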
| apache-2.0 |
PGer/incubator-hawq | tools/bin/pythonSrc/PyGreSQL-4.0/tutorial/syscat.py | 59 | 5378 | # syscat.py - parses some system catalogs
# inspired from the PostgreSQL tutorial
# adapted to Python 1995 by Pascal ANDRE
print """
__________________________________________________________________
MODULE SYSCAT.PY : PARSES SOME POSTGRESQL SYSTEM CATALOGS
This module is designed to be imported from the Python prompt.
In order to run the samples included here, first create a connection
using: cnx = syscat.DB(...)
The "..." should be replaced with whatever arguments you need to open an
existing database. Usually all you need is the name of the database and,
in fact, if it is the same as your login name, you can leave it empty.
then start the demo with: syscat.demo(cnx)
Some results may be empty, depending on your database status.
__________________________________________________________________
"""
from pg import DB
import sys
# waits for a key
def wait_key():
print "Press <enter>"
sys.stdin.read(1)
# lists all simple indices
def list_simple_ind(pgcnx):
result = pgcnx.query("""SELECT bc.relname AS class_name,
ic.relname AS index_name, a.attname
FROM pg_class bc, pg_class ic, pg_index i, pg_attribute a
WHERE i.indrelid = bc.oid AND i.indexrelid = ic.oid
AND i.indkey[0] = a.attnum AND a.attrelid = bc.oid
AND i.indproc = '0'::oid AND a.attisdropped = 'f'
ORDER BY class_name, index_name, attname""")
return result
# list all user defined attributes and their type in user-defined classes
def list_all_attr(pgcnx):
result = pgcnx.query("""SELECT c.relname, a.attname, t.typname
FROM pg_class c, pg_attribute a, pg_type t
WHERE c.relkind = 'r' and c.relname !~ '^pg_'
AND c.relname !~ '^Inv' and a.attnum > 0
AND a.attrelid = c.oid and a.atttypid = t.oid
AND a.attisdropped = 'f'
ORDER BY relname, attname""")
return result
# list all user defined base type
def list_user_base_type(pgcnx):
result = pgcnx.query("""SELECT u.usename, t.typname
FROM pg_type t, pg_user u
WHERE u.usesysid = int2in(int4out(t.typowner))
AND t.typrelid = '0'::oid and t.typelem = '0'::oid
AND u.usename <> 'postgres' order by usename, typname""")
return result
# list all right-unary operators
def list_right_unary_operator(pgcnx):
result = pgcnx.query("""SELECT o.oprname AS right_unary,
lt.typname AS operand, result.typname AS return_type
FROM pg_operator o, pg_type lt, pg_type result
WHERE o.oprkind='r' and o.oprleft = lt.oid
AND o.oprresult = result.oid
ORDER BY operand""")
return result
# list all left-unary operators
def list_left_unary_operator(pgcnx):
result = pgcnx.query("""SELECT o.oprname AS left_unary,
rt.typname AS operand, result.typname AS return_type
FROM pg_operator o, pg_type rt, pg_type result
WHERE o.oprkind='l' AND o.oprright = rt.oid
AND o.oprresult = result.oid
ORDER BY operand""")
return result
# list all binary operators
def list_binary_operator(pgcnx):
result = pgcnx.query("""SELECT o.oprname AS binary_op,
rt.typname AS right_opr, lt.typname AS left_opr,
result.typname AS return_type
FROM pg_operator o, pg_type rt, pg_type lt, pg_type result
WHERE o.oprkind = 'b' AND o.oprright = rt.oid
AND o.oprleft = lt.oid AND o.oprresult = result.oid""")
return result
# returns the name, args and return type from all function of lang l
def list_lang_func(pgcnx, l):
result = pgcnx.query("""SELECT p.proname, p.pronargs, t.typname
FROM pg_proc p, pg_language l, pg_type t
WHERE p.prolang = l.oid AND p.prorettype = t.oid
AND l.lanname = '%s'
ORDER BY proname""" % l)
return result
# lists all the aggregate functions and the type to which they can be applied
def list_agg_func(pgcnx):
result = pgcnx.query("""SELECT p.proname, t.typname
FROM pg_aggregate a, pg_proc p, pg_type t
WHERE a.aggfnoid = p.oid
and p.proargtypes[0] = t.oid
ORDER BY proname, typname""")
return result
# lists all the operator classes that can be used with each access method as
# well as the operators that can be used with the respective operator classes
def list_op_class(pgcnx):
result = pgcnx.query("""SELECT am.amname, opc.opcname, opr.oprname
FROM pg_am am, pg_amop amop, pg_opclass opc, pg_operator opr
WHERE amop.amopid = am.oid and amop.amopclaid = opc.oid
AND amop.amopopr = opr.oid order by amname, opcname, oprname""")
return result
# demo function - runs all examples
def demo(pgcnx):
import sys, os
save_stdout = sys.stdout
sys.stdout = os.popen("more", "w")
print "Listing simple indices ..."
print list_simple_ind(pgcnx)
print "Listing all attributes ..."
print list_all_attr(pgcnx)
print "Listing all user-defined base types ..."
print list_user_base_type(pgcnx)
print "Listing all left-unary operators defined ..."
print list_left_unary_operator(pgcnx)
print "Listing all right-unary operators defined ..."
print list_right_unary_operator(pgcnx)
print "Listing all binary operators ..."
print list_binary_operator(pgcnx)
print "Listing C external function linked ..."
print list_lang_func(pgcnx, 'C')
print "Listing C internal functions ..."
print list_lang_func(pgcnx, 'internal')
print "Listing SQL functions defined ..."
print list_lang_func(pgcnx, 'sql')
print "Listing 'aggregate functions' ..."
print list_agg_func(pgcnx)
print "Listing 'operator classes' ..."
print list_op_class(pgcnx)
del sys.stdout
sys.stdout = save_stdout
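# Hypothetical interactive session (the database name 'testdb' is an
# assumption):
#
#   >>> import syscat
#   >>> cnx = syscat.DB('testdb')    # syscat re-exports pg.DB
#   >>> print syscat.list_user_base_type(cnx)
#   >>> syscat.demo(cnx)             # pages all examples through 'more'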
| apache-2.0 |
ahmadiga/min_edx | lms/djangoapps/shoppingcart/views.py | 64 | 40908 | import logging
import datetime
import decimal
import pytz
from ipware.ip import get_ip
from django.db.models import Q
from django.conf import settings
from django.contrib.auth.models import Group
from django.shortcuts import redirect
from django.http import (
HttpResponse, HttpResponseRedirect, HttpResponseNotFound,
HttpResponseBadRequest, HttpResponseForbidden, Http404
)
from django.utils.translation import ugettext as _
from course_modes.models import CourseMode
from util.json_request import JsonResponse
from django.views.decorators.http import require_POST, require_http_methods
from django.core.urlresolvers import reverse
from django.views.decorators.csrf import csrf_exempt
from util.bad_request_rate_limiter import BadRequestRateLimiter
from util.date_utils import get_default_time_display
from django.contrib.auth.decorators import login_required
from microsite_configuration import microsite
from edxmako.shortcuts import render_to_response
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from opaque_keys.edx.locator import CourseLocator
from opaque_keys import InvalidKeyError
from courseware.courses import get_course_by_id
from config_models.decorators import require_config
from shoppingcart.reports import RefundReport, ItemizedPurchaseReport, UniversityRevenueShareReport, CertificateStatusReport
from student.models import CourseEnrollment, EnrollmentClosedError, CourseFullError, \
AlreadyEnrolledError
from embargo import api as embargo_api
from .exceptions import (
ItemAlreadyInCartException, AlreadyEnrolledInCourseException,
CourseDoesNotExistException, ReportTypeDoesNotExistException,
MultipleCouponsNotAllowedException, InvalidCartItem,
ItemNotFoundInCartException, RedemptionCodeError
)
from .models import (
Order, OrderTypes,
PaidCourseRegistration, OrderItem, Coupon,
CertificateItem, CouponRedemption, CourseRegistrationCode,
RegistrationCodeRedemption, CourseRegCodeItem,
Donation, DonationConfiguration
)
from .processors import (
process_postpay_callback, render_purchase_form_html,
get_signed_purchase_params, get_purchase_endpoint
)
import json
from .decorators import enforce_shopping_cart_enabled
log = logging.getLogger("shoppingcart")
AUDIT_LOG = logging.getLogger("audit")
EVENT_NAME_USER_UPGRADED = 'edx.course.enrollment.upgrade.succeeded'
REPORT_TYPES = [
("refund_report", RefundReport),
("itemized_purchase_report", ItemizedPurchaseReport),
("university_revenue_share", UniversityRevenueShareReport),
("certificate_status", CertificateStatusReport),
]
def initialize_report(report_type, start_date, end_date, start_letter=None, end_letter=None):
"""
Creates the appropriate type of Report object based on the string report_type.
"""
for item in REPORT_TYPES:
if report_type in item:
return item[1](start_date, end_date, start_letter, end_letter)
raise ReportTypeDoesNotExistException
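# Illustration (dates are arbitrary; _get_date_from_str is defined further
# down in this module):
#
#   start = _get_date_from_str('2015-01-01')
#   end = _get_date_from_str('2015-02-01')
#   report = initialize_report('refund_report', start, end)
#   # -> a RefundReport instance covering January 2015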
@require_POST
def add_course_to_cart(request, course_id):
"""
Adds course specified by course_id to the cart. The model function add_to_order does all the
heavy lifting (logging, error checking, etc)
"""
assert isinstance(course_id, basestring)
if not request.user.is_authenticated():
log.info(u"Anon user trying to add course %s to cart", course_id)
return HttpResponseForbidden(_('You must be logged-in to add to a shopping cart'))
cart = Order.get_cart_for_user(request.user)
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)
# All logging from here handled by the model
try:
paid_course_item = PaidCourseRegistration.add_to_order(cart, course_key)
except CourseDoesNotExistException:
return HttpResponseNotFound(_('The course you requested does not exist.'))
except ItemAlreadyInCartException:
return HttpResponseBadRequest(_('The course {course_id} is already in your cart.').format(course_id=course_id))
except AlreadyEnrolledInCourseException:
return HttpResponseBadRequest(
_('You are already registered in course {course_id}.').format(course_id=course_id))
else:
# in case a coupon redemption code has been applied, new items should also get a discount if applicable.
order = paid_course_item.order
order_items = order.orderitem_set.all().select_subclasses()
redeemed_coupons = CouponRedemption.objects.filter(order=order)
for redeemed_coupon in redeemed_coupons:
if Coupon.objects.filter(code=redeemed_coupon.coupon.code, course_id=course_key, is_active=True).exists():
coupon = Coupon.objects.get(code=redeemed_coupon.coupon.code, course_id=course_key, is_active=True)
CouponRedemption.add_coupon_redemption(coupon, order, order_items)
break # Since only one code can be applied to the cart, we'll just take the first one and then break.
return HttpResponse(_("Course added to cart."))
@login_required
@enforce_shopping_cart_enabled
def update_user_cart(request):
"""
When the user changes the number of students in the UI,
this method updates the corresponding qty field in the OrderItem model and the order_type in the Order model.
"""
try:
qty = int(request.POST.get('qty', -1))
except ValueError:
log.exception('Quantity must be an integer.')
return HttpResponseBadRequest('Quantity must be an integer.')
if not 1 <= qty <= 1000:
log.warning('Quantity must be between 1 and 1000.')
return HttpResponseBadRequest('Quantity must be between 1 and 1000.')
item_id = request.POST.get('ItemId', None)
if item_id:
try:
item = OrderItem.objects.get(id=item_id, status='cart')
except OrderItem.DoesNotExist:
log.exception(u'Cart OrderItem id=%s DoesNotExist', item_id)
return HttpResponseNotFound('Order item does not exist.')
item.qty = qty
item.save()
old_to_new_id_map = item.order.update_order_type()
total_cost = item.order.total_cost
callback_url = request.build_absolute_uri(
reverse("shoppingcart.views.postpay_callback")
)
cart = Order.get_cart_for_user(request.user)
form_html = render_purchase_form_html(cart, callback_url=callback_url)
return JsonResponse(
{
"total_cost": total_cost,
"oldToNewIdMap": old_to_new_id_map,
"form_html": form_html,
}
)
return HttpResponseBadRequest('Order item not found in request.')
@login_required
@enforce_shopping_cart_enabled
def show_cart(request):
"""
This view shows cart items.
"""
cart = Order.get_cart_for_user(request.user)
is_any_course_expired, expired_cart_items, expired_cart_item_names, valid_cart_item_tuples = \
verify_for_closed_enrollment(request.user, cart)
site_name = microsite.get_value('SITE_NAME', settings.SITE_NAME)
if is_any_course_expired:
for expired_item in expired_cart_items:
Order.remove_cart_item_from_order(expired_item, request.user)
cart.update_order_type()
callback_url = request.build_absolute_uri(
reverse("shoppingcart.views.postpay_callback")
)
form_html = render_purchase_form_html(cart, callback_url=callback_url)
context = {
'order': cart,
'shoppingcart_items': valid_cart_item_tuples,
'amount': cart.total_cost,
'is_course_enrollment_closed': is_any_course_expired,
'expired_course_names': expired_cart_item_names,
'site_name': site_name,
'form_html': form_html,
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'currency': settings.PAID_COURSE_REGISTRATION_CURRENCY[0],
}
return render_to_response("shoppingcart/shopping_cart.html", context)
@login_required
@enforce_shopping_cart_enabled
def clear_cart(request):
cart = Order.get_cart_for_user(request.user)
cart.clear()
coupon_redemption = CouponRedemption.objects.filter(user=request.user, order=cart.id)
if coupon_redemption:
coupon_redemption.delete()
log.info(
u'Coupon redemption entry removed for user %s for order %s',
request.user,
cart.id,
)
return HttpResponse('Cleared')
@login_required
@enforce_shopping_cart_enabled
def remove_item(request):
"""
This will remove an item from the user cart and also delete the corresponding coupon codes redemption.
"""
item_id = request.REQUEST.get('id', '-1')
items = OrderItem.objects.filter(id=item_id, status='cart').select_subclasses()
if not len(items):
log.exception(
u'Cannot remove cart OrderItem id=%s. DoesNotExist or item is already purchased',
item_id
)
else:
item = items[0]
if item.user == request.user:
Order.remove_cart_item_from_order(item, request.user)
item.order.update_order_type()
return HttpResponse('OK')
@login_required
@enforce_shopping_cart_enabled
def reset_code_redemption(request):
"""
This method resets any code redemption applied to the user's cart items.
"""
cart = Order.get_cart_for_user(request.user)
cart.reset_cart_items_prices()
CouponRedemption.remove_coupon_redemption_from_cart(request.user, cart)
return HttpResponse('reset')
@login_required
@enforce_shopping_cart_enabled
def use_code(request):
"""
A valid code can be either a coupon code or a registration code.
For a valid Coupon Code, this applies the coupon code and generates a discount against all applicable items.
For a valid Registration code, it deletes the item from the shopping cart and redirects to the
Registration Code Redemption page.
"""
code = request.POST["code"]
coupons = Coupon.objects.filter(
Q(code=code),
Q(is_active=True),
Q(expiration_date__gt=datetime.datetime.now(pytz.UTC)) |
Q(expiration_date__isnull=True)
)
if not coupons:
# If no coupons then we check that code against course registration code
try:
course_reg = CourseRegistrationCode.objects.get(code=code)
except CourseRegistrationCode.DoesNotExist:
return HttpResponseNotFound(_("Discount does not exist against code '{code}'.").format(code=code))
return use_registration_code(course_reg, request.user)
return use_coupon_code(coupons, request.user)
def get_reg_code_validity(registration_code, request, limiter):
"""
This function checks if the registration code is valid, and then checks if it was already redeemed.
"""
reg_code_already_redeemed = False
course_registration = None
try:
course_registration = CourseRegistrationCode.objects.get(code=registration_code)
except CourseRegistrationCode.DoesNotExist:
reg_code_is_valid = False
else:
if course_registration.is_valid:
reg_code_is_valid = True
else:
reg_code_is_valid = False
reg_code_already_redeemed = RegistrationCodeRedemption.is_registration_code_redeemed(registration_code)
if not reg_code_is_valid:
# tick the rate limiter counter
AUDIT_LOG.info("Redemption of an invalid RegistrationCode %s", registration_code)
limiter.tick_bad_request_counter(request)
raise Http404()
return reg_code_is_valid, reg_code_already_redeemed, course_registration
@require_http_methods(["GET", "POST"])
@login_required
def register_code_redemption(request, registration_code):
"""
This view allows the student to redeem the registration code
and enroll in the course.
"""
# Add some rate limiting here by re-using the RateLimitMixin as a helper class
site_name = microsite.get_value('SITE_NAME', settings.SITE_NAME)
limiter = BadRequestRateLimiter()
if limiter.is_rate_limit_exceeded(request):
AUDIT_LOG.warning("Rate limit exceeded in registration code redemption.")
return HttpResponseForbidden()
template_to_render = 'shoppingcart/registration_code_redemption.html'
if request.method == "GET":
reg_code_is_valid, reg_code_already_redeemed, course_registration = get_reg_code_validity(registration_code,
request, limiter)
course = get_course_by_id(getattr(course_registration, 'course_id'), depth=0)
# Restrict the user from enrolling based on country access rules
embargo_redirect = embargo_api.redirect_if_blocked(
course.id, user=request.user, ip_address=get_ip(request),
url=request.path
)
if embargo_redirect is not None:
return redirect(embargo_redirect)
context = {
'reg_code_already_redeemed': reg_code_already_redeemed,
'reg_code_is_valid': reg_code_is_valid,
'reg_code': registration_code,
'site_name': site_name,
'course': course,
'registered_for_course': not _is_enrollment_code_an_update(course, request.user, course_registration)
}
return render_to_response(template_to_render, context)
elif request.method == "POST":
reg_code_is_valid, reg_code_already_redeemed, course_registration = get_reg_code_validity(registration_code,
request, limiter)
course = get_course_by_id(getattr(course_registration, 'course_id'), depth=0)
# Restrict the user from enrolling based on country access rules
embargo_redirect = embargo_api.redirect_if_blocked(
course.id, user=request.user, ip_address=get_ip(request),
url=request.path
)
if embargo_redirect is not None:
return redirect(embargo_redirect)
context = {
'reg_code': registration_code,
'site_name': site_name,
'course': course,
'reg_code_is_valid': reg_code_is_valid,
'reg_code_already_redeemed': reg_code_already_redeemed,
}
if reg_code_is_valid and not reg_code_already_redeemed:
# remove the course from the cart if it was added there.
cart = Order.get_cart_for_user(request.user)
try:
cart_items = cart.find_item_by_course_id(course_registration.course_id)
except ItemNotFoundInCartException:
pass
else:
for cart_item in cart_items:
if isinstance(cart_item, PaidCourseRegistration) or isinstance(cart_item, CourseRegCodeItem):
cart_item.delete()
# Now redeem the registration code.
redemption = RegistrationCodeRedemption.create_invoice_generated_registration_redemption(course_registration, request.user)
try:
kwargs = {}
if course_registration.mode_slug is not None:
if CourseMode.mode_for_course(course.id, course_registration.mode_slug):
kwargs['mode'] = course_registration.mode_slug
else:
raise RedemptionCodeError()
redemption.course_enrollment = CourseEnrollment.enroll(request.user, course.id, **kwargs)
redemption.save()
context['redemption_success'] = True
except RedemptionCodeError:
context['redeem_code_error'] = True
context['redemption_success'] = False
except EnrollmentClosedError:
context['enrollment_closed'] = True
context['redemption_success'] = False
except CourseFullError:
context['course_full'] = True
context['redemption_success'] = False
except AlreadyEnrolledError:
context['registered_for_course'] = True
context['redemption_success'] = False
else:
context['redemption_success'] = False
return render_to_response(template_to_render, context)
def _is_enrollment_code_an_update(course, user, redemption_code):
"""Checks to see if the user's enrollment can be updated by the code.
Check to see if the enrollment code and the user's enrollment match. If they are different, the code
may be used to alter the enrollment of the user. If the enrollment is inactive, this will return True, since
the user may use the code to re-activate an enrollment as well.
Enrollment redemption codes must be associated with a paid course mode. If the current enrollment is a
different mode than the mode associated with the code, use of the code can be considered an upgrade.
Args:
course (CourseDescriptor): The course to check for enrollment.
user (User): The user that will be using the redemption code.
redemption_code (CourseRegistrationCode): The redemption code that will be used to update the user's enrollment.
Returns:
True if the redemption code can be used to upgrade the enrollment, or re-activate it.
"""
enrollment_mode, is_active = CourseEnrollment.enrollment_mode_for_user(user, course.id)
return not is_active or enrollment_mode != redemption_code.mode_slug
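# Decision sketch for the check above (mode names are illustrative):
#
#   enrollment_mode  is_active  code.mode_slug  -> result
#   'honor'          True       'honor'            False (nothing to change)
#   'honor'          True       'verified'         True  (upgrade)
#   'verified'       False      'verified'         True  (re-activation)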
def use_registration_code(course_reg, user):
"""
This method redeems a course registration code.
If the registration code is invalid, it returns an error.
If the registration code is already redeemed, it returns an error.
Else, it identifies and removes the applicable OrderItem from the Order
and redirects the user to the Registration code redemption page.
"""
if not course_reg.is_valid:
log.warning(u"The enrollment code (%s) is no longer valid.", course_reg.code)
return HttpResponseBadRequest(
_("This enrollment code ({enrollment_code}) is no longer valid.").format(
enrollment_code=course_reg.code
)
)
if RegistrationCodeRedemption.is_registration_code_redeemed(course_reg.code):
log.warning(u"This enrollment code ({%s}) has already been used.", course_reg.code)
return HttpResponseBadRequest(
_("This enrollment code ({enrollment_code}) is not valid.").format(
enrollment_code=course_reg.code
)
)
try:
cart = Order.get_cart_for_user(user)
cart_items = cart.find_item_by_course_id(course_reg.course_id)
except ItemNotFoundInCartException:
log.warning(u"Course item does not exist against registration code '%s'", course_reg.code)
return HttpResponseNotFound(
_("Code '{registration_code}' is not valid for any course in the shopping cart.").format(
registration_code=course_reg.code
)
)
else:
applicable_cart_items = [
cart_item for cart_item in cart_items
if (
(isinstance(cart_item, PaidCourseRegistration) or isinstance(cart_item, CourseRegCodeItem)) and cart_item.qty == 1
)
]
if not applicable_cart_items:
return HttpResponseNotFound(
_("Cart item quantity should not be greater than 1 when applying activation code"))
redemption_url = reverse('register_code_redemption', kwargs={'registration_code': course_reg.code})
return HttpResponse(
json.dumps({'response': 'success', 'coupon_code_applied': False, 'redemption_url': redemption_url}),
content_type="application/json"
)
def use_coupon_code(coupons, user):
"""
This method applies a course coupon code to the user's cart.
"""
cart = Order.get_cart_for_user(user)
cart_items = cart.orderitem_set.all().select_subclasses()
is_redemption_applied = False
for coupon in coupons:
try:
if CouponRedemption.add_coupon_redemption(coupon, cart, cart_items):
is_redemption_applied = True
except MultipleCouponsNotAllowedException:
return HttpResponseBadRequest(_("Only one coupon redemption is allowed against an order"))
if not is_redemption_applied:
log.warning(u"Discount does not exist against code '%s'.", coupons[0].code)
return HttpResponseNotFound(_("Discount does not exist against code '{code}'.").format(code=coupons[0].code))
return HttpResponse(
json.dumps({'response': 'success', 'coupon_code_applied': True}),
content_type="application/json"
)
@require_config(DonationConfiguration)
@require_POST
@login_required
def donate(request):
"""Add a single donation item to the cart and proceed to payment.
Warning: this call will clear all the items in the user's cart
before adding the new item!
Arguments:
request (Request): The Django request object. This should contain
a JSON-serialized dictionary with "amount" (string, required),
and "course_id" (slash-separated course ID string, optional).
Returns:
HttpResponse: 200 on success with JSON-encoded dictionary that has keys
"payment_url" (string) and "payment_params" (dictionary). The client
should POST the payment params to the payment URL.
HttpResponse: 400 invalid amount or course ID.
HttpResponse: 404 donations are disabled.
HttpResponse: 405 invalid request method.
Example usage:
POST /shoppingcart/donation/
with params {'amount': '12.34', 'course_id': 'edX/DemoX/Demo_Course'}
will respond with the signed purchase params
that the client can send to the payment processor.
"""
amount = request.POST.get('amount')
course_id = request.POST.get('course_id')
# Check that required parameters are present and valid
if amount is None:
msg = u"Request is missing required param 'amount'"
log.error(msg)
return HttpResponseBadRequest(msg)
try:
amount = (
decimal.Decimal(amount)
).quantize(
decimal.Decimal('.01'),
rounding=decimal.ROUND_DOWN
)
except decimal.InvalidOperation:
return HttpResponseBadRequest("Could not parse 'amount' as a decimal")
# Any amount is okay as long as it's greater than 0
# Since we've already quantized the amount to 0.01
# and rounded down, we can check if it's less than 0.01
if amount < decimal.Decimal('0.01'):
return HttpResponseBadRequest("Amount must be greater than 0")
if course_id is not None:
try:
course_id = CourseLocator.from_string(course_id)
except InvalidKeyError:
msg = u"Request included an invalid course key: {course_key}".format(course_key=course_id)
log.error(msg)
return HttpResponseBadRequest(msg)
# Add the donation to the user's cart
cart = Order.get_cart_for_user(request.user)
cart.clear()
try:
# Course ID may be None if this is a donation to the entire organization
Donation.add_to_order(cart, amount, course_id=course_id)
except InvalidCartItem as ex:
log.exception(
u"Could not create donation item for amount '%s' and course ID '%s'",
amount,
course_id
)
return HttpResponseBadRequest(unicode(ex))
# Start the purchase.
# This will "lock" the purchase so the user can't change
# the amount after we send the information to the payment processor.
# If the user tries to make another donation, it will be added
# to a new cart.
cart.start_purchase()
# Construct the response params (JSON-encoded)
callback_url = request.build_absolute_uri(
reverse("shoppingcart.views.postpay_callback")
)
# Add extra to make it easier to track transactions
extra_data = [
unicode(course_id) if course_id else "",
"donation_course" if course_id else "donation_general"
]
response_params = json.dumps({
# The HTTP end-point for the payment processor.
"payment_url": get_purchase_endpoint(),
# Parameters the client should send to the payment processor
"payment_params": get_signed_purchase_params(
cart,
callback_url=callback_url,
extra_data=extra_data
),
})
return HttpResponse(response_params, content_type="text/json")
def _get_verify_flow_redirect(order):
"""Check if we're in the verification flow and redirect if necessary.
Arguments:
order (Order): The order received by the post-pay callback.
Returns:
HttpResponseRedirect or None
"""
# See if the order contained any certificate items
# If so, the user is coming from the payment/verification flow.
cert_items = CertificateItem.objects.filter(order=order)
if cert_items.count() > 0:
# Currently, we allow the purchase of only one verified
# enrollment at a time; if there are more than one,
# this will choose the first.
if cert_items.count() > 1:
log.warning(
u"More than one certificate item in order %s; "
u"continuing with the payment/verification flow for "
u"the first order item (course %s).",
order.id, cert_items[0].course_id
)
course_id = cert_items[0].course_id
url = reverse(
'verify_student_payment_confirmation',
kwargs={'course_id': unicode(course_id)}
)
# Add a query string param for the order ID
# This allows the view to query for the receipt information later.
url += '?payment-order-num={order_num}'.format(order_num=order.id)
return HttpResponseRedirect(url)
@csrf_exempt
@require_POST
def postpay_callback(request):
"""
Receives the POST-back from processor.
Mainly this calls the processor-specific code to check if the payment was accepted, and to record the order
if it was, and to generate an error page.
If successful this function should have the side effect of changing the "cart" into a full "order" in the DB.
The cart can then render a success page which links to receipt pages.
If unsuccessful the order will be left untouched and HTML messages giving more detailed error info will be
returned.
"""
params = request.POST.dict()
result = process_postpay_callback(params)
if result['success']:
# See if this payment occurred as part of the verification flow process
# If so, send the user back into the flow so they have the option
# to continue with verification.
# Only orders where order_items.count() == 1 might be attempting to upgrade
attempting_upgrade = request.session.get('attempting_upgrade', False)
if attempting_upgrade:
if result['order'].has_items(CertificateItem):
course_id = result['order'].orderitem_set.all().select_subclasses("certificateitem")[0].course_id
if course_id:
course_enrollment = CourseEnrollment.get_enrollment(request.user, course_id)
if course_enrollment:
course_enrollment.emit_event(EVENT_NAME_USER_UPGRADED)
request.session['attempting_upgrade'] = False
verify_flow_redirect = _get_verify_flow_redirect(result['order'])
if verify_flow_redirect is not None:
return verify_flow_redirect
# Otherwise, send the user to the receipt page
return HttpResponseRedirect(reverse('shoppingcart.views.show_receipt', args=[result['order'].id]))
else:
request.session['attempting_upgrade'] = False
return render_to_response('shoppingcart/error.html', {'order': result['order'],
'error_html': result['error_html']})
@require_http_methods(["GET", "POST"])
@login_required
@enforce_shopping_cart_enabled
def billing_details(request):
"""
This is the view for capturing additional billing details
in case of the business purchase workflow.
"""
cart = Order.get_cart_for_user(request.user)
cart_items = cart.orderitem_set.all().select_subclasses()
if getattr(cart, 'order_type') != OrderTypes.BUSINESS:
raise Http404('Page not found!')
if request.method == "GET":
callback_url = request.build_absolute_uri(
reverse("shoppingcart.views.postpay_callback")
)
form_html = render_purchase_form_html(cart, callback_url=callback_url)
total_cost = cart.total_cost
context = {
'shoppingcart_items': cart_items,
'amount': total_cost,
'form_html': form_html,
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'currency': settings.PAID_COURSE_REGISTRATION_CURRENCY[0],
'site_name': microsite.get_value('SITE_NAME', settings.SITE_NAME),
}
return render_to_response("shoppingcart/billing_details.html", context)
elif request.method == "POST":
company_name = request.POST.get("company_name", "")
company_contact_name = request.POST.get("company_contact_name", "")
company_contact_email = request.POST.get("company_contact_email", "")
recipient_name = request.POST.get("recipient_name", "")
recipient_email = request.POST.get("recipient_email", "")
customer_reference_number = request.POST.get("customer_reference_number", "")
cart.add_billing_details(company_name, company_contact_name, company_contact_email, recipient_name,
recipient_email, customer_reference_number)
is_any_course_expired, __, __, __ = verify_for_closed_enrollment(request.user)
return JsonResponse({
'response': _('success'),
'is_course_enrollment_closed': is_any_course_expired
}) # status code 200: OK by default
def verify_for_closed_enrollment(user, cart=None):
"""
A multi-output helper function.
inputs:
user: a user object
cart: If a cart is provided it uses the same object, otherwise fetches the user's cart.
Returns:
is_any_course_expired: True if any of the items in the cart has its enrollment period closed. False otherwise.
expired_cart_items: List of courses with enrollment period closed.
expired_cart_item_names: List of names of the courses with enrollment period closed.
valid_cart_item_tuples: List of courses which are still open for enrollment.
"""
if cart is None:
cart = Order.get_cart_for_user(user)
expired_cart_items = []
expired_cart_item_names = []
valid_cart_item_tuples = []
cart_items = cart.orderitem_set.all().select_subclasses()
is_any_course_expired = False
for cart_item in cart_items:
course_key = getattr(cart_item, 'course_id', None)
if course_key is not None:
course = get_course_by_id(course_key, depth=0)
if CourseEnrollment.is_enrollment_closed(user, course):
is_any_course_expired = True
expired_cart_items.append(cart_item)
expired_cart_item_names.append(course.display_name)
else:
valid_cart_item_tuples.append((cart_item, course))
return is_any_course_expired, expired_cart_items, expired_cart_item_names, valid_cart_item_tuples
@require_http_methods(["GET"])
@login_required
@enforce_shopping_cart_enabled
def verify_cart(request):
"""
Called when the user clicks the button to transfer control to CyberSource.
Returns a JSON response with is_course_enrollment_closed set to True if any of the courses has its
enrollment period closed. If all courses are still valid, is_course_enrollment_closed is set to False.
"""
is_any_course_expired, __, __, __ = verify_for_closed_enrollment(request.user)
return JsonResponse(
{
'is_course_enrollment_closed': is_any_course_expired
}
) # status code 200: OK by default
@login_required
def show_receipt(request, ordernum):
"""
Displays a receipt for a particular order.
404 if order is not yet purchased or request.user != order.user
"""
try:
order = Order.objects.get(id=ordernum)
except Order.DoesNotExist:
raise Http404('Order not found!')
if order.user != request.user or order.status not in ['purchased', 'refunded']:
raise Http404('Order not found!')
if 'application/json' in request.META.get('HTTP_ACCEPT', ""):
return _show_receipt_json(order)
else:
return _show_receipt_html(request, order)
def _show_receipt_json(order):
"""Render the receipt page as JSON.
The included information is deliberately minimal:
as much as possible, the included information should
be common to *all* order items, so the client doesn't
need to handle different item types differently.
Arguments:
request (HttpRequest): The request for the receipt.
order (Order): The order model to display.
Returns:
HttpResponse
"""
order_info = {
'orderNum': order.id,
'currency': order.currency,
'status': order.status,
'purchase_datetime': get_default_time_display(order.purchase_time) if order.purchase_time else None,
'billed_to': {
'first_name': order.bill_to_first,
'last_name': order.bill_to_last,
'street1': order.bill_to_street1,
'street2': order.bill_to_street2,
'city': order.bill_to_city,
'state': order.bill_to_state,
'postal_code': order.bill_to_postalcode,
'country': order.bill_to_country,
},
'total_cost': order.total_cost,
'items': [
{
'quantity': item.qty,
'unit_cost': item.unit_cost,
'line_cost': item.line_cost,
'line_desc': item.line_desc,
'course_key': unicode(getattr(item, 'course_id'))
}
for item in OrderItem.objects.filter(order=order).select_subclasses()
]
}
return JsonResponse(order_info)
def _show_receipt_html(request, order):
"""Render the receipt page as HTML.
Arguments:
request (HttpRequest): The request for the receipt.
order (Order): The order model to display.
Returns:
HttpResponse
"""
order_items = OrderItem.objects.filter(order=order).select_subclasses()
shoppingcart_items = []
course_names_list = []
for order_item in order_items:
course_key = getattr(order_item, 'course_id')
if course_key:
course = get_course_by_id(course_key, depth=0)
shoppingcart_items.append((order_item, course))
course_names_list.append(course.display_name)
appended_course_names = ", ".join(course_names_list)
any_refunds = any(i.status == "refunded" for i in order_items)
receipt_template = 'shoppingcart/receipt.html'
__, instructions = order.generate_receipt_instructions()
order_type = getattr(order, 'order_type')
recipient_list = []
total_registration_codes = None
reg_code_info_list = []
recipient_list.append(getattr(order.user, 'email'))
if order_type == OrderTypes.BUSINESS:
if order.company_contact_email:
recipient_list.append(order.company_contact_email)
if order.recipient_email:
recipient_list.append(order.recipient_email)
for __, course in shoppingcart_items:
course_registration_codes = CourseRegistrationCode.objects.filter(order=order, course_id=course.id)
total_registration_codes = course_registration_codes.count()
for course_registration_code in course_registration_codes:
reg_code_info_list.append({
'course_name': course.display_name,
'redemption_url': reverse('register_code_redemption', args=[course_registration_code.code]),
'code': course_registration_code.code,
'is_valid': course_registration_code.is_valid,
'is_redeemed': RegistrationCodeRedemption.objects.filter(
registration_code=course_registration_code).exists(),
})
appended_recipient_emails = ", ".join(recipient_list)
context = {
'order': order,
'shoppingcart_items': shoppingcart_items,
'any_refunds': any_refunds,
'instructions': instructions,
'site_name': microsite.get_value('SITE_NAME', settings.SITE_NAME),
'order_type': order_type,
'appended_course_names': appended_course_names,
'appended_recipient_emails': appended_recipient_emails,
'currency_symbol': settings.PAID_COURSE_REGISTRATION_CURRENCY[1],
'currency': settings.PAID_COURSE_REGISTRATION_CURRENCY[0],
'total_registration_codes': total_registration_codes,
'reg_code_info_list': reg_code_info_list,
'order_purchase_date': order.purchase_time.strftime("%B %d, %Y"),
}
# We want to have the ability to override the default receipt page when
# there is only one item in the order.
if order_items.count() == 1:
receipt_template = order_items[0].single_item_receipt_template
context.update(order_items[0].single_item_receipt_context)
return render_to_response(receipt_template, context)
def _can_download_report(user):
"""
Tests whether the user can download the payments report, based on membership in a group whose name is set
in settings. If the group does not exist, all access is denied.
"""
try:
access_group = Group.objects.get(name=settings.PAYMENT_REPORT_GENERATOR_GROUP)
except Group.DoesNotExist:
return False
return access_group in user.groups.all()
def _get_date_from_str(date_input):
"""
Gets date from the date input string. Lets the ValueError raised by invalid strings be processed by the caller
"""
return datetime.datetime.strptime(date_input.strip(), "%Y-%m-%d").replace(tzinfo=pytz.UTC)
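# Example (illustrative input):
#   _get_date_from_str(' 2015-03-07 ')
#   -> datetime.datetime(2015, 3, 7, 0, 0, tzinfo=<UTC>)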
def _render_report_form(start_str, end_str, start_letter, end_letter, report_type, total_count_error=False, date_fmt_error=False):
"""
Helper function that renders the purchase form. Reduces repetition
"""
context = {
'total_count_error': total_count_error,
'date_fmt_error': date_fmt_error,
'start_date': start_str,
'end_date': end_str,
'start_letter': start_letter,
'end_letter': end_letter,
'requested_report': report_type,
}
return render_to_response('shoppingcart/download_report.html', context)
@login_required
def csv_report(request):
"""
Downloads csv reporting of orderitems
"""
if not _can_download_report(request.user):
return HttpResponseForbidden(_('You do not have permission to view this page.'))
if request.method == 'POST':
start_date = request.POST.get('start_date', '')
end_date = request.POST.get('end_date', '')
start_letter = request.POST.get('start_letter', '')
end_letter = request.POST.get('end_letter', '')
report_type = request.POST.get('requested_report', '')
try:
start_date = _get_date_from_str(start_date) + datetime.timedelta(days=0)
end_date = _get_date_from_str(end_date) + datetime.timedelta(days=1)
except ValueError:
# Error case: there was a badly formatted user-input date string
return _render_report_form(start_date, end_date, start_letter, end_letter, report_type, date_fmt_error=True)
report = initialize_report(report_type, start_date, end_date, start_letter, end_letter)
items = report.rows()
response = HttpResponse(mimetype='text/csv')
filename = "purchases_report_{}.csv".format(datetime.datetime.now(pytz.UTC).strftime("%Y-%m-%d-%H-%M-%S"))
response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
report.write_csv(response)
return response
elif request.method == 'GET':
end_date = datetime.datetime.now(pytz.UTC)
start_date = end_date - datetime.timedelta(days=30)
start_letter = ""
end_letter = ""
return _render_report_form(start_date.strftime("%Y-%m-%d"), end_date.strftime("%Y-%m-%d"), start_letter, end_letter, report_type="")
else:
return HttpResponseBadRequest("HTTP Method Not Supported")
| agpl-3.0 |
vinhlh/bite-project | deps/gdata-python-client/samples/apps/marketplace_sample/gdata/apps/groups/service.py | 94 | 12924 | #!/usr/bin/python
#
# Copyright (C) 2008 Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Allow Google Apps domain administrators to manage groups, group members and group owners.
GroupsService: Provides methods to manage groups, members and owners.
"""
__author__ = '[email protected]'
import urllib
import gdata.apps
import gdata.apps.service
import gdata.service
API_VER = '2.0'
BASE_URL = '/a/feeds/group/' + API_VER + '/%s'
GROUP_MEMBER_URL = BASE_URL + '?member=%s'
GROUP_MEMBER_DIRECT_URL = GROUP_MEMBER_URL + '&directOnly=%s'
GROUP_ID_URL = BASE_URL + '/%s'
MEMBER_URL = BASE_URL + '/%s/member'
MEMBER_WITH_SUSPENDED_URL = MEMBER_URL + '?includeSuspendedUsers=%s'
MEMBER_ID_URL = MEMBER_URL + '/%s'
OWNER_URL = BASE_URL + '/%s/owner'
OWNER_WITH_SUSPENDED_URL = OWNER_URL + '?includeSuspendedUsers=%s'
OWNER_ID_URL = OWNER_URL + '/%s'
PERMISSION_OWNER = 'Owner'
PERMISSION_MEMBER = 'Member'
PERMISSION_DOMAIN = 'Domain'
PERMISSION_ANYONE = 'Anyone'
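# For instance, with domain 'example.com' and group 'us-sales' (illustrative
# values), the templates above expand as:
#   GROUP_ID_URL % ('example.com', 'us-sales')
#     -> '/a/feeds/group/2.0/example.com/us-sales'
#   MEMBER_ID_URL % ('example.com', 'us-sales', urllib.quote_plus('[email protected]'))
#     -> '/a/feeds/group/2.0/example.com/us-sales/member/liz%40example.com'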
class GroupsService(gdata.apps.service.PropertyService):
"""Client for the Google Apps Groups service."""
def _ServiceUrl(self, service_type, is_existed, group_id, member_id, owner_email,
direct_only=False, domain=None, suspended_users=False):
if domain is None:
domain = self.domain
if service_type == 'group':
if group_id != '' and is_existed:
return GROUP_ID_URL % (domain, group_id)
elif member_id != '':
if direct_only:
return GROUP_MEMBER_DIRECT_URL % (domain, urllib.quote_plus(member_id),
self._Bool2Str(direct_only))
else:
return GROUP_MEMBER_URL % (domain, urllib.quote_plus(member_id))
else:
return BASE_URL % (domain)
if service_type == 'member':
if member_id != '' and is_existed:
return MEMBER_ID_URL % (domain, group_id, urllib.quote_plus(member_id))
elif suspended_users:
return MEMBER_WITH_SUSPENDED_URL % (domain, group_id,
self._Bool2Str(suspended_users))
else:
return MEMBER_URL % (domain, group_id)
if service_type == 'owner':
if owner_email != '' and is_existed:
return OWNER_ID_URL % (domain, group_id, urllib.quote_plus(owner_email))
elif suspended_users:
return OWNER_WITH_SUSPENDED_URL % (domain, group_id,
self._Bool2Str(suspended_users))
else:
return OWNER_URL % (domain, group_id)
def _Bool2Str(self, b):
if b is None:
return None
return str(b is True).lower()
def _IsExisted(self, uri):
try:
self._GetProperties(uri)
return True
except gdata.apps.service.AppsForYourDomainException, e:
if e.error_code == gdata.apps.service.ENTITY_DOES_NOT_EXIST:
return False
else:
raise e
def CreateGroup(self, group_id, group_name, description, email_permission):
"""Create a group.
Args:
group_id: The ID of the group (e.g. us-sales).
group_name: The name of the group.
description: A description of the group
email_permission: The subscription permission of the group.
Returns:
A dict containing the result of the create operation.
"""
uri = self._ServiceUrl('group', False, group_id, '', '')
properties = {}
properties['groupId'] = group_id
properties['groupName'] = group_name
properties['description'] = description
properties['emailPermission'] = email_permission
return self._PostProperties(uri, properties)
def UpdateGroup(self, group_id, group_name, description, email_permission):
"""Update a group's name, description and/or permission.
Args:
group_id: The ID of the group (e.g. us-sales).
group_name: The name of the group.
description: A description of the group
email_permission: The subscription permission of the group.
Returns:
A dict containing the result of the update operation.
"""
uri = self._ServiceUrl('group', True, group_id, '', '')
properties = {}
properties['groupId'] = group_id
properties['groupName'] = group_name
properties['description'] = description
properties['emailPermission'] = email_permission
return self._PutProperties(uri, properties)
def RetrieveGroup(self, group_id):
"""Retrieve a group based on its ID.
Args:
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('group', True, group_id, '', '')
return self._GetProperties(uri)
def RetrieveAllGroups(self):
"""Retrieve all groups in the domain.
Args:
None
Returns:
A list containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('group', True, '', '', '')
return self._GetPropertiesList(uri)
def RetrievePageOfGroups(self, start_group=None):
"""Retrieve one page of groups in the domain.
Args:
start_group: The key to continue for pagination through all groups.
Returns:
A feed object containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('group', True, '', '', '')
if start_group is not None:
uri += "?start="+start_group
property_feed = self._GetPropertyFeed(uri)
return property_feed
def RetrieveGroups(self, member_id, direct_only=False):
"""Retrieve all groups that belong to the given member_id.
Args:
member_id: The member's email address (e.g. [email protected]).
direct_only: Boolean whether only return groups that this member directly belongs to.
Returns:
A list containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('group', True, '', member_id, '', direct_only=direct_only)
return self._GetPropertiesList(uri)
def DeleteGroup(self, group_id):
"""Delete a group based on its ID.
Args:
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the delete operation.
"""
uri = self._ServiceUrl('group', True, group_id, '', '')
return self._DeleteProperties(uri)
def AddMemberToGroup(self, member_id, group_id):
"""Add a member to a group.
Args:
member_id: The member's email address (e.g. [email protected]).
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the add operation.
"""
uri = self._ServiceUrl('member', False, group_id, member_id, '')
properties = {}
properties['memberId'] = member_id
return self._PostProperties(uri, properties)
def IsMember(self, member_id, group_id):
"""Check whether the given member already exists in the given group.
Args:
member_id: The member's email address (e.g. [email protected]).
group_id: The ID of the group (e.g. us-sales).
Returns:
True if the member exists in the group. False otherwise.
"""
uri = self._ServiceUrl('member', True, group_id, member_id, '')
return self._IsExisted(uri)
def RetrieveMember(self, member_id, group_id):
"""Retrieve the given member in the given group.
Args:
member_id: The member's email address (e.g. [email protected]).
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('member', True, group_id, member_id, '')
return self._GetProperties(uri)
def RetrieveAllMembers(self, group_id, suspended_users=False):
"""Retrieve all members in the given group.
Args:
group_id: The ID of the group (e.g. us-sales).
suspended_users: A boolean; should we include any suspended users in
the membership list returned?
Returns:
A list containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('member', True, group_id, '', '',
suspended_users=suspended_users)
return self._GetPropertiesList(uri)
def RetrievePageOfMembers(self, group_id, suspended_users=False, start=None):
"""Retrieve one page of members of a given group.
Args:
group_id: The ID of the group (e.g. us-sales).
suspended_users: A boolean; should we include any suspended users in
the membership list returned?
start: The key to continue for pagination through all members.
Returns:
A feed object containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('member', True, group_id, '', '',
suspended_users=suspended_users)
if start is not None:
if suspended_users:
uri += "&start="+start
else:
uri += "?start="+start
property_feed = self._GetPropertyFeed(uri)
return property_feed
def RemoveMemberFromGroup(self, member_id, group_id):
"""Remove the given member from the given group.
Args:
member_id: The member's email address (e.g. [email protected]).
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the remove operation.
"""
uri = self._ServiceUrl('member', True, group_id, member_id, '')
return self._DeleteProperties(uri)
def AddOwnerToGroup(self, owner_email, group_id):
"""Add an owner to a group.
Args:
owner_email: The email address of a group owner.
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the add operation.
"""
uri = self._ServiceUrl('owner', False, group_id, '', owner_email)
properties = {}
properties['email'] = owner_email
return self._PostProperties(uri, properties)
def IsOwner(self, owner_email, group_id):
"""Check whether the given member an owner of the given group.
Args:
owner_email: The email address of a group owner.
group_id: The ID of the group (e.g. us-sales).
Returns:
True if the member is an owner of the given group. False otherwise.
"""
uri = self._ServiceUrl('owner', True, group_id, '', owner_email)
return self._IsExisted(uri)
def RetrieveOwner(self, owner_email, group_id):
"""Retrieve the given owner in the given group.
Args:
owner_email: The email address of a group owner.
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('owner', True, group_id, '', owner_email)
return self._GetProperties(uri)
def RetrieveAllOwners(self, group_id, suspended_users=False):
"""Retrieve all owners of the given group.
Args:
group_id: The ID of the group (e.g. us-sales).
suspended_users: A boolean; should we include any suspended users in
the ownership list returned?
Returns:
A list containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('owner', True, group_id, '', '',
suspended_users=suspended_users)
return self._GetPropertiesList(uri)
def RetrievePageOfOwners(self, group_id, suspended_users=False, start=None):
"""Retrieve one page of owners of the given group.
Args:
group_id: The ID of the group (e.g. us-sales).
suspended_users: A boolean; should we include any suspended users in
the ownership list returned?
start: The key to continue for pagination through all owners.
Returns:
A feed object containing the result of the retrieve operation.
"""
uri = self._ServiceUrl('owner', True, group_id, '', '',
suspended_users=suspended_users)
if start is not None:
if suspended_users:
uri += "&start="+start
else:
uri += "?start="+start
property_feed = self._GetPropertyFeed(uri)
return property_feed
def RemoveOwnerFromGroup(self, owner_email, group_id):
"""Remove the given owner from the given group.
Args:
owner_email: The email address of a group owner.
group_id: The ID of the group (e.g. us-sales).
Returns:
A dict containing the result of the remove operation.
"""
uri = self._ServiceUrl('owner', True, group_id, '', owner_email)
return self._DeleteProperties(uri)
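# Usage sketch (illustrative only -- the domain, credentials and group
# values below are hypothetical, not taken from this module):
#
#   service = GroupsService(email='[email protected]',
#                           domain='example.com',
#                           password='secret')
#   service.ProgrammaticLogin()
#   service.CreateGroup('us-sales', 'US Sales', 'Sales staff',
#                       PERMISSION_ANYONE)
#   service.AddMemberToGroup('[email protected]', 'us-sales')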
| apache-2.0 |
firerszd/kbengine | kbe/src/lib/python/Lib/idlelib/AutoCompleteWindow.py | 88 | 17672 | """
An auto-completion window for IDLE, used by the AutoComplete extension
"""
from tkinter import *
from idlelib.MultiCall import MC_SHIFT
from idlelib.AutoComplete import COMPLETE_FILES, COMPLETE_ATTRIBUTES
HIDE_VIRTUAL_EVENT_NAME = "<<autocompletewindow-hide>>"
HIDE_SEQUENCES = ("<FocusOut>", "<ButtonPress>")
KEYPRESS_VIRTUAL_EVENT_NAME = "<<autocompletewindow-keypress>>"
# We need to bind events beyond <Key> so that the function will be called
# before the default specific IDLE function
KEYPRESS_SEQUENCES = ("<Key>", "<Key-BackSpace>", "<Key-Return>", "<Key-Tab>",
"<Key-Up>", "<Key-Down>", "<Key-Home>", "<Key-End>",
"<Key-Prior>", "<Key-Next>")
KEYRELEASE_VIRTUAL_EVENT_NAME = "<<autocompletewindow-keyrelease>>"
KEYRELEASE_SEQUENCE = "<KeyRelease>"
LISTUPDATE_SEQUENCE = "<B1-ButtonRelease>"
WINCONFIG_SEQUENCE = "<Configure>"
DOUBLECLICK_SEQUENCE = "<B1-Double-ButtonRelease>"
class AutoCompleteWindow:
def __init__(self, widget):
# The widget (Text) on which we place the AutoCompleteWindow
self.widget = widget
# The widgets we create
self.autocompletewindow = self.listbox = self.scrollbar = None
# The default foreground and background of a selection. Saved because
# they are changed to the regular colors of list items when the
# completion start is not a prefix of the selected completion
self.origselforeground = self.origselbackground = None
# The list of completions
self.completions = None
# A list with more completions, or None
self.morecompletions = None
# The completion mode. Either AutoComplete.COMPLETE_ATTRIBUTES or
# AutoComplete.COMPLETE_FILES
self.mode = None
# The current completion start, on the text box (a string)
self.start = None
# The index of the start of the completion
self.startindex = None
# The last typed start, used so that when the selection changes,
# the new start will be as close as possible to the last typed one.
self.lasttypedstart = None
# Do we have an indication that the user wants the completion window
# (for example, he clicked the list)
self.userwantswindow = None
# event ids
self.hideid = self.keypressid = self.listupdateid = self.winconfigid \
= self.keyreleaseid = self.doubleclickid = None
# Flag set if last keypress was a tab
self.lastkey_was_tab = False
def _change_start(self, newstart):
min_len = min(len(self.start), len(newstart))
i = 0
while i < min_len and self.start[i] == newstart[i]:
i += 1
if i < len(self.start):
self.widget.delete("%s+%dc" % (self.startindex, i),
"%s+%dc" % (self.startindex, len(self.start)))
if i < len(newstart):
self.widget.insert("%s+%dc" % (self.startindex, i),
newstart[i:])
self.start = newstart
def _binary_search(self, s):
"""Find the first index in self.completions where completions[i] is
greater or equal to s, or the last index if there is no such
one."""
i = 0; j = len(self.completions)
while j > i:
m = (i + j) // 2
if self.completions[m] >= s:
j = m
else:
i = m + 1
return min(i, len(self.completions)-1)
def _complete_string(self, s):
"""Assuming that s is the prefix of a string in self.completions,
return the longest string which is a prefix of all the strings which
s is a prefix of them. If s is not a prefix of a string, return s."""
first = self._binary_search(s)
if self.completions[first][:len(s)] != s:
# There is not even one completion which s is a prefix of.
return s
# Find the end of the range of completions where s is a prefix of.
i = first + 1
j = len(self.completions)
while j > i:
m = (i + j) // 2
if self.completions[m][:len(s)] != s:
j = m
else:
i = m + 1
last = i-1
if first == last: # only one possible completion
return self.completions[first]
# We should return the maximum prefix of first and last
first_comp = self.completions[first]
last_comp = self.completions[last]
min_len = min(len(first_comp), len(last_comp))
i = len(s)
while i < min_len and first_comp[i] == last_comp[i]:
i += 1
return first_comp[:i]
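    # Illustrative behavior with hypothetical data: if self.completions were
    # ['spam', 'spameggs', 'spamham'], then _binary_search('sp') would return
    # 0 and _complete_string('sp') would return 'spam' -- the longest common
    # prefix of all completions that start with 'sp'.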
def _selection_changed(self):
"""Should be called when the selection of the Listbox has changed.
Updates the Listbox display and calls _change_start."""
cursel = int(self.listbox.curselection()[0])
self.listbox.see(cursel)
lts = self.lasttypedstart
selstart = self.completions[cursel]
if self._binary_search(lts) == cursel:
newstart = lts
else:
min_len = min(len(lts), len(selstart))
i = 0
while i < min_len and lts[i] == selstart[i]:
i += 1
newstart = selstart[:i]
self._change_start(newstart)
if self.completions[cursel][:len(self.start)] == self.start:
# start is a prefix of the selected completion
self.listbox.configure(selectbackground=self.origselbackground,
selectforeground=self.origselforeground)
else:
self.listbox.configure(selectbackground=self.listbox.cget("bg"),
selectforeground=self.listbox.cget("fg"))
# If there are more completions, show them, and call me again.
if self.morecompletions:
self.completions = self.morecompletions
self.morecompletions = None
self.listbox.delete(0, END)
for item in self.completions:
self.listbox.insert(END, item)
self.listbox.select_set(self._binary_search(self.start))
self._selection_changed()
def show_window(self, comp_lists, index, complete, mode, userWantsWin):
"""Show the autocomplete list, bind events.
If complete is True, complete the text, and if there is exactly one
matching completion, don't open a list."""
# Handle the start we already have
self.completions, self.morecompletions = comp_lists
self.mode = mode
self.startindex = self.widget.index(index)
self.start = self.widget.get(self.startindex, "insert")
if complete:
completed = self._complete_string(self.start)
start = self.start
self._change_start(completed)
i = self._binary_search(completed)
if self.completions[i] == completed and \
(i == len(self.completions)-1 or
self.completions[i+1][:len(completed)] != completed):
# There is exactly one matching completion
return completed == start
self.userwantswindow = userWantsWin
self.lasttypedstart = self.start
# Put widgets in place
self.autocompletewindow = acw = Toplevel(self.widget)
# Put it in a position so that it is not seen.
acw.wm_geometry("+10000+10000")
# Make it float
acw.wm_overrideredirect(1)
try:
# This command is only needed and available on Tk >= 8.4.0 for OSX
# Without it, call tips intrude on the typing process by grabbing
# the focus.
acw.tk.call("::tk::unsupported::MacWindowStyle", "style", acw._w,
"help", "noActivates")
except TclError:
pass
self.scrollbar = scrollbar = Scrollbar(acw, orient=VERTICAL)
self.listbox = listbox = Listbox(acw, yscrollcommand=scrollbar.set,
exportselection=False, bg="white")
for item in self.completions:
listbox.insert(END, item)
self.origselforeground = listbox.cget("selectforeground")
self.origselbackground = listbox.cget("selectbackground")
scrollbar.config(command=listbox.yview)
scrollbar.pack(side=RIGHT, fill=Y)
listbox.pack(side=LEFT, fill=BOTH, expand=True)
# Initialize the listbox selection
self.listbox.select_set(self._binary_search(self.start))
self._selection_changed()
# bind events
self.hideid = self.widget.bind(HIDE_VIRTUAL_EVENT_NAME,
self.hide_event)
for seq in HIDE_SEQUENCES:
self.widget.event_add(HIDE_VIRTUAL_EVENT_NAME, seq)
self.keypressid = self.widget.bind(KEYPRESS_VIRTUAL_EVENT_NAME,
self.keypress_event)
for seq in KEYPRESS_SEQUENCES:
self.widget.event_add(KEYPRESS_VIRTUAL_EVENT_NAME, seq)
self.keyreleaseid = self.widget.bind(KEYRELEASE_VIRTUAL_EVENT_NAME,
self.keyrelease_event)
self.widget.event_add(KEYRELEASE_VIRTUAL_EVENT_NAME,KEYRELEASE_SEQUENCE)
self.listupdateid = listbox.bind(LISTUPDATE_SEQUENCE,
self.listselect_event)
self.winconfigid = acw.bind(WINCONFIG_SEQUENCE, self.winconfig_event)
self.doubleclickid = listbox.bind(DOUBLECLICK_SEQUENCE,
self.doubleclick_event)
def winconfig_event(self, event):
if not self.is_active():
return
# Position the completion list window
text = self.widget
text.see(self.startindex)
x, y, cx, cy = text.bbox(self.startindex)
acw = self.autocompletewindow
acw_width, acw_height = acw.winfo_width(), acw.winfo_height()
text_width, text_height = text.winfo_width(), text.winfo_height()
new_x = text.winfo_rootx() + min(x, max(0, text_width - acw_width))
new_y = text.winfo_rooty() + y
if (text_height - (y + cy) >= acw_height # enough height below
or y < acw_height): # not enough height above
# place acw below current line
new_y += cy
else:
# place acw above current line
new_y -= acw_height
acw.wm_geometry("+%d+%d" % (new_x, new_y))
def hide_event(self, event):
if not self.is_active():
return
self.hide_window()
def listselect_event(self, event):
if not self.is_active():
return
self.userwantswindow = True
cursel = int(self.listbox.curselection()[0])
self._change_start(self.completions[cursel])
def doubleclick_event(self, event):
# Put the selected completion in the text, and close the list
cursel = int(self.listbox.curselection()[0])
self._change_start(self.completions[cursel])
self.hide_window()
def keypress_event(self, event):
if not self.is_active():
return
keysym = event.keysym
if hasattr(event, "mc_state"):
state = event.mc_state
else:
state = 0
if keysym != "Tab":
self.lastkey_was_tab = False
if (len(keysym) == 1 or keysym in ("underscore", "BackSpace")
or (self.mode == COMPLETE_FILES and keysym in
("period", "minus"))) \
and not (state & ~MC_SHIFT):
# Normal editing of text
if len(keysym) == 1:
self._change_start(self.start + keysym)
elif keysym == "underscore":
self._change_start(self.start + '_')
elif keysym == "period":
self._change_start(self.start + '.')
elif keysym == "minus":
self._change_start(self.start + '-')
else:
# keysym == "BackSpace"
if len(self.start) == 0:
self.hide_window()
return
self._change_start(self.start[:-1])
self.lasttypedstart = self.start
self.listbox.select_clear(0, int(self.listbox.curselection()[0]))
self.listbox.select_set(self._binary_search(self.start))
self._selection_changed()
return "break"
elif keysym == "Return":
self.hide_window()
return
elif (self.mode == COMPLETE_ATTRIBUTES and keysym in
("period", "space", "parenleft", "parenright", "bracketleft",
"bracketright")) or \
(self.mode == COMPLETE_FILES and keysym in
("slash", "backslash", "quotedbl", "apostrophe")) \
and not (state & ~MC_SHIFT):
# If start is a prefix of the selection, but is not '' when
# completing file names, put the whole
# selected completion. Anyway, close the list.
cursel = int(self.listbox.curselection()[0])
if self.completions[cursel][:len(self.start)] == self.start \
and (self.mode == COMPLETE_ATTRIBUTES or self.start):
self._change_start(self.completions[cursel])
self.hide_window()
return
elif keysym in ("Home", "End", "Prior", "Next", "Up", "Down") and \
not state:
# Move the selection in the listbox
self.userwantswindow = True
cursel = int(self.listbox.curselection()[0])
if keysym == "Home":
newsel = 0
elif keysym == "End":
newsel = len(self.completions)-1
elif keysym in ("Prior", "Next"):
jump = self.listbox.nearest(self.listbox.winfo_height()) - \
self.listbox.nearest(0)
if keysym == "Prior":
newsel = max(0, cursel-jump)
else:
assert keysym == "Next"
newsel = min(len(self.completions)-1, cursel+jump)
elif keysym == "Up":
newsel = max(0, cursel-1)
else:
assert keysym == "Down"
newsel = min(len(self.completions)-1, cursel+1)
self.listbox.select_clear(cursel)
self.listbox.select_set(newsel)
self._selection_changed()
self._change_start(self.completions[newsel])
return "break"
elif (keysym == "Tab" and not state):
if self.lastkey_was_tab:
# two tabs in a row; insert current selection and close acw
cursel = int(self.listbox.curselection()[0])
self._change_start(self.completions[cursel])
self.hide_window()
return "break"
else:
# first tab; let AutoComplete handle the completion
self.userwantswindow = True
self.lastkey_was_tab = True
return
elif any(s in keysym for s in ("Shift", "Control", "Alt",
"Meta", "Command", "Option")):
# A modifier key, so ignore
return
elif event.char and event.char >= ' ':
# Regular character with a non-length-1 keycode
self._change_start(self.start + event.char)
self.lasttypedstart = self.start
self.listbox.select_clear(0, int(self.listbox.curselection()[0]))
self.listbox.select_set(self._binary_search(self.start))
self._selection_changed()
return "break"
else:
# Unknown event, close the window and let it through.
self.hide_window()
return
def keyrelease_event(self, event):
if not self.is_active():
return
if self.widget.index("insert") != \
self.widget.index("%s+%dc" % (self.startindex, len(self.start))):
# If we didn't catch an event which moved the insert, close window
self.hide_window()
def is_active(self):
return self.autocompletewindow is not None
def complete(self):
self._change_start(self._complete_string(self.start))
# The selection doesn't change.
def hide_window(self):
if not self.is_active():
return
# unbind events
for seq in HIDE_SEQUENCES:
self.widget.event_delete(HIDE_VIRTUAL_EVENT_NAME, seq)
self.widget.unbind(HIDE_VIRTUAL_EVENT_NAME, self.hideid)
self.hideid = None
for seq in KEYPRESS_SEQUENCES:
self.widget.event_delete(KEYPRESS_VIRTUAL_EVENT_NAME, seq)
self.widget.unbind(KEYPRESS_VIRTUAL_EVENT_NAME, self.keypressid)
self.keypressid = None
self.widget.event_delete(KEYRELEASE_VIRTUAL_EVENT_NAME,
KEYRELEASE_SEQUENCE)
self.widget.unbind(KEYRELEASE_VIRTUAL_EVENT_NAME, self.keyreleaseid)
self.keyreleaseid = None
self.listbox.unbind(LISTUPDATE_SEQUENCE, self.listupdateid)
self.listupdateid = None
self.autocompletewindow.unbind(WINCONFIG_SEQUENCE, self.winconfigid)
self.winconfigid = None
# destroy widgets
self.scrollbar.destroy()
self.scrollbar = None
self.listbox.destroy()
self.listbox = None
self.autocompletewindow.destroy()
self.autocompletewindow = None
| lgpl-3.0 |
SivilTaram/edx-platform | common/djangoapps/embargo/migrations/0006_auto__add_field_restrictedcourse_disable_access_check.py | 94 | 7741 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'RestrictedCourse.disable_access_check'
db.add_column('embargo_restrictedcourse', 'disable_access_check',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'RestrictedCourse.disable_access_check'
db.delete_column('embargo_restrictedcourse', 'disable_access_check')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'embargo.country': {
'Meta': {'ordering': "['country']", 'object_name': 'Country'},
'country': ('django_countries.fields.CountryField', [], {'unique': 'True', 'max_length': '2', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.countryaccessrule': {
'Meta': {'unique_together': "(('restricted_course', 'country'),)", 'object_name': 'CountryAccessRule'},
'country': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['embargo.Country']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'restricted_course': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['embargo.RestrictedCourse']"}),
'rule_type': ('django.db.models.fields.CharField', [], {'default': "'blacklist'", 'max_length': '255'})
},
'embargo.courseaccessrulehistory': {
'Meta': {'object_name': 'CourseAccessRuleHistory'},
'course_key': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'snapshot': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'})
},
'embargo.embargoedcourse': {
'Meta': {'object_name': 'EmbargoedCourse'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'embargoed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.embargoedstate': {
'Meta': {'object_name': 'EmbargoedState'},
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'embargoed_countries': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'embargo.ipfilter': {
'Meta': {'object_name': 'IPFilter'},
'blacklist': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'change_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'whitelist': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'embargo.restrictedcourse': {
'Meta': {'object_name': 'RestrictedCourse'},
'access_msg_key': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '255'}),
'course_key': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'disable_access_check': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'enroll_msg_key': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
}
}
    complete_apps = ['embargo']
| agpl-3.0 |
Alexey-T/CudaText | app/cudatext.app/Contents/Resources/py/cuda_comments/cd_comments.py | 2 | 17881 | ''' Plugin for CudaText editor
Authors:
Andrey Kvichansky (kvichans on github.com)
Alexey Torgashin (CudaText)
Version:
'0.8.9 2021-04-05'
'''
import os
import cudatext as app
from cudatext import ed
import cudatext_cmd as cmds
import cudax_lib as apx
from .cd_plug_lib import *
# I18N
_ = get_translation(__file__)
pass; LOG = (-1==-1) # Enable or disable logging.
class Command:
def __init__(self):
self.pair4lex = {}
#def __init__
def dlg_config(self):
save_bd_col = apx.get_opt('comment_save_column' , False)
at_min_bd = apx.get_opt('comment_equal_column' , False)
move_down = apx.get_opt('comment_move_down' , True)
save_s = _('(Line commands) Try to keep text position after (un)commenting')
save_h = _('Try to replace only blank(s) to keep text positions:'
'\rUncommented lines:'
'\r····foo1'
'\r····foo2'
'\rCommented lines:'
'\r#···foo1'
'\r···#foo2'
)
        vert_s = _('(Line "at non-space") If several lines are selected, insert comment at the maximal common indent')
vert_h = _('Use max same column to comment:'
'\rUncommented lines:'
'\r··foo1'
'\r····foo2'
'\r······foo3'
'\rCommented lines:'
'\r·#foo1'
'\r·#··foo2'
'\r·#····foo3'
)
down_s = _('(All) Move caret to next line')
aid,vals,chds = dlg_wrapper(_('Config commenting commands'), 610, 110, #NOTE: dlg-cmnt
[dict(cid='save',tp='ch' ,t=5 ,l=5 ,w=600 ,cap=save_s ,hint=save_h) #
,dict(cid='vert',tp='ch' ,t=5+25 ,l=5 ,w=600 ,cap=vert_s ,hint=vert_h) #
,dict(cid='down',tp='ch' ,t=5+50 ,l=5 ,w=600 ,cap=down_s ) #
,dict(cid='!' ,tp='bt' ,t=80 ,l=610-165-5,w=80 ,cap=_('OK'),ex0='1' ) # default
,dict(cid='-' ,tp='bt' ,t=80 ,l=610 -80-5,w=80 ,cap=_('Cancel') )
], dict(save=save_bd_col
,vert=at_min_bd
,down=move_down
), focus_cid='save')
if aid is None or aid=='-': return
if vals['save'] != save_bd_col: apx.set_opt('comment_save_column' , vals['save'])
if vals['vert'] != at_min_bd: apx.set_opt('comment_equal_column' , vals['vert'])
if vals['down'] != move_down: apx.set_opt('comment_move_down' , vals['down'])
#def dlg_config
def cmt_toggle_line_1st(self):
return self._cmt_toggle_line('bgn', '1st')
def cmt_add_line_1st(self):
return self._cmt_toggle_line('add', '1st')
def cmt_toggle_line_body(self):
return self._cmt_toggle_line('bgn', 'bod')
def cmt_add_line_body(self):
return self._cmt_toggle_line('add', 'bod')
def cmt_del_line(self):
return self._cmt_toggle_line('del')
def _cmt_toggle_line(self, cmt_act, cmt_type='', ed_=ed):
''' Add/Remove full line comment
Params
cmt_act 'del' uncomment all lines
'add' comment all lines
'bgn' (un)comment all as toggled first line
            cmt_type    '1st'   at beginning of line
                        'bod'   at first non-blank character
'''
# if not apx._check_API('1.0.108'): return
lex = ed_.get_prop(app.PROP_LEXER_CARET)
if not lex:
return
prop = app.lexer_proc(app.LEXER_GET_PROP, lex)
if not prop:
return
cmt_sgn = prop['c_line']
pass; #log('cmt_type, lex, cmt_sgn={}', (cmt_type, lex, cmt_sgn))
if not cmt_sgn:
            return app.msg_status(f(_('Lexer "{}" doesn\'t support "line comments"'), lex))
        # Analyze
empty_sel = False
rWrks = []
use_rep_lines = True # use API replace_lines()
y1,y2,lines = (-1, -1, []) if use_rep_lines else (None, None, None)
pass; #LOG and log('ed_.get_sel_mode(),app.SEL_NORMAL,app.SEL_COLUMN={}', (ed_.get_sel_mode(),app.SEL_NORMAL,app.SEL_COLUMN))
crts = ed_.get_carets()
if False:pass
elif ed_.get_sel_mode() == app.SEL_NORMAL:
empty_sel = 1==len(crts) and -1==crts[0][3]
for (cCrt, rCrt ,cEnd, rEnd) in crts:
(rCrtMin
,rCrtMax) = apx.minmax(rCrt, rEnd if -1!=rEnd else rCrt)
if -1!=rEnd and rCrt>rEnd and 0==cCrt:
rCrtMax = rCrtMax-1 # For direct section along left bound
rWrks += list(range(rCrtMin, rCrtMax+1))
use_rep_lines = use_rep_lines and 1==len(crts)
elif ed_.get_sel_mode() == app.SEL_COLUMN:
(cBgn
,rSelBgn
,cEnd
,rSelEnd) = ed_.get_sel_rect()
rWrks = list(range(rSelBgn, rSelEnd+1))
if not rWrks:
rWrks = [crts[0][1]]
pass; #log('rWrks={}', (rWrks))
y1,y2 = (rWrks[0],rWrks[-1]) if use_rep_lines else (y1,y2)
pass; #LOG and log('y1,y2,lines={}', (y1,y2,lines))
do_uncmt = ed_.get_text_line(rWrks[0]).lstrip().startswith(cmt_sgn) \
if cmt_act=='bgn' else \
True \
if cmt_act=='del' else \
False
# Work
save_bd_col = apx.get_opt('comment_save_column' , False)
at_min_bd = apx.get_opt('comment_equal_column', False)
col_min_bd = 1000 # infinity
col_kept = False # plugin applied the "Try to keep text position"
if at_min_bd:
for rWrk in rWrks:
line = ed_.get_text_line(rWrk)
pos_body = line.index(line.lstrip())
pos_body = len(line) if 0==len(line.lstrip()) else pos_body
col_min_bd = min(pos_body, col_min_bd)
if 0==col_min_bd:
break # for rWrk
blnks4cmt = ' '*len(cmt_sgn) # '\t'.expandtabs(len(cmt_sgn))
pass; #log('rWrks,do_uncmt, save_cols, at_min_bd, col_min_bd={}', (rWrks,do_uncmt,save_bd_col,at_min_bd,col_min_bd))
for rWrk in rWrks:
line = ed_.get_text_line(rWrk)
pos_body= line.index(line.lstrip())
pos_body= len(line) if 0==len(line.lstrip()) else pos_body
pass; #LOG and log('rWrk,pos_body,line={}', (rWrk,pos_body,line))
if do_uncmt:
# Uncomment!
if not line[pos_body:].startswith(cmt_sgn):
# Already no comment
if use_rep_lines:
lines += [line]
continue #for rWrk
if False:pass
elif len(line)==len(cmt_sgn): # and line.startswith(cmt_sgn)
line = ''
elif save_bd_col and (' '==line[0] or
' '==line[pos_body+len(cmt_sgn)]):
# Before or after cmt_sgn must be blank
line = line.replace(cmt_sgn, blnks4cmt, 1)
col_kept = True
else:
line = line.replace(cmt_sgn, '' , 1)
else:
# Comment!
if cmt_type=='bod' and line[pos_body:].startswith(cmt_sgn):
                # Body comment already set - will not double it
if use_rep_lines:
lines += [line]
continue #for rWrk
if False:pass
elif cmt_type=='1st' and save_bd_col and line.startswith(blnks4cmt) :
line = line.replace(blnks4cmt, cmt_sgn, 1)
col_kept = True
#elif cmt_type=='1st' and save_bd_col # !line.startswith(blnks4cmt) :
elif cmt_type=='1st':# !save_bd_col
line = cmt_sgn+line
elif cmt_type=='bod' and save_bd_col and line.startswith(blnks4cmt):
col_kept = True
pos_cmnt = col_min_bd if at_min_bd else pos_body
pass; #LOG and log('pos_cmnt={}', (pos_cmnt))
if pos_cmnt>=len(cmt_sgn):
line = line[:pos_cmnt-len(cmt_sgn)]+cmt_sgn+line[pos_cmnt: ]
else:
line = line[:pos_cmnt ]+cmt_sgn+line[pos_cmnt+len(cmt_sgn):]
#line = line[:pos_cmnt-len(cmt_sgn)]+cmt_sgn+line[pos_cmnt:]
#line = line[:pos_body-len(cmt_sgn)]+cmt_sgn+line[pos_body:]
#elif cmt_type=='bod' and save_bd_col # !line.startswith(blnks4cmt) :
elif cmt_type=='bod':# !save_bd_col
pos_cmnt = col_min_bd if at_min_bd else pos_body
pass; #LOG and log('pos_cmnt={}', (pos_cmnt))
line = line[:pos_cmnt] +cmt_sgn+line[pos_cmnt:]
#line = line[:pos_body] +cmt_sgn+line[pos_body:]
pass; #LOG and log('new line={}', (line))
if use_rep_lines:
lines += [line]
else:
                pass; #log('line={}',(line))
ed_.set_text_line(rWrk, line)
#for rWrk
if use_rep_lines:
pass; #log('y1, y2, len(lines), lines={}',(y1, y2, len(lines), lines))
if y1==y2:
ed_.set_text_line(y1, lines[0])
else:
ed_.replace_lines(y1, y2, lines)
# move caret down
(cCrt, rCrt, cEnd, rEnd) = crts[0]
move_down = apx.get_opt('comment_move_down', True) and (rCrt+1 < ed_.get_line_count())
if empty_sel and move_down:
apx._move_caret_down(cCrt, rCrt)
# shift caret horizontally if it's on the same line
if not move_down and empty_sel and not col_kept:
dx = len(cmt_sgn)
if do_uncmt:
dx = -dx
cCrt = max(0, cCrt+dx)
ed_.set_caret(cCrt, rCrt)
#def _cmt_toggle_line
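    # Illustrative effect of the "keep column" option (hypothetical Python
    # lexer with line comment '#'):
    #   before:  '    foo()'
    #   after:   '#   foo()'   -- the sign replaces blanks, text keeps its column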
def cmt_toggle_stream(self):
''' '''
if ed.get_sel_mode() != app.SEL_NORMAL:
return app.msg_status(f(_('{} works only with normal selection'), _('Commenting')))
lex = ed.get_prop(app.PROP_LEXER_CARET)
((bgn_sgn
,end_sgn)
,bOnlyLn)=self._get_cmt_pair(lex)
if not bgn_sgn:
return app.msg_status(f(_('No stream comment for lexer "{}"'), lex))
crts = ed.get_carets()
pass; #LOG and log('lex, get_carets()={}', (lex, crts))
pass; #LOG and log('(bgn_sgn,end_sgn),bOnlyLn,bUseFLn={}', ((bgn_sgn,end_sgn),bOnlyLn,bUseFLn))
for icrt, (cCrt, rCrt, cEnd, rEnd) in enumerate(crts):
pass; #LOG and log('(cCrt, rCrt), (cEnd, rEnd)={}', ((cCrt, rCrt), (cEnd, rEnd)))
empty_sel = -1==rEnd
bDrtSel = -1==rEnd or (rCrt, cCrt)>(rEnd, cEnd)
bEntireLn = (rEnd>=0) and (cEnd==0) and (cCrt==0)
bEntireLn1 = bEntireLn and abs(rEnd-rCrt)==1
bEntireLn2 = bEntireLn and abs(rEnd-rCrt)>1
if False:pass
elif empty_sel:
# Use full line
line = ed.get_text_line(rCrt)
(cTx1, rTx1), (cTx2, rTx2) = (0, rCrt), (len(line), rCrt)
elif bOnlyLn: # and not empty_sel
# Only full lines
rTx1, rTx2 = apx.minmax(rCrt, rEnd)
line = ed.get_text_line(rTx2)
(cTx1, rTx1), (cTx2, rTx2) = (0, rTx1), (len(line), rTx2)
elif empty_sel: # and not bUseFLn and not bOnlyLn
continue
else:
(rTx1, cTx1), (rTx2, cTx2) = apx.minmax((rCrt, cCrt), (rEnd, cEnd))
selTx = ed.get_text_substr(cTx1, rTx1, cTx2, rTx2)
pass; #LOG and log('(rTx1, cTx1), (rTx2, cTx2), selTx={}', ((rTx1, cTx1), (rTx2, cTx2), repr(selTx)))
do_uncmt= selTx.startswith(bgn_sgn) #and selTx.endswith(end_sgn)
# don't check for ending of selection - for HTML and entire selected line(s)
pass; #LOG and log('do_uncmt={}', (do_uncmt))
if False:pass
elif not do_uncmt and bOnlyLn:
# Comment!
ed.insert(0, rTx2+1, end_sgn+'\n') #! true insert sequence
ed.insert(0, rTx1, bgn_sgn+'\n') #! true insert sequence
(cNSel1, rNSel1
,cNSel2, rNSel2) = 0, rTx1, len(end_sgn), rTx2+2
elif not do_uncmt:
# Comment!
if bEntireLn1:
s = ed.get_text_line(rTx1)
ed.set_text_line(rTx1, bgn_sgn+s+end_sgn)
(cNSel1, rNSel1
,cNSel2, rNSel2) = (0, rTx1, 0, rTx2)
elif bEntireLn2:
ed.insert(0, rTx2, end_sgn+'\n')
ed.insert(0, rTx1, bgn_sgn+'\n')
(cNSel1, rNSel1
,cNSel2, rNSel2) = (0, rTx1, 0, rTx2+2)
else:
ed.insert(cTx2, rTx2, end_sgn) #! true insert sequence
ed.insert(cTx1, rTx1, bgn_sgn) #! true insert sequence
if False:pass
elif rTx1==rTx2:
# sel into one row
(cNSel1, rNSel1
,cNSel2, rNSel2) = cTx1, rTx1, cTx2+len(bgn_sgn)+len(end_sgn), rTx2
elif rTx1!=rTx2:
# sel ends on diff rows
(cNSel1, rNSel1
,cNSel2, rNSel2) = cTx1, rTx1, cTx2 +len(end_sgn), rTx2
elif do_uncmt and bOnlyLn:
# UnComment!
ed.delete(0, rTx2, 0, rTx2+1) #! true delete sequence
ed.delete(0, rTx1, 0, rTx1+1) #! true delete sequence
(cNSel1, rNSel1
,cNSel2, rNSel2) = 0, rTx1, len(ed.get_text_line(rTx2-2)), rTx2-2
elif do_uncmt:
# UnComment!
if selTx.endswith(end_sgn):
ed.delete(cTx2-len(end_sgn), rTx2, cTx2, rTx2) #! true delete sequence
ed.delete(cTx1, rTx1, cTx1+len(bgn_sgn), rTx1) #! true delete sequence
if False:pass
elif rTx1==rTx2:
# sel into one row
(cNSel1, rNSel1
,cNSel2, rNSel2) = cTx1, rTx1, cTx2-len(bgn_sgn)-len(end_sgn), rTx2
elif rTx1!=rTx2:
# sel ends on diff rows
(cNSel1, rNSel1
,cNSel2, rNSel2) = cTx1, rTx1, cTx2 -len(end_sgn), rTx2
elif bEntireLn1:
s = ed.get_text_line(rTx1)
if s.startswith(bgn_sgn):
s = s[len(bgn_sgn):]
if s.endswith(end_sgn):
s = s[:-len(end_sgn)]
ed.set_text_line(rTx1, s)
(cNSel1, rNSel1
,cNSel2, rNSel2) = (0, rTx1, 0, rTx2)
elif bEntireLn2:
ed.delete(0, rTx2-1, 0, rTx2)
ed.delete(0, rTx1, 0, rTx1+1)
(cNSel1, rNSel1
,cNSel2, rNSel2) = (0, rTx1, 0, rTx2-2)
pass; #LOG and log('bDrtSel, (cNSel1, rNSel1), (cNSel2, rNSel2)={}', (bDrtSel, (cNSel1, rNSel1), (cNSel2, rNSel2)))
if bDrtSel:
ed.set_caret(cNSel2, rNSel2, cNSel1, rNSel1, app.CARET_SET_INDEX+icrt)
else:
ed.set_caret(cNSel1, rNSel1, cNSel2, rNSel2, app.CARET_SET_INDEX+icrt)
#for icrt
move_down = apx.get_opt('comment_move_down', True)
if False:pass
elif 1==len(crts) and empty_sel and move_down:
apx._move_caret_down(cCrt, rCrt)
if bOnlyLn and not do_uncmt:
crt=ed.get_carets()[0]; apx._move_caret_down(crt[0], crt[1])
crt=ed.get_carets()[0]; apx._move_caret_down(crt[0], crt[1])
#def cmt_toggle_stream
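    # Illustrative effect (assuming a C-like lexer whose stream-comment pair
    # is '/*' and '*/' -- hypothetical values):
    #   selection before toggle:  foo(bar)
    #   selection after toggle:   /*foo(bar)*/
    # Toggling again removes the pair.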
def _get_cmt_pair(self, lex):
''' Return ((begin_sign, end_sign), only_lines)
begin_sign as '/*'
end_sign as '*/'
only_lines True if each of *_sign must be whole line
'''
if lex not in self.pair4lex:
only_ln = False
prop = app.lexer_proc(app.LEXER_GET_PROP, lex)
pair1 = prop['c_str'] if prop else None
pair2 = prop['c_lined'] if prop else None
if pair1 is not None:
pair = pair1
elif pair2 is not None:
pair = pair2
only_ln = True
else:
pair = ('', '')
self.pair4lex[lex] = (pair, only_ln)
return self.pair4lex[lex]
#def _get_cmt_pair
| mpl-2.0 |
pbougue/navitia | source/jormungandr/jormungandr/interfaces/v1/Calendars.py | 2 | 4757 | # coding=utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
from jormungandr import i_manager
from jormungandr.interfaces.v1.ResourceUri import ResourceUri
from jormungandr.interfaces.parsers import default_count_arg_type
from jormungandr.interfaces.v1.decorators import get_obj_serializer
from jormungandr.interfaces.v1.errors import ManageError
from jormungandr.interfaces.v1.serializer import api
from navitiacommon.parser_args_type import DateTimeFormat, DepthArgument
from datetime import datetime
import six
class Calendars(ResourceUri):
def __init__(self):
ResourceUri.__init__(self, output_type_serializer=api.CalendarsSerializer)
parser_get = self.parsers["get"]
parser_get.add_argument("depth", type=DepthArgument(), default=1, help="The depth of your object")
parser_get.add_argument(
"count", type=default_count_arg_type, default=10, help="Number of calendars per page"
)
parser_get.add_argument("start_page", type=int, default=0, help="The current page")
parser_get.add_argument("start_date", type=six.text_type, default="", help="Start date")
parser_get.add_argument("end_date", type=six.text_type, default="", help="End date")
parser_get.add_argument(
"forbidden_id[]",
type=six.text_type,
deprecated=True,
help="DEPRECATED, replaced by `forbidden_uris[]`",
dest="__temporary_forbidden_id[]",
default=[],
action='append',
schema_metadata={'format': 'pt-object'},
)
parser_get.add_argument(
"forbidden_uris[]",
type=six.text_type,
help="forbidden uris",
dest="forbidden_uris[]",
default=[],
action="append",
schema_metadata={'format': 'pt-object'},
)
parser_get.add_argument(
"distance",
type=int,
default=200,
help="Distance range of the query. Used only if a coord is in the query",
)
parser_get.add_argument(
"_current_datetime",
type=DateTimeFormat(),
schema_metadata={'default': 'now'},
hidden=True,
default=datetime.utcnow(),
help='The datetime considered as "now". Used for debug, default is '
'the moment of the request. It will mainly change the output '
'of the disruptions.',
)
self.collection = 'calendars'
self.get_decorators.insert(0, ManageError())
self.get_decorators.insert(1, get_obj_serializer(self))
def options(self, **kwargs):
return self.api_description(**kwargs)
def get(self, region=None, lon=None, lat=None, uri=None, id=None):
self.region = i_manager.get_region(region, lon, lat)
args = self.parsers["get"].parse_args()
# for retrocompatibility purpose
for forbid_id in args['__temporary_forbidden_id[]']:
args['forbidden_uris[]'].append(forbid_id)
if id:
args["filter"] = "calendar.uri=" + id
elif uri:
# Calendars of line
if uri[-1] == "/":
uri = uri[:-1]
uris = uri.split("/")
args["filter"] = self.get_filter(uris, args)
else:
args["filter"] = ""
self._register_interpreted_parameters(args)
response = i_manager.dispatch(args, "calendars", instance_name=self.region)
return response
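# Illustrative request shape served by this resource (the URL below is an
# assumption based on the registered 'calendars' collection, not taken from
# this file):
#   GET /v1/coverage/<region>/calendars?start_date=20200101&end_date=20200201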
| agpl-3.0 |
TeamBliss-Devices/android_kernel_htc_msm8974 | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
WhySoGeeky/DroidPot | venv/lib/python2.7/site-packages/django/contrib/gis/admin/widgets.py | 449 | 4881 | import logging
from django.contrib.gis.gdal import GDALException
from django.contrib.gis.geos import GEOSException, GEOSGeometry
from django.forms.widgets import Textarea
from django.template import loader
from django.utils import six, translation
# Creating a template context that contains Django settings
# values needed by admin map templates.
geo_context = {'LANGUAGE_BIDI': translation.get_language_bidi()}
logger = logging.getLogger('django.contrib.gis')
class OpenLayersWidget(Textarea):
"""
Renders an OpenLayers map using the WKT of the geometry.
"""
def render(self, name, value, attrs=None):
# Update the template parameters with any attributes passed in.
if attrs:
self.params.update(attrs)
self.params['editable'] = self.params['modifiable']
else:
self.params['editable'] = True
# Defaulting the WKT value to a blank string -- this
# will be tested in the JavaScript and the appropriate
# interface will be constructed.
self.params['wkt'] = ''
# If a string reaches here (via a validation error on another
# field) then just reconstruct the Geometry.
if isinstance(value, six.string_types):
try:
value = GEOSGeometry(value)
except (GEOSException, ValueError) as err:
logger.error(
"Error creating geometry from value '%s' (%s)" % (
value, err)
)
value = None
if (value and value.geom_type.upper() != self.geom_type and
self.geom_type != 'GEOMETRY'):
value = None
# Constructing the dictionary of the map options.
self.params['map_options'] = self.map_options()
# Constructing the JavaScript module name using the name of
# the GeometryField (passed in via the `attrs` keyword).
# Use the 'name' attr for the field name (rather than 'field')
self.params['name'] = name
# note: we must switch out dashes for underscores since js
# functions are created using the module variable
js_safe_name = self.params['name'].replace('-', '_')
self.params['module'] = 'geodjango_%s' % js_safe_name
if value:
# Transforming the geometry to the projection used on the
# OpenLayers map.
srid = self.params['srid']
if value.srid != srid:
try:
ogr = value.ogr
ogr.transform(srid)
wkt = ogr.wkt
except GDALException as err:
logger.error(
"Error transforming geometry from srid '%s' to srid '%s' (%s)" % (
value.srid, srid, err)
)
wkt = ''
else:
wkt = value.wkt
# Setting the parameter WKT with that of the transformed
# geometry.
self.params['wkt'] = wkt
self.params.update(geo_context)
return loader.render_to_string(self.template, self.params)
def map_options(self):
"Builds the map options hash for the OpenLayers template."
# JavaScript construction utilities for the Bounds and Projection.
def ol_bounds(extent):
return 'new OpenLayers.Bounds(%s)' % str(extent)
def ol_projection(srid):
return 'new OpenLayers.Projection("EPSG:%s")' % srid
# An array of the parameter name, the name of their OpenLayers
# counterpart, and the type of variable they are.
map_types = [('srid', 'projection', 'srid'),
('display_srid', 'displayProjection', 'srid'),
('units', 'units', str),
('max_resolution', 'maxResolution', float),
('max_extent', 'maxExtent', 'bounds'),
('num_zoom', 'numZoomLevels', int),
('max_zoom', 'maxZoomLevels', int),
('min_zoom', 'minZoomLevel', int),
]
# Building the map options hash.
map_options = {}
for param_name, js_name, option_type in map_types:
if self.params.get(param_name, False):
if option_type == 'srid':
value = ol_projection(self.params[param_name])
elif option_type == 'bounds':
value = ol_bounds(self.params[param_name])
elif option_type in (float, int):
value = self.params[param_name]
elif option_type in (str,):
value = '"%s"' % self.params[param_name]
else:
raise TypeError
map_options[js_name] = value
return map_options
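# Example of a resulting options hash (illustrative values only):
#   {'projection': 'new OpenLayers.Projection("EPSG:4326")',
#    'numZoomLevels': 18,
#    'units': '"m"'}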
| mit |
ManageIQ/integration_tests | cfme/containers/project.py | 3 | 5378 | import attr
from navmazing import NavigateToAttribute
from navmazing import NavigateToSibling
from cfme.common import Taggable
from cfme.common import TaggableCollection
from cfme.common import TagPageView
from cfme.containers.provider import ContainerObjectAllBaseView
from cfme.containers.provider import ContainerObjectDetailsBaseView
from cfme.containers.provider import GetRandomInstancesMixin
from cfme.containers.provider import Labelable
from cfme.modeling.base import BaseCollection
from cfme.modeling.base import BaseEntity
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigator
from cfme.utils.providers import get_crud_by_name
class ProjectAllView(ContainerObjectAllBaseView):
"""Container Projects All view"""
SUMMARY_TEXT = 'Container Projects'
class ProjectDetailsView(ContainerObjectDetailsBaseView):
"""Container Projects Detail view"""
SUMMARY_TEXT = 'Container Projects'
class ProjectDashboardView(ProjectDetailsView):
@property
def is_displayed(self):
return(
self.breadcrumb.is_displayed and
'{} (Dashboard)'.format(self.context['object'].name) in self.breadcrumb.active_location)
@attr.s
class Project(BaseEntity, Taggable, Labelable):
PLURAL = 'Projects'
all_view = ProjectAllView
details_view = ProjectDetailsView
name = attr.ib()
provider = attr.ib()
@attr.s
class ProjectCollection(GetRandomInstancesMixin, BaseCollection, TaggableCollection):
"""Collection object for :py:class:`Project`."""
ENTITY = Project
def all(self):
# container_projects table has ems_id, join with ext_mgmgt_systems on id for provider name
# TODO Update to use REST API instead of DB queries
project_table = self.appliance.db.client['container_projects']
ems_table = self.appliance.db.client['ext_management_systems']
project_query = (
self.appliance.db.client.session
.query(project_table.name, ems_table.name)
.join(ems_table, project_table.ems_id == ems_table.id))
if self.filters.get('archived'):
project_query = project_query.filter(project_table.deleted_on.isnot(None))
if self.filters.get('active'):
project_query = project_query.filter(project_table.deleted_on.is_(None))
provider = None
# filtered
if self.filters.get('provider'):
provider = self.filters.get('provider')
project_query = project_query.filter(ems_table.name == provider.name)
projects = []
for name, ems_name in project_query.all():
projects.append(self.instantiate(name=name,
provider=provider or get_crud_by_name(ems_name)))
return projects
@navigator.register(ProjectCollection, 'All')
class All(CFMENavigateStep):
prerequisite = NavigateToAttribute('appliance.server', 'LoggedIn')
VIEW = ProjectAllView
def step(self, *args, **kwargs):
self.prerequisite_view.navigation.select('Compute', 'Containers', 'Projects')
def resetter(self, *args, **kwargs):
# Reset view and selection
if self.view.toolbar.view_selector.is_displayed:
self.view.toolbar.view_selector.select("List View")
if self.view.paginator.is_displayed:
self.view.paginator.reset_selection()
@navigator.register(Project, 'Details')
class Details(CFMENavigateStep):
VIEW = ProjectDetailsView
prerequisite = NavigateToAttribute('parent', 'All')
def step(self, *args, **kwargs):
search_visible = self.prerequisite_view.entities.search.is_displayed
self.prerequisite_view.entities.get_entity(name=self.obj.name,
surf_pages=not search_visible,
use_search=search_visible).click()
def resetter(self, *args, **kwargs):
if self.view.toolbar.view_selector.is_displayed:
self.view.toolbar.view_selector.select("Summary View")
@navigator.register(Project, 'Dashboard')
class Dashboard(CFMENavigateStep):
VIEW = ProjectDashboardView
prerequisite = NavigateToAttribute('parent', 'All')
def step(self, *args, **kwargs):
search_visible = self.prerequisite_view.entities.search.is_displayed
self.prerequisite_view.entities.get_entity(name=self.obj.name,
surf_pages=not search_visible,
use_search=search_visible).click()
def resetter(self, *args, **kwargs):
if self.view.toolbar.view_selector.is_displayed:
self.view.toolbar.view_selector.select("Dashboard View")
@navigator.register(Project, 'EditTagsFromDetails')
class EditTagsFromDetails(CFMENavigateStep):
VIEW = TagPageView
prerequisite = NavigateToSibling('Details')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.policy.item_select('Edit Tags')
@navigator.register(Project, 'EditTagsFromDashboard')
class EditTagsFromDashboard(CFMENavigateStep):
VIEW = TagPageView
prerequisite = NavigateToSibling('Dashboard')
def step(self, *args, **kwargs):
self.prerequisite_view.toolbar.policy.item_select('Edit Tags')
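# Usage sketch (illustrative; 'appliance' is an assumed test fixture and the
# 'container_projects' attribute name is an assumption):
#   projects = appliance.collections.container_projects.all()
#   navigate_to(projects[0], 'Details')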
| gpl-2.0 |
campbe13/openhatch | vendor/packages/whoosh/src/whoosh/filedb/filestore.py | 17 | 6771 | # Copyright 2009 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
import os
from threading import Lock
from whoosh.compat import BytesIO
from whoosh.index import _DEF_INDEX_NAME
from whoosh.store import Storage
from whoosh.support.filelock import FileLock
from whoosh.filedb.structfile import StructFile
class ReadOnlyError(Exception):
pass
class FileStorage(Storage):
"""Storage object that stores the index as files in a directory on disk.
"""
def __init__(self, path, mapped=True, readonly=False):
self.folder = path
self.mapped = mapped
self.readonly = readonly
self.locks = {}
if not os.path.exists(path):
raise IOError("Directory %s does not exist" % path)
def create_index(self, schema, indexname=_DEF_INDEX_NAME):
if self.readonly:
raise ReadOnlyError
from whoosh.filedb.fileindex import _create_index, FileIndex
_create_index(self, schema, indexname)
return FileIndex(self, schema, indexname)
def open_index(self, indexname=_DEF_INDEX_NAME, schema=None):
from whoosh.filedb.fileindex import FileIndex
return FileIndex(self, schema=schema, indexname=indexname)
def create_file(self, name, excl=False, mode="wb", **kwargs):
if self.readonly:
raise ReadOnlyError
path = self._fpath(name)
if excl:
flags = os.O_CREAT | os.O_EXCL | os.O_RDWR
if hasattr(os, "O_BINARY"):
flags |= os.O_BINARY
fd = os.open(path, flags)
fileobj = os.fdopen(fd, mode)
else:
fileobj = open(path, mode)
f = StructFile(fileobj, name=name, mapped=self.mapped, **kwargs)
return f
def open_file(self, name, *args, **kwargs):
try:
f = StructFile(open(self._fpath(name), "rb"), name=name, *args,
**kwargs)
except IOError:
#print("Tried to open %r, files=%r" % (name, self.list()))
raise
return f
def _fpath(self, fname):
return os.path.join(self.folder, fname)
def clean(self):
path = self.folder
if not os.path.exists(path):
os.mkdir(path)
files = self.list()
for file in files:
os.remove(os.path.join(path, file))
def list(self):
try:
files = os.listdir(self.folder)
except IOError:
files = []
return files
def file_exists(self, name):
return os.path.exists(self._fpath(name))
def file_modified(self, name):
return os.path.getmtime(self._fpath(name))
def file_length(self, name):
return os.path.getsize(self._fpath(name))
def delete_file(self, name):
os.remove(self._fpath(name))
def rename_file(self, frm, to, safe=False):
if os.path.exists(self._fpath(to)):
if safe:
raise NameError("File %r exists" % to)
else:
os.remove(self._fpath(to))
os.rename(self._fpath(frm), self._fpath(to))
def lock(self, name):
return FileLock(self._fpath(name))
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self.folder))
class RamStorage(FileStorage):
"""Storage object that keeps the index in memory.
"""
def __init__(self):
self.files = {}
self.locks = {}
self.folder = ''
def list(self):
return list(self.files.keys())
def clean(self):
self.files = {}
def total_size(self):
return sum(self.file_length(f) for f in self.list())
def file_exists(self, name):
return name in self.files
def file_length(self, name):
if name not in self.files:
raise NameError
return len(self.files[name])
def delete_file(self, name):
if name not in self.files:
raise NameError
del self.files[name]
def rename_file(self, name, newname, safe=False):
if name not in self.files:
raise NameError("File %r does not exist" % name)
if safe and newname in self.files:
raise NameError("File %r exists" % newname)
content = self.files[name]
del self.files[name]
self.files[newname] = content
def create_file(self, name, **kwargs):
def onclose_fn(sfile):
self.files[name] = sfile.file.getvalue()
f = StructFile(BytesIO(), name=name, onclose=onclose_fn)
return f
def open_file(self, name, *args, **kwargs):
if name not in self.files:
raise NameError("No such file %r" % name)
return StructFile(BytesIO(self.files[name]), name=name, *args,
**kwargs)
def lock(self, name):
if name not in self.locks:
self.locks[name] = Lock()
return self.locks[name]
def copy_to_ram(storage):
"""Copies the given FileStorage object into a new RamStorage object.
:rtype: :class:`RamStorage`
"""
import shutil
ram = RamStorage()
for name in storage.list():
f = storage.open_file(name)
r = ram.create_file(name)
shutil.copyfileobj(f.file, r.file)
f.close()
r.close()
return ram
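# Illustrative sketch (editor's addition): copying an on-disk index into
# memory for read-mostly access. "indexdir" is a hypothetical existing
# directory, e.g. one populated by whoosh.index.create_in().
def _example_copy_to_ram(path="indexdir"):
    disk_storage = FileStorage(path)
    ram_storage = copy_to_ram(disk_storage)
    # every file present on disk is now duplicated in memory
    assert set(ram_storage.list()) == set(disk_storage.list())
    return ram_storage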
| agpl-3.0 |
bigswitch/neutron | neutron/tests/fullstack/test_qos.py | 1 | 7375 | # Copyright 2015 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants
from oslo_utils import uuidutils
import testscenarios
from neutron.agent.common import ovs_lib
from neutron.agent.linux import bridge_lib
from neutron.agent.linux import tc_lib
from neutron.agent.linux import utils
from neutron.services.qos import qos_consts
from neutron.tests.fullstack import base
from neutron.tests.fullstack.resources import environment
from neutron.tests.fullstack.resources import machine
from neutron.plugins.ml2.drivers.linuxbridge.agent.common import \
config as linuxbridge_agent_config
from neutron.plugins.ml2.drivers.linuxbridge.agent import \
linuxbridge_neutron_agent as linuxbridge_agent
from neutron.plugins.ml2.drivers.openvswitch.mech_driver import \
mech_openvswitch as mech_ovs
load_tests = testscenarios.load_tests_apply_scenarios
BANDWIDTH_LIMIT = 500
BANDWIDTH_BURST = 100
def _wait_for_rule_applied_ovs_agent(vm, limit, burst):
utils.wait_until_true(
lambda: vm.bridge.get_egress_bw_limit_for_port(
vm.port.name) == (limit, burst))
def _wait_for_rule_applied_linuxbridge_agent(vm, limit, burst):
port_name = linuxbridge_agent.LinuxBridgeManager.get_tap_device_name(
vm.neutron_port['id'])
tc = tc_lib.TcCommand(
port_name,
linuxbridge_agent_config.DEFAULT_KERNEL_HZ_VALUE,
namespace=vm.host.host_namespace
)
utils.wait_until_true(
lambda: tc.get_filters_bw_limits() == (limit, burst))
def _wait_for_rule_applied(vm, limit, burst):
if isinstance(vm.bridge, ovs_lib.OVSBridge):
_wait_for_rule_applied_ovs_agent(vm, limit, burst)
if isinstance(vm.bridge, bridge_lib.BridgeDevice):
_wait_for_rule_applied_linuxbridge_agent(vm, limit, burst)
def _wait_for_rule_removed(vm):
    # No values are provided when the port doesn't have a qos policy
_wait_for_rule_applied(vm, None, None)
class TestQoSWithL2Agent(base.BaseFullStackTestCase):
scenarios = [
("ovs", {'l2_agent_type': constants.AGENT_TYPE_OVS}),
("linuxbridge", {'l2_agent_type': constants.AGENT_TYPE_LINUXBRIDGE})
]
def setUp(self):
host_desc = [environment.HostDescription(
l3_agent=False,
l2_agent_type=self.l2_agent_type)]
env_desc = environment.EnvironmentDescription(qos=True)
env = environment.Environment(env_desc, host_desc)
super(TestQoSWithL2Agent, self).setUp(env)
def _create_qos_policy(self):
return self.safe_client.create_qos_policy(
self.tenant_id, 'fs_policy', 'Fullstack testing policy',
shared='False')
def _prepare_vm_with_qos_policy(self, limit, burst):
qos_policy = self._create_qos_policy()
qos_policy_id = qos_policy['id']
rule = self.safe_client.create_bandwidth_limit_rule(
self.tenant_id, qos_policy_id, limit, burst)
# Make it consistent with GET reply
qos_policy['rules'].append(rule)
rule['type'] = qos_consts.RULE_TYPE_BANDWIDTH_LIMIT
rule['qos_policy_id'] = qos_policy_id
port = self.safe_client.create_port(
self.tenant_id, self.network['id'],
self.environment.hosts[0].hostname,
qos_policy_id)
vm = self.useFixture(
machine.FakeFullstackMachine(
self.environment.hosts[0],
self.network['id'],
self.tenant_id,
self.safe_client,
neutron_port=port))
return vm, qos_policy
def test_qos_policy_rule_lifecycle(self):
new_limit = BANDWIDTH_LIMIT + 100
self.tenant_id = uuidutils.generate_uuid()
self.network = self.safe_client.create_network(self.tenant_id,
'network-test')
self.subnet = self.safe_client.create_subnet(
self.tenant_id, self.network['id'],
cidr='10.0.0.0/24',
gateway_ip='10.0.0.1',
name='subnet-test',
enable_dhcp=False)
# Create port with qos policy attached
vm, qos_policy = self._prepare_vm_with_qos_policy(BANDWIDTH_LIMIT,
BANDWIDTH_BURST)
_wait_for_rule_applied(vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST)
qos_policy_id = qos_policy['id']
rule = qos_policy['rules'][0]
# Remove rule from qos policy
self.client.delete_bandwidth_limit_rule(rule['id'], qos_policy_id)
_wait_for_rule_removed(vm)
        # Create a new rule with no burst value given; in that case the OVS
        # and Linux Bridge agents should apply a burst value of
        # bandwidth_limit * qos_consts.DEFAULT_BURST_RATE
new_expected_burst = int(
new_limit * qos_consts.DEFAULT_BURST_RATE
)
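        # Worked example (editor's note): with new_limit == 600 kbps and a
        # DEFAULT_BURST_RATE of 0.8 (the exact constant lives in qos_consts
        # and may differ), the expected burst is int(600 * 0.8) == 480 kbps.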
new_rule = self.safe_client.create_bandwidth_limit_rule(
self.tenant_id, qos_policy_id, new_limit)
_wait_for_rule_applied(vm, new_limit, new_expected_burst)
        # Update the rule back to the original limit and burst values
self.client.update_bandwidth_limit_rule(
new_rule['id'], qos_policy_id,
body={'bandwidth_limit_rule': {'max_kbps': BANDWIDTH_LIMIT,
'max_burst_kbps': BANDWIDTH_BURST}})
_wait_for_rule_applied(vm, BANDWIDTH_LIMIT, BANDWIDTH_BURST)
# Remove qos policy from port
self.client.update_port(
vm.neutron_port['id'],
body={'port': {'qos_policy_id': None}})
_wait_for_rule_removed(vm)
class TestQoSWithL2Population(base.BaseFullStackTestCase):
def setUp(self):
# We limit this test to using the openvswitch mech driver, because DSCP
# is presently not implemented for Linux Bridge. The 'rule_types' API
# call only returns rule types that are supported by all configured
# mech drivers. So in a fullstack scenario, where both the OVS and the
# Linux Bridge mech drivers are configured, the DSCP rule type will be
# unavailable since it is not implemented in Linux Bridge.
mech_driver = 'openvswitch'
host_desc = [] # No need to register agents for this test case
env_desc = environment.EnvironmentDescription(qos=True, l2_pop=True,
mech_drivers=mech_driver)
env = environment.Environment(env_desc, host_desc)
super(TestQoSWithL2Population, self).setUp(env)
def test_supported_qos_rule_types(self):
res = self.client.list_qos_rule_types()
rule_types = {t['type'] for t in res['rule_types']}
expected_rules = (
set(mech_ovs.OpenvswitchMechanismDriver.supported_qos_rule_types))
self.assertEqual(expected_rules, rule_types)
| apache-2.0 |
sharad/calibre | src/calibre/utils/logging.py | 2 | 5679 | from __future__ import with_statement
__license__ = 'GPL 3'
__copyright__ = '2009, Kovid Goyal <[email protected]>'
__docformat__ = 'restructuredtext en'
'A simplified logging system'
DEBUG = 0
INFO = 1
WARN = 2
ERROR = 3
import sys, traceback, cStringIO
from functools import partial
from threading import RLock
from calibre import isbytestring, force_unicode, as_unicode
class Stream(object):
def __init__(self, stream=None):
from calibre import prints
self._prints = partial(prints, safe_encode=True)
if stream is None:
stream = cStringIO.StringIO()
self.stream = stream
def flush(self):
self.stream.flush()
class ANSIStream(Stream):
def __init__(self, stream=sys.stdout):
Stream.__init__(self, stream)
self.color = {
DEBUG: u'green',
INFO: None,
WARN: u'yellow',
ERROR: u'red',
}
def prints(self, level, *args, **kwargs):
from calibre.utils.terminal import ColoredStream
with ColoredStream(self.stream, self.color[level]):
self._prints(*args, **kwargs)
def flush(self):
self.stream.flush()
class FileStream(Stream):
def __init__(self, stream=None):
Stream.__init__(self, stream)
def prints(self, level, *args, **kwargs):
kwargs['file'] = self.stream
self._prints(*args, **kwargs)
class HTMLStream(Stream):
color = {
DEBUG: '<span style="color:green">',
INFO:'<span>',
WARN: '<span style="color:blue">',
ERROR: '<span style="color:red">'
}
normal = '</span>'
def __init__(self, stream=sys.stdout):
Stream.__init__(self, stream)
def prints(self, level, *args, **kwargs):
self.stream.write(self.color[level])
kwargs['file'] = self.stream
self._prints(*args, **kwargs)
self.stream.write(self.normal)
def flush(self):
self.stream.flush()
class UnicodeHTMLStream(HTMLStream):
def __init__(self):
self.clear()
def flush(self):
pass
def prints(self, level, *args, **kwargs):
col = self.color[level]
if col != self.last_col:
if self.data:
self.data.append(self.normal)
self.data.append(col)
self.last_col = col
sep = kwargs.get(u'sep', u' ')
end = kwargs.get(u'end', u'\n')
for arg in args:
if isbytestring(arg):
arg = force_unicode(arg)
elif not isinstance(arg, unicode):
arg = as_unicode(arg)
self.data.append(arg+sep)
self.plain_text.append(arg+sep)
self.data.append(end)
self.plain_text.append(end)
def clear(self):
self.data = []
self.plain_text = []
self.last_col = self.color[INFO]
@property
def html(self):
end = self.normal if self.data else u''
return u''.join(self.data) + end
def dump(self):
return [self.data, self.plain_text, self.last_col]
def load(self, dump):
self.data, self.plain_text, self.last_col = dump
def append_dump(self, dump):
d, p, lc = dump
self.data.extend(d)
self.plain_text.extend(p)
self.last_col = lc
class Log(object):
DEBUG = DEBUG
INFO = INFO
WARN = WARN
ERROR = ERROR
def __init__(self, level=INFO):
self.filter_level = level
default_output = ANSIStream()
self.outputs = [default_output]
self.debug = partial(self.prints, DEBUG)
self.info = partial(self.prints, INFO)
self.warn = self.warning = partial(self.prints, WARN)
self.error = partial(self.prints, ERROR)
def prints(self, level, *args, **kwargs):
if level < self.filter_level:
return
for output in self.outputs:
output.prints(level, *args, **kwargs)
def exception(self, *args, **kwargs):
limit = kwargs.pop('limit', None)
self.prints(ERROR, *args, **kwargs)
self.prints(DEBUG, traceback.format_exc(limit))
def __call__(self, *args, **kwargs):
self.prints(INFO, *args, **kwargs)
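# Illustrative sketch (editor's addition): typical Log usage. By default a
# Log writes coloured output to stdout through ANSIStream, and filter_level
# hides anything below the chosen severity.
def _example_log_usage():
    log = Log(level=WARN)
    log.debug('suppressed: below the WARN filter level')
    log.warn('shown in yellow on ANSI terminals')
    log.error('shown in red')
    log('also suppressed: __call__ logs at INFO level')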
class DevNull(Log):
def __init__(self):
Log.__init__(self, level=Log.ERROR)
self.outputs = []
class ThreadSafeLog(Log):
def __init__(self, level=Log.INFO):
Log.__init__(self, level=level)
self._lock = RLock()
def prints(self, *args, **kwargs):
with self._lock:
Log.prints(self, *args, **kwargs)
class ThreadSafeWrapper(Log):
def __init__(self, other_log):
Log.__init__(self, level=other_log.filter_level)
self.outputs = list(other_log.outputs)
self._lock = RLock()
def prints(self, *args, **kwargs):
with self._lock:
Log.prints(self, *args, **kwargs)
class GUILog(ThreadSafeLog):
'''
Logs in HTML and plain text as unicode. Ideal for display in a GUI context.
'''
def __init__(self):
ThreadSafeLog.__init__(self, level=self.DEBUG)
self.outputs = [UnicodeHTMLStream()]
def clear(self):
self.outputs[0].clear()
@property
def html(self):
return self.outputs[0].html
@property
def plain_text(self):
return u''.join(self.outputs[0].plain_text)
def dump(self):
return self.outputs[0].dump()
def load(self, dump):
return self.outputs[0].load(dump)
def append_dump(self, dump):
return self.outputs[0].append_dump(dump)
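# Illustrative sketch (editor's addition): GUILog records everything it is
# given (its filter level is DEBUG) and exposes the result both as HTML for
# a widget and as plain unicode text.
def _example_guilog_usage():
    log = GUILog()
    log.warn('conversion produced', 3, 'warnings')
    html = log.html        # markup with per-level <span> colouring
    text = log.plain_text  # the same content without markup
    log.clear()
    return html, text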
default_log = Log()
| gpl-3.0 |
factorlibre/openerp-server-6.1 | openerp/test/__init__.py | 14 | 1099 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP S.A. http://www.openerp.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from test_osv import *
from test_translate import *
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mosbasik/buzhug | javasrc/lib/Jython/Lib/test/test_jsr223.py | 23 | 2151 | # XXX Earlier version of this test also tested put, get, eval on the
# engine, however this introduced action at a distance where aspects
# of the sys state changed (notably sys.stdin.newlines), which then
# impacted test_univnewlines later in the regrtest.
#
# For now, there may be limits in how much we can test Jython from
# itself, no matter how attractive from an ouroboros perspective that
# may be :). Certainly worth revisiting in 2.6.
import unittest
import sys
from test import test_support
from javax.script import ScriptEngine, ScriptEngineManager
class JSR223TestCase(unittest.TestCase):
def test_factory(self):
engine = ScriptEngineManager().getEngineByName("python")
f = engine.factory
language_version = ".".join(str(comp) for comp in sys.version_info[0:2]) # such as "2.5"
impl_version = ".".join(str(comp) for comp in sys.version_info[0:3]) # such as "2.5.2"
self.assertNotEqual(f.scriptEngine, engine) # we don't pool engines
self.assertEqual(f.engineName, "jython")
self.assertEqual(f.engineVersion, impl_version)
self.assertEqual(set(f.extensions), set(['py']))
self.assertEqual(f.languageName, "python")
self.assertEqual(f.languageVersion, language_version)
self.assertEqual(set(f.names), set(["python", "jython"]))
self.assertEqual(set(f.mimeTypes), set(["text/python", "application/python", "text/x-python", "application/x-python"]))
# variants
self.assertEqual(f.getParameter(ScriptEngine.ENGINE), "jython")
self.assertEqual(f.getParameter(ScriptEngine.ENGINE_VERSION), impl_version)
self.assertEqual(f.getParameter(ScriptEngine.NAME), "jython")
self.assertEqual(f.getParameter(ScriptEngine.LANGUAGE), "python")
self.assertEqual(f.getParameter(ScriptEngine.LANGUAGE_VERSION), language_version)
self.assertEqual(f.getOutputStatement("abc"), "print u'abc'")
self.assertEqual(f.getProgram("x = 42", "y = 'abc'"), "x = 42\ny = 'abc'\n")
def test_main():
test_support.run_unittest(
JSR223TestCase)
if __name__ == "__main__":
test_main()
| bsd-3-clause |
joshuamorton/kermit | Course.py | 1 | 5083 | """
:Author: Joshua Morton
"""
class And(object):
"""
represents a set of prerequisites that must be taken together
"""
def __init__(self, *components):
"""
initializes the And object
self - the And
components: List[Union[Course, Or]] - the set of prerequisites that
must each be taken
"""
self.courses = frozenset(components)
def __eq__(self, other):
return self.courses == other.courses
def __hash__(self):
return hash(self.courses)
def __iter__(self):
return self.courses.__iter__()
def __bool__(self):
return bool(len(self.courses))
class Or(object):
all_ors = dict()
def __init__(self, *components):
"""
initializes an Or object representing a set of courses of which only
one must be taken
self - the Or object
name:str - the internal name for the object, should be unique
components:List[Union[And, Course]] - the courses that combine to make
up the Or, for example the probability and statistics requirement
can be fulfilled using MATH3670, ISYE 3770, ISYE 2027 AND ISYE
            2028 or various other options
In addition to setting the name and components, it constructs a course
object for use when rendering the graph of courses
"""
newcomponents = self._clean_components(components)
self.courses = frozenset(newcomponents)
if self.courses not in Or.all_ors:
Or.all_ors[self.courses] = Course(
" | ".join(c.name for c in sorted(self.courses,
key=lambda x: x.name)), 0,
prerequisites=newcomponents, description="OR")
self.course = Or.all_ors[self.courses]
self.course.height -= 1
# the course created doesn't actually contribute to the time it takes
        # to complete later courses
@staticmethod
def _clean_components(components):
"""
In the uncommon but possible case where a class has an And of
prerequisites, but one of those prerequisites is an Or, and that Or
itself contains another And, there's special proceessing that needs
to be done. Namely, for rendering as a graph on screen, we need to
create a virtual course named "AND" that is a child of the "OR", so
that the graph is human-parsable.
"""
newcomponents = []
for course in components:
if isinstance(course, And):
newcomponents.append(Course("("+" & ".join(c.name for c in
course.courses)
+ ")", 0,
prerequisites=course.courses,
description="AND"))
newcomponents[-1].height -= 1
else:
newcomponents.append(course)
return newcomponents
def __iter__(self):
return self.courses.__iter__()
def __getattr__(self, key):
try:
return object.__getattribute__(self, key)
except AttributeError:
return self.course.__getattribute__(key)
def __hash__(self):
return hash(self.course)
def __repr__(self):
return self.course.__repr__()
def __eq__(self, other):
return self.course == other.course
class Course(object):
"""
Represents a course
name - the name of the course, this must be unique
hours - number of credit hours
prerequisites - a set of prerequisites, must be an And object or None by
default
corequisites - a set of corequisite courses, must be an And object made up
of only or objects, or None by default
description - a short string, either the course name, or "AND" or "OR", for
rendering the graph on screen
"""
all_courses = dict()
def __init__(self, name, hours, prerequisites=None, corequisites=None,
description=None):
"""
"""
self.name = name
self.prerequisites = prerequisites or And()
self.corequisites = corequisites or And()
for coreq in self.corequisites:
coreq.corequisites = And(self, *[co for co in coreq.corequisites])
self.height = None # chain of prerequisites
self.hours = hours # class hours
if self.prerequisites:
self.height = max(req.height for req in self.prerequisites) + 1
else:
self.height = 0
if description is None:
self.description = self.name
else:
self.description = description
Course.all_courses[name] = self
def __str__(self):
return self.name + " (" + self.description + ")"
def __repr__(self):
return self.__str__()
def __eq__(self, other):
return self.name == other.name
def __hash__(self):
return hash(self.name)
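# Illustrative sketch (editor's addition): building a tiny prerequisite
# graph with hypothetical course names. And groups requirements that must
# all be met, Or wraps alternatives, and height tracks the longest chain of
# prerequisites.
def _example_course_graph():
    calc1 = Course("MATH 101", 4)
    calc2 = Course("MATH 102", 4, prerequisites=And(calc1))
    stats = Course("STAT 201", 3)
    # either the calculus sequence or the statistics course unlocks this one
    modeling = Course("MODL 301", 3,
                      prerequisites=And(Or(And(calc1, calc2), stats)))
    assert calc1.height == 0 and calc2.height == 1
    return modeling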
| mit |
alsrgv/tensorflow | tensorflow/python/ops/clip_ops_test.py | 19 | 3810 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Clip Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import numerics
from tensorflow.python.platform import test
class ClipOpsTest(test.TestCase):
def __init__(self, method_name="runTest"):
super(ClipOpsTest, self).__init__(method_name)
def _testClipTensorByNorm(self, inputs, max_norm, expected):
with self.cached_session() as sess:
input_op = constant_op.constant(inputs)
clipped = clip_ops.clip_by_norm(input_op, max_norm)
check_op = numerics.add_check_numerics_ops()
result, _ = self.evaluate([clipped, check_op])
self.assertAllClose(result, expected)
def _testClipIndexedSlicesByNorm(self, values, indices, shape, max_norm,
axes):
with self.cached_session() as sess:
values = constant_op.constant(values)
indices = constant_op.constant(indices)
shape = constant_op.constant(shape)
# IndexedSlices mode
indixed_slices = ops.IndexedSlices(values, indices, shape)
clipped = clip_ops.clip_by_norm(indixed_slices, max_norm, axes)
# clipped should be IndexedSlices
self.assertIsInstance(clipped, ops.IndexedSlices)
clipped = ops.convert_to_tensor(clipped)
# Tensor mode
dense_tensor = ops.convert_to_tensor(indixed_slices)
dense_clipped = clip_ops.clip_by_norm(dense_tensor, max_norm, axes)
result, expected = self.evaluate([clipped, dense_clipped])
self.assertAllClose(result, expected)
@test_util.run_deprecated_v1
def testClipTensorByNorm(self):
# Simple example
self._testClipTensorByNorm([[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]], 4.0,
[[-2.4, 0.0, 0.0], [3.2, 0.0, 0.0]])
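    # Worked example (editor's note): the global L2 norm of the input above
    # is sqrt((-3.0)**2 + 4.0**2) == 5.0, which exceeds max_norm == 4.0, so
    # every element is scaled by 4.0 / 5.0 == 0.8, giving -2.4 and 3.2.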
# Zero norm
self._testClipTensorByNorm([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], 4.0,
[[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
def testClipIndexedSlicesByNorm(self):
values = [[[-3.0, 0.0, 0.0], [4.0, 0.0, 0.0]],
[[0.0, 2.0, 0.0], [0.0, 0.0, -1.0]]]
indices = [2, 6]
shape = [10, 2, 3]
# Axes == None
self._testClipIndexedSlicesByNorm(values, indices, shape, 4.0, None)
# Axes == 0
self._testClipIndexedSlicesByNorm(values, indices, shape, 4.0, 0)
# Axes == 1
self._testClipIndexedSlicesByNorm(values, indices, shape, 4.0, 1)
    # Axes == 2
    self._testClipIndexedSlicesByNorm(values, indices, shape, 4.0, 2)
    # Axes == [0, 1]
    self._testClipIndexedSlicesByNorm(values, indices, shape, 4.0, [0, 1])
    # Axes == [0, 2]
    self._testClipIndexedSlicesByNorm(values, indices, shape, 4.0, [0, 2])
    # Axes == [1, 2]
    self._testClipIndexedSlicesByNorm(values, indices, shape, 4.0, [1, 2])
    # Axes == [0, 1, 2]
    self._testClipIndexedSlicesByNorm(values, indices, shape, 4.0, [0, 1, 2])
if __name__ == "__main__":
test.main()
| apache-2.0 |
raildo/nova | nova/tests/functional/api_sample_tests/test_user_data.py | 16 | 1802 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from oslo_config import cfg
from nova.tests.functional.api_sample_tests import api_sample_base
from nova.tests.unit.image import fake
CONF = cfg.CONF
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class UserDataJsonTest(api_sample_base.ApiSampleTestBaseV21):
extension_name = "os-user-data"
def _get_flags(self):
f = super(UserDataJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.user_data.User_data')
return f
def test_user_data_post(self):
user_data_contents = '#!/bin/bash\n/bin/su\necho "I am in you!"\n'
user_data = base64.b64encode(user_data_contents)
subs = {
'image_id': fake.get_valid_image_id(),
'host': self._get_host(),
'user_data': user_data
}
response = self._do_post('servers', 'userdata-post-req', subs)
subs.update(self._get_regexes())
self._verify_response('userdata-post-resp', subs, response, 202)
| apache-2.0 |
races1986/SafeLanguage | CEM/wiktionary/meaningtest.py | 3 | 2688 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Unit tests for meaning.py"""
import meaning
import unittest
class KnownValues(unittest.TestCase):
knownParserValues = (
("*German: [[wichtig]]",
[('de','wichtig','',1,False,'')]
),
("*[[Esperanto]]: [[grava]]",
[('eo','grava','',1,False,'')]
),
("*{{fr}}: [[importante]] {{f}}",
[('fr','importante','f',1,False,'')]
),
("*Dutch: [[voorbeelden]] ''n, pl'', [[instructies]] {{f}}, {{p}}",
[('nl','voorbeelden','n',2,False,''),
('nl','instructies', 'f',2,False,'')]
),
("*Russian: [[шесток]] ''m'' (shestok)",
[('ru','шесток','m',1,False,'shestok')]
),
("*Kazakh: сәлем, салам, сәлеметсіздер(respectable)",
[('ka','сәлем','',1,False,''),
('ka','салам','',1,False,''),
('ka','сәлеметсіздер','',1,False,'respectable')]
),
("*Chinese(Mandarin):[[你好]](ni3 hao3), [[您好]](''formal'' nin2 hao3)",
[('zh','你好','',1,False,'ni3 hao3'),
('zh','您好','',1,False,"''formal'' nin2 hao3")]
),
("*German: [[Lamm]] ''n'' [[:de:Lamm|(de)]]",
[('de','Lamm','n',1,False,'')]
),
("*Italian: [[pronto#Italian|pronto]]",
[('it','pronto','',1,False,'')]
),
)
def testParser(self):
'''self.term, self.gender, self.number, self.diminutive and remark parsed correctly from Wiki format'''
for wikiline, results in self.knownParserValues:
ameaning = meaning.Meaning('en', 'dummy')
ameaning.parseTranslations(wikiline)
i=0
for termlang, thisterm, termgender, termnumber, termisadiminutive, remark in results:
resultterm = ameaning.translations[termlang]['alltrans'][i]['trans']
self.assertEqual(resultterm.getTerm(), thisterm)
self.assertEqual(resultterm.getGender(), termgender)
self.assertEqual(resultterm.getNumber(), termnumber)
# self.assertEqual(resultterm.getIsDiminutive(), termisadiminutive)
self.assertEqual(ameaning.translations[termlang]['alltrans'][i]['remark'], remark)
i+=1
if __name__ == "__main__":
unittest.main()
| epl-1.0 |
mcgachey/edx-platform | lms/djangoapps/courseware/tests/test_microsites.py | 13 | 10331 | """
Tests related to the Microsites feature
"""
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from nose.plugins.attrib import attr
from courseware.tests.helpers import LoginEnrollmentTestCase
from course_modes.models import CourseMode
from xmodule.course_module import (
CATALOG_VISIBILITY_CATALOG_AND_ABOUT, CATALOG_VISIBILITY_NONE)
from xmodule.modulestore.tests.factories import CourseFactory, ItemFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
@attr('shard_1')
class TestMicrosites(ModuleStoreTestCase, LoginEnrollmentTestCase):
"""
This is testing of the Microsite feature
"""
STUDENT_INFO = [('[email protected]', 'foo'), ('[email protected]', 'foo')]
def setUp(self):
super(TestMicrosites, self).setUp()
# use a different hostname to test Microsites since they are
# triggered on subdomain mappings
#
# NOTE: The Microsite Configuration is in lms/envs/test.py. The content for the Test Microsite is in
# test_microsites/test_microsite.
#
# IMPORTANT: For these tests to work, this domain must be defined via
# DNS configuration (either local or published)
self.course = CourseFactory.create(display_name='Robot_Super_Course', org='TestMicrositeX')
self.chapter0 = ItemFactory.create(parent_location=self.course.location,
display_name='Overview')
self.chapter9 = ItemFactory.create(parent_location=self.course.location,
display_name='factory_chapter')
self.section0 = ItemFactory.create(parent_location=self.chapter0.location,
display_name='Welcome')
self.section9 = ItemFactory.create(parent_location=self.chapter9.location,
display_name='factory_section')
self.course_outside_microsite = CourseFactory.create(display_name='Robot_Course_Outside_Microsite', org='FooX')
# have a course which explicitly sets visibility in catalog to False
self.course_hidden_visibility = CourseFactory.create(
display_name='Hidden_course',
org='TestMicrositeX',
catalog_visibility=CATALOG_VISIBILITY_NONE,
)
# have a course which explicitly sets visibility in catalog and about to true
self.course_with_visibility = CourseFactory.create(
display_name='visible_course',
org='TestMicrositeX',
course="foo",
catalog_visibility=CATALOG_VISIBILITY_CATALOG_AND_ABOUT,
)
def setup_users(self):
# Create student accounts and activate them.
for i in range(len(self.STUDENT_INFO)):
email, password = self.STUDENT_INFO[i]
username = 'u{0}'.format(i)
self.create_account(username, email, password)
self.activate_user(email)
@override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
def test_microsite_anonymous_homepage_content(self):
"""
Verify that the homepage, when accessed via a Microsite domain, returns
HTML that reflects the Microsite branding elements
"""
resp = self.client.get('/', HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME)
self.assertEqual(resp.status_code, 200)
# assert various branding definitions on this Microsite
# as per the configuration and Microsite overrides
self.assertContains(resp, 'This is a Test Microsite Overlay') # Overlay test message
self.assertContains(resp, 'test_microsite/images/header-logo.png') # logo swap
self.assertContains(resp, 'test_microsite/css/test_microsite') # css override
self.assertContains(resp, 'Test Microsite') # page title
# assert that test course display name is visible
self.assertContains(resp, 'Robot_Super_Course')
# assert that test course with 'visible_in_catalog' to True is showing up
self.assertContains(resp, 'visible_course')
# assert that test course that is outside microsite is not visible
self.assertNotContains(resp, 'Robot_Course_Outside_Microsite')
# assert that a course that has visible_in_catalog=False is not visible
self.assertNotContains(resp, 'Hidden_course')
        # assert that footer template has been properly overridden on homepage
self.assertContains(resp, 'This is a Test Microsite footer')
# assert that the edX partners section is not in the HTML
self.assertNotContains(resp, '<section class="university-partners university-partners2x6">')
# assert that the edX partners tag line is not in the HTML
self.assertNotContains(resp, 'Explore free courses from')
def test_not_microsite_anonymous_homepage_content(self):
"""
Make sure we see the right content on the homepage if we are not in a microsite
"""
resp = self.client.get('/')
self.assertEqual(resp.status_code, 200)
# assert various branding definitions on this Microsite ARE NOT VISIBLE
self.assertNotContains(resp, 'This is a Test Microsite Overlay') # Overlay test message
self.assertNotContains(resp, 'test_microsite/images/header-logo.png') # logo swap
self.assertNotContains(resp, 'test_microsite/css/test_microsite') # css override
self.assertNotContains(resp, '<title>Test Microsite</title>') # page title
# assert that test course display name IS NOT VISIBLE, since that is a Microsite only course
self.assertNotContains(resp, 'Robot_Super_Course')
# assert that test course that is outside microsite IS VISIBLE
self.assertContains(resp, 'Robot_Course_Outside_Microsite')
        # assert that footer template has been properly overridden on homepage
self.assertNotContains(resp, 'This is a Test Microsite footer')
def test_no_redirect_on_homepage_when_no_enrollments(self):
"""
Verify that a user going to homepage will not redirect if he/she has no course enrollments
"""
self.setup_users()
email, password = self.STUDENT_INFO[0]
self.login(email, password)
resp = self.client.get(reverse('root'), HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME)
self.assertEquals(resp.status_code, 200)
def test_no_redirect_on_homepage_when_has_enrollments(self):
"""
Verify that a user going to homepage will not redirect to dashboard if he/she has
a course enrollment
"""
self.setup_users()
email, password = self.STUDENT_INFO[0]
self.login(email, password)
self.enroll(self.course, True)
resp = self.client.get(reverse('root'), HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME)
self.assertEquals(resp.status_code, 200)
def test_microsite_course_enrollment(self):
"""
Enroll user in a course scoped in a Microsite and one course outside of a Microsite
and make sure that they are only visible in the right Dashboards
"""
self.setup_users()
email, password = self.STUDENT_INFO[1]
self.login(email, password)
self.enroll(self.course, True)
self.enroll(self.course_outside_microsite, True)
# Access the microsite dashboard and make sure the right courses appear
resp = self.client.get(reverse('dashboard'), HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME)
self.assertContains(resp, 'Robot_Super_Course')
self.assertNotContains(resp, 'Robot_Course_Outside_Microsite')
# Now access the non-microsite dashboard and make sure the right courses appear
resp = self.client.get(reverse('dashboard'))
self.assertNotContains(resp, 'Robot_Super_Course')
self.assertContains(resp, 'Robot_Course_Outside_Microsite')
@override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
def test_visible_about_page_settings(self):
"""
        Make sure the Microsite is honoring the visible_about_page permissions
        that are set in configuration
"""
url = reverse('about_course', args=[self.course_with_visibility.id.to_deprecated_string()])
resp = self.client.get(url, HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME)
self.assertEqual(resp.status_code, 200)
url = reverse('about_course', args=[self.course_hidden_visibility.id.to_deprecated_string()])
resp = self.client.get(url, HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME)
self.assertEqual(resp.status_code, 404)
@override_settings(SITE_NAME=settings.MICROSITE_TEST_HOSTNAME)
def test_paid_course_registration(self):
"""
Make sure that Microsite overrides on the ENABLE_SHOPPING_CART and
ENABLE_PAID_COURSE_ENROLLMENTS are honored
"""
course_mode = CourseMode(
course_id=self.course_with_visibility.id,
mode_slug="honor",
mode_display_name="honor cert",
min_price=10,
)
course_mode.save()
# first try on the non microsite, which
# should pick up the global configuration (where ENABLE_PAID_COURSE_REGISTRATIONS = False)
url = reverse('about_course', args=[self.course_with_visibility.id.to_deprecated_string()])
resp = self.client.get(url)
self.assertEqual(resp.status_code, 200)
self.assertIn("Enroll in {}".format(self.course_with_visibility.id.course), resp.content)
self.assertNotIn("Add {} to Cart ($10)".format(self.course_with_visibility.id.course), resp.content)
# now try on the microsite
url = reverse('about_course', args=[self.course_with_visibility.id.to_deprecated_string()])
resp = self.client.get(url, HTTP_HOST=settings.MICROSITE_TEST_HOSTNAME)
self.assertEqual(resp.status_code, 200)
self.assertNotIn("Enroll in {}".format(self.course_with_visibility.id.course), resp.content)
self.assertIn("Add {} to Cart <span>($10 USD)</span>".format(
self.course_with_visibility.id.course
), resp.content)
self.assertIn('$("#add_to_cart_post").click', resp.content)
| agpl-3.0 |
nwchandler/ansible | lib/ansible/module_utils/facts/system/selinux.py | 52 | 3061 | # Collect facts related to selinux
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.facts.collector import BaseFactCollector
try:
import selinux
HAVE_SELINUX = True
except ImportError:
HAVE_SELINUX = False
SELINUX_MODE_DICT = {1: 'enforcing',
0: 'permissive',
-1: 'disabled'}
class SelinuxFactCollector(BaseFactCollector):
name = 'selinux'
_fact_ids = set()
def collect(self, module=None, collected_facts=None):
facts_dict = {}
selinux_facts = {}
# This is weird. The value of the facts 'selinux' key can be False or a dict
if not HAVE_SELINUX:
facts_dict['selinux'] = False
facts_dict['selinux_python_present'] = False
return facts_dict
facts_dict['selinux_python_present'] = True
if not selinux.is_selinux_enabled():
selinux_facts['status'] = 'disabled'
# NOTE: this could just return in the above clause and the rest of this is up an indent -akl
else:
selinux_facts['status'] = 'enabled'
try:
selinux_facts['policyvers'] = selinux.security_policyvers()
except (AttributeError, OSError):
selinux_facts['policyvers'] = 'unknown'
try:
(rc, configmode) = selinux.selinux_getenforcemode()
if rc == 0:
selinux_facts['config_mode'] = SELINUX_MODE_DICT.get(configmode, 'unknown')
else:
selinux_facts['config_mode'] = 'unknown'
except (AttributeError, OSError):
selinux_facts['config_mode'] = 'unknown'
try:
mode = selinux.security_getenforce()
selinux_facts['mode'] = SELINUX_MODE_DICT.get(mode, 'unknown')
except (AttributeError, OSError):
selinux_facts['mode'] = 'unknown'
try:
(rc, policytype) = selinux.selinux_getpolicytype()
if rc == 0:
selinux_facts['type'] = policytype
else:
selinux_facts['type'] = 'unknown'
except (AttributeError, OSError):
selinux_facts['type'] = 'unknown'
facts_dict['selinux'] = selinux_facts
return facts_dict
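# Illustrative sketch (editor's addition): the collector is normally driven
# by Ansible's fact-gathering machinery, but it can be exercised directly.
# On a host without the selinux Python bindings the result degrades to
# {'selinux': False, 'selinux_python_present': False}.
def _example_collect():
    collector = SelinuxFactCollector()
    facts = collector.collect()
    return facts.get('selinux')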
| gpl-3.0 |
ashutoshvt/psi4 | samples/python/mints13/input.py | 24 | 4179 | #! test fragment decomposition + to/from_dict
import numpy as np
import psi4
from psi4.driver import qcdb
psi4.set_output_file("output.dat", False)
def test_chgmult(expected, cgmpdict, label):
rc, rfc, rm, rfm = expected
qcdb.compare_integers(rc, cgmpdict['molecular_charge'], label + ': c')
qcdb.compare_integers(rm, cgmpdict['molecular_multiplicity'], label + ': m')
qcdb.compare_integers(True, np.allclose(cgmpdict['fragment_charges'], rfc), label + ': fc')
qcdb.compare_integers(True, np.allclose(cgmpdict['fragment_multiplicities'], rfm), label + ': fm')
def test_dimer(mol, expected_cgmp, label, mtype):
mol.update_geometry()
dAB = mol.to_dict()
test_chgmult(expected_cgmp['AB'], dAB, label + ' AB')
mAB = mtype.from_dict(dAB)
qcdb.compare_molrecs(dAB, mAB.to_dict(), label + ' AB roundtrip', atol=1.e-6)
aB = mol.extract_subsets(2, 1)
daB = aB.to_dict()
test_chgmult(expected_cgmp['aB'], daB, label + ' aB')
maB = mtype.from_dict(daB)
qcdb.compare_molrecs(daB, maB.to_dict(), label + ' aB roundtrip', atol=1.e-6)
Ab = mol.extract_subsets(1, 2)
dAb = Ab.to_dict()
test_chgmult(expected_cgmp['Ab'], dAb, label + ' Ab')
mAb = mtype.from_dict(dAb)
qcdb.compare_molrecs(dAb, mAb.to_dict(), label + ' Ab roundtrip', atol=1.e-6)
A_ = mol.extract_subsets(1)
dA_ = A_.to_dict()
test_chgmult(expected_cgmp['A_'], dA_, label + ' A_')
mA_ = mtype.from_dict(dA_)
qcdb.compare_molrecs(dA_, mA_.to_dict(), label + ' A_ roundtrip', atol=1.e-6)
_B = mol.extract_subsets(2)
d_B = _B.to_dict()
test_chgmult(expected_cgmp['_B'], d_B, label + ' _B')
m_B = mtype.from_dict(d_B)
qcdb.compare_molrecs(d_B, m_B.to_dict(), label + ' _B roundtrip', atol=1.e-6)
qcdb.compare_integers(True, type(mol) == mtype, label + ': AB type')
qcdb.compare_integers(True, type(Ab) == mtype, label + ': Ab type')
eneyne = """
C 0.000000 -0.667578 -2.124659
C 0.000000 0.667578 -2.124659
H 0.923621 -1.232253 -2.126185
H -0.923621 -1.232253 -2.126185
H -0.923621 1.232253 -2.126185
H 0.923621 1.232253 -2.126185
--
C 0.000000 0.000000 2.900503
C 0.000000 0.000000 1.693240
H 0.000000 0.000000 0.627352
H 0.000000 0.000000 3.963929
"""
eneyne_cgmp = {
'AB': (0, [0, 0], 1, [1, 1]),
'aB': (0, [0, 0], 1, [1, 1]),
'Ab': (0, [0, 0], 1, [1, 1]),
'A_': (0, [0], 1, [1]),
'_B': (0, [0], 1, [1]),
}
negpos = """
-1 1
O 0.0 0.0 0.0
H 0.0 0.0 1.0
--
1 1
O 2.0 2.0 2.0
H 3.0 2.0 2.0
H 2.0 3.0 2.0
H 2.0 2.0 3.0
"""
negpos_cgmp = {
'AB': (0, [-1, 1], 1, [1, 1]),
'A_': (-1, [-1], 1, [1]),
'_B': (1, [1], 1, [1]),
'Ab': (-1, [-1, 0], 1, [1, 1]),
'aB': (1, [0, 1], 1, [1, 1]),
}
qeneyne = qcdb.Molecule(eneyne)
peneyne = psi4.geometry(eneyne)
qnegpos = qcdb.Molecule(negpos)
pnegpos = psi4.geometry(negpos)
test_dimer(qeneyne, eneyne_cgmp, 'Q: eneyne', qcdb.Molecule)
test_dimer(peneyne, eneyne_cgmp, 'P: eneyne', psi4.core.Molecule)
test_dimer(qnegpos, negpos_cgmp, 'Q: negpos', qcdb.Molecule)
test_dimer(pnegpos, negpos_cgmp, 'P: negpos', psi4.core.Molecule)
# Once the user starts messing with cgmp other than in construction, the user
# has no way to adjust fragment cgmp, and the Psi/QCDB Molecule classes don't
# do much to set things in order. Upon to_dict, things get sorted into some
# physical reality, but fragment charges in a complicated system like this
# won't get sorted out to resemble their initial state (could do more
# try/catch, but that's really the class's job). So really all that can be
# tested is the main dimer's total charge and total multiplicity.
qnegpos.set_multiplicity(3)
qnegpos.set_molecular_charge(2)
qresetAB = qnegpos.to_dict()
qcdb.compare_integers(2, qresetAB['molecular_charge'], 'Q: reset-negpos: c')
qcdb.compare_integers(3, qresetAB['molecular_multiplicity'], 'Q: reset-negpos: m')
pnegpos.set_multiplicity(3)
pnegpos.set_molecular_charge(2)
presetAB = pnegpos.to_dict()
qcdb.compare_integers(2, presetAB['molecular_charge'], 'P: reset-negpos: c')
qcdb.compare_integers(3, presetAB['molecular_multiplicity'], 'P: reset-negpos: m')
| lgpl-3.0 |
nathanaevitas/odoo | openerp/addons/l10n_hu/__openerp__.py | 320 | 1815 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 InnOpen Group Kft (<http://www.innopen.eu>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Hungarian - Accounting',
'version': '1.0',
'category': 'Localization/Account Charts',
'description': """
Base module for Hungarian localization
==========================================
    This module consists of:
- Generic Hungarian chart of accounts
- Hungarian taxes
- Hungarian Bank information
""",
'author': 'InnOpen Group Kft',
'website': 'http://www.innopen.eu',
'license': 'AGPL-3',
'depends': ['account','account_chart'],
'data': [
'data/account.account.template.csv',
'data/account.tax.code.template.csv',
'data/account.chart.template.csv',
'data/account.tax.template.csv',
'data/account.fiscal.position.template.csv',
'data/account.fiscal.position.tax.template.csv',
'data/res.bank.csv',
],
'installable': True,
'auto_install': False,
}
| agpl-3.0 |
firerszd/kbengine | kbe/res/scripts/common/Lib/multiprocessing/semaphore_tracker.py | 100 | 4820 | #
# On Unix we run a server process which keeps track of unlinked
# semaphores. The server ignores SIGINT and SIGTERM and reads from a
# pipe. Every other process of the program has a copy of the writable
# end of the pipe, so we get EOF when all other processes have exited.
# Then the server process unlinks any remaining semaphore names.
#
# This is important because the system only supports a limited number
# of named semaphores, and they will not be automatically removed till
# the next reboot. Without this semaphore tracker process, "killall
# python" would probably leave unlinked semaphores.
#
import os
import signal
import sys
import threading
import warnings
import _multiprocessing
from . import spawn
from . import util
__all__ = ['ensure_running', 'register', 'unregister']
class SemaphoreTracker(object):
def __init__(self):
self._lock = threading.Lock()
self._fd = None
def getfd(self):
self.ensure_running()
return self._fd
def ensure_running(self):
'''Make sure that semaphore tracker process is running.
This can be run from any process. Usually a child process will use
the semaphore created by its parent.'''
with self._lock:
if self._fd is not None:
return
fds_to_pass = []
try:
fds_to_pass.append(sys.stderr.fileno())
except Exception:
pass
cmd = 'from multiprocessing.semaphore_tracker import main;main(%d)'
r, w = os.pipe()
try:
fds_to_pass.append(r)
                # process will outlive us, so no need to wait on pid
exe = spawn.get_executable()
args = [exe] + util._args_from_interpreter_flags()
args += ['-c', cmd % r]
util.spawnv_passfds(exe, args, fds_to_pass)
except:
os.close(w)
raise
else:
self._fd = w
finally:
os.close(r)
def register(self, name):
'''Register name of semaphore with semaphore tracker.'''
self._send('REGISTER', name)
def unregister(self, name):
'''Unregister name of semaphore with semaphore tracker.'''
self._send('UNREGISTER', name)
def _send(self, cmd, name):
self.ensure_running()
msg = '{0}:{1}\n'.format(cmd, name).encode('ascii')
if len(name) > 512:
# posix guarantees that writes to a pipe of less than PIPE_BUF
# bytes are atomic, and that PIPE_BUF >= 512
raise ValueError('name too long')
nbytes = os.write(self._fd, msg)
assert nbytes == len(msg)
_semaphore_tracker = SemaphoreTracker()
ensure_running = _semaphore_tracker.ensure_running
register = _semaphore_tracker.register
unregister = _semaphore_tracker.unregister
getfd = _semaphore_tracker.getfd
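# Illustrative sketch (editor's note): how the rest of multiprocessing is
# expected to drive this module. Kept as comments because ensure_running()
# spawns the tracker process as a side effect; the semaphore name below is
# hypothetical.
#
#   from multiprocessing import semaphore_tracker
#   semaphore_tracker.ensure_running()
#   semaphore_tracker.register('/mp-12345-0')    # after the semaphore is made
#   ...
#   semaphore_tracker.unregister('/mp-12345-0')  # after sem_unlink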
def main(fd):
'''Run semaphore tracker.'''
# protect the process from ^C and "killall python" etc
signal.signal(signal.SIGINT, signal.SIG_IGN)
signal.signal(signal.SIGTERM, signal.SIG_IGN)
for f in (sys.stdin, sys.stdout):
try:
f.close()
except Exception:
pass
cache = set()
try:
# keep track of registered/unregistered semaphores
with open(fd, 'rb') as f:
for line in f:
try:
cmd, name = line.strip().split(b':')
if cmd == b'REGISTER':
cache.add(name)
elif cmd == b'UNREGISTER':
cache.remove(name)
else:
raise RuntimeError('unrecognized command %r' % cmd)
except Exception:
try:
sys.excepthook(*sys.exc_info())
except:
pass
finally:
# all processes have terminated; cleanup any remaining semaphores
if cache:
try:
warnings.warn('semaphore_tracker: There appear to be %d '
'leaked semaphores to clean up at shutdown' %
len(cache))
except Exception:
pass
for name in cache:
# For some reason the process which created and registered this
# semaphore has failed to unregister it. Presumably it has died.
# We therefore unlink it.
try:
name = name.decode('ascii')
try:
_multiprocessing.sem_unlink(name)
except Exception as e:
warnings.warn('semaphore_tracker: %r: %s' % (name, e))
finally:
pass
| lgpl-3.0 |
nvl1109/openembeded | recipes/python/python-pyyaml/setup.py | 69 | 1936 | NAME = 'PyYAML'
VERSION = '3.06'
DESCRIPTION = "YAML parser and emitter for Python"
LONG_DESCRIPTION = """\
YAML is a data serialization format designed for human readability and
interaction with scripting languages. PyYAML is a YAML parser and
emitter for Python.
PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
support, capable extension API, and sensible error messages. PyYAML
supports standard YAML tags and provides Python-specific tags that allow
to represent an arbitrary Python object.
PyYAML is applicable for a broad range of tasks from complex
configuration files to object serialization and persistence."""
AUTHOR = "Kirill Simonov"
AUTHOR_EMAIL = '[email protected]'
LICENSE = "MIT"
PLATFORMS = "Any"
URL = "http://pyyaml.org/wiki/PyYAML"
DOWNLOAD_URL = "http://pyyaml.org/download/pyyaml/%s-%s.tar.gz" % (NAME, VERSION)
CLASSIFIERS = [
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Markup",
]
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import sys, os.path
if __name__ == '__main__':
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
platforms=PLATFORMS,
url=URL,
download_url=DOWNLOAD_URL,
classifiers=CLASSIFIERS,
package_dir={'': 'lib'},
packages=['yaml'],
ext_modules = [
Extension( "_yaml", ["ext/_yaml.pyx"], libraries = ["yaml"] )
],
cmdclass={
'build_ext': build_ext,
},
)
| mit |
olehermanse/sim_game | sim_game/geometry.py | 1 | 2921 | import math
def limit(number, lower, upper):
    """Clamp number to the inclusive range [lower, upper]; either bound may
    be None to leave that side unbounded."""
    assert lower is None or upper is None or lower < upper
    if lower is not None and number < lower:
        number = lower
    if upper is not None and number > upper:
        number = upper
    # TODO: remove these asserts and make tests
    assert upper is None or number <= upper
    assert lower is None or number >= lower
    return number
class Point:
def __init__(self, x,y):
self.x = x
self.y = y
def position(self):
return (self.x, self.y)
def xy(self):
return (self.x, self.y)
def set(self, x, y):
self.x = x
self.y = y
def __add__(self, other):
return Point(self.x+other.x, self.y+other.y)
def __sub__(self, other):
return Point(self.x-other.x, self.y-other.y)
def __getitem__(self, key):
if type(key) is not int:
raise TypeError
return self.xy()[key]
def distance(self, other):
x,y = self.x - other.x, self.y-other.y
return math.sqrt(x**2 + y**2)
class Rectangle:
def __init__(self, pos, dimensions, anchor=(0,0), offset=(0,0)):
self.position = Point(*pos)
self.dimensions = Point(*dimensions)
self.offset = Point(*offset)
self.anchor = Point(*anchor)
def set_pos(self, x, y):
self.position.set(x,y)
def xy(self):
return self.position.xy()
def offset_xy(self):
x,y = self.xy()
ox, oy = self.offset.xy()
x += ox
y += oy
return x,y
def top_left(self):
x,y = self.offset_xy()
w,h = self.dimensions.xy()
x,y = x-w/2, y-h/2
ax, ay = self.anchor.xy()
x = x + ax * w/2
y = y + ay * h/2
return x,y
def points(self):
x,y = self.top_left()
w,h = self.dimensions.xy()
return (Point(x, y),
Point(x + w, y),
Point(x + w, y + h),
Point(x, y + h))
def contains_point(self, point):
point = Point(*point)
sx,sy = self.top_left()
sw,sh = self.dimensions.xy()
x,y = point.position()
if ( x < sx
or y < sy
or x > sx + sw
or y > sy + sh ):
return False
return True
def contains_rectangle(self, rectangle):
for p in rectangle.points():
if p not in self:
return False
return True
def collision(self, other):
if type(other) is Point:
return other in self
for p in other.points():
if p not in self:
return False
return True
    def __contains__(self, value):
if type(value) is Point:
return self.contains_point(value)
if type(value) is Rectangle:
return self.contains_rectangle(value)
raise TypeError("Unknown type for contains")
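# Illustrative sketch (editor's addition): rectangles are centred on their
# position by default, so a 4x4 box at (10, 10) spans x, y in [8, 12].
# Coordinates here are hypothetical.
def _example_geometry():
    box = Rectangle((10, 10), (4, 4))
    assert Point(10, 10) in box
    assert Point(20, 20) not in box
    assert box.collision(Rectangle((10, 10), (2, 2)))
    return box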
| mit |
sometallgit/AutoUploader | Python27/Lib/site-packages/tests/test__pkce.py | 15 | 1954 | # Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from oauth2client import _pkce
class PKCETests(unittest.TestCase):
@mock.patch('oauth2client._pkce.os.urandom')
def test_verifier(self, fake_urandom):
canned_randomness = (
b'\x98\x10D7\xf3\xb7\xaa\xfc\xdd\xd3M\xe2'
b'\xa3,\x06\xa0\xb0\xa9\xb4\x8f\xcb\xd0'
b'\xf5\x86N2p\x8c]!W\x9a\xed54\x99\x9d'
b'\x8dv\\\xa7/\x81\xf3J\x98\xc3\x90\xee'
b'\xb0\x8c\xb7Zc#\x05M0O\x08\xda\t\x1f\x07'
)
fake_urandom.return_value = canned_randomness
expected = (
b'mBBEN_O3qvzd003ioywGoLCptI_L0PWGTjJwjF0hV5rt'
b'NTSZnY12XKcvgfNKmMOQ7rCMt1pjIwVNME8I2gkfBw'
)
result = _pkce.code_verifier()
self.assertEqual(result, expected)
def test_verifier_too_long(self):
with self.assertRaises(ValueError) as caught:
_pkce.code_verifier(97)
self.assertIn("too long", str(caught.exception))
def test_verifier_too_short(self):
with self.assertRaises(ValueError) as caught:
_pkce.code_verifier(30)
self.assertIn("too short", str(caught.exception))
def test_challenge(self):
result = _pkce.code_challenge(b'SOME_VERIFIER')
expected = b'6xJCQsjTtS3zjUwd8_ZqH0SyviGHnp5PsHXWKOCqDuI'
self.assertEqual(result, expected)
| mit |
ekcs/congress | thirdparty/antlr3-antlr-3.5/runtime/Python/tests/t054main.py | 16 | 7926 | # -*- coding: utf-8 -*-
import unittest
import textwrap
import antlr3
import antlr3.tree
import testbase
import sys
from StringIO import StringIO
class T(testbase.ANTLRTest):
def setUp(self):
self.oldPath = sys.path[:]
sys.path.insert(0, self.baseDir)
def tearDown(self):
sys.path = self.oldPath
def testOverrideMain(self):
grammar = textwrap.dedent(
r"""lexer grammar T3;
options {
language = Python;
}
@main {
def main(argv):
raise RuntimeError("no")
}
ID: ('a'..'z' | '\u00c0'..'\u00ff')+;
WS: ' '+ { $channel = HIDDEN; };
""")
stdout = StringIO()
lexerMod = self.compileInlineGrammar(grammar, returnModule=True)
try:
lexerMod.main(
['lexer.py']
)
self.fail()
except RuntimeError:
pass
def testLexerFromFile(self):
input = "foo bar"
inputPath = self.writeFile("input.txt", input)
grammar = textwrap.dedent(
r"""lexer grammar T1;
options {
language = Python;
}
ID: 'a'..'z'+;
WS: ' '+ { $channel = HIDDEN; };
""")
stdout = StringIO()
lexerMod = self.compileInlineGrammar(grammar, returnModule=True)
lexerMod.main(
['lexer.py', inputPath],
stdout=stdout
)
self.failUnlessEqual(len(stdout.getvalue().splitlines()), 3)
def testLexerFromStdIO(self):
input = "foo bar"
grammar = textwrap.dedent(
r"""lexer grammar T2;
options {
language = Python;
}
ID: 'a'..'z'+;
WS: ' '+ { $channel = HIDDEN; };
""")
stdout = StringIO()
lexerMod = self.compileInlineGrammar(grammar, returnModule=True)
lexerMod.main(
['lexer.py'],
stdin=StringIO(input),
stdout=stdout
)
self.failUnlessEqual(len(stdout.getvalue().splitlines()), 3)
def testLexerEncoding(self):
input = u"föö bär".encode('utf-8')
grammar = textwrap.dedent(
r"""lexer grammar T3;
options {
language = Python;
}
ID: ('a'..'z' | '\u00c0'..'\u00ff')+;
WS: ' '+ { $channel = HIDDEN; };
""")
stdout = StringIO()
lexerMod = self.compileInlineGrammar(grammar, returnModule=True)
lexerMod.main(
['lexer.py', '--encoding', 'utf-8'],
stdin=StringIO(input),
stdout=stdout
)
self.failUnlessEqual(len(stdout.getvalue().splitlines()), 3)
def testCombined(self):
input = "foo bar"
grammar = textwrap.dedent(
r"""grammar T4;
options {
language = Python;
}
r returns [res]: (ID)+ EOF { $res = $text; };
ID: 'a'..'z'+;
WS: ' '+ { $channel = HIDDEN; };
""")
stdout = StringIO()
lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True)
parserMod.main(
['combined.py', '--rule', 'r'],
stdin=StringIO(input),
stdout=stdout
)
stdout = stdout.getvalue()
self.failUnlessEqual(len(stdout.splitlines()), 1, stdout)
def testCombinedOutputAST(self):
input = "foo + bar"
grammar = textwrap.dedent(
r"""grammar T5;
options {
language = Python;
output = AST;
}
r: ID OP^ ID EOF!;
ID: 'a'..'z'+;
OP: '+';
WS: ' '+ { $channel = HIDDEN; };
""")
stdout = StringIO()
lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True)
parserMod.main(
['combined.py', '--rule', 'r'],
stdin=StringIO(input),
stdout=stdout
)
stdout = stdout.getvalue().strip()
self.failUnlessEqual(stdout, "(+ foo bar)")
def testTreeParser(self):
grammar = textwrap.dedent(
r'''grammar T6;
options {
language = Python;
output = AST;
}
r: ID OP^ ID EOF!;
ID: 'a'..'z'+;
OP: '+';
WS: ' '+ { $channel = HIDDEN; };
''')
treeGrammar = textwrap.dedent(
r'''tree grammar T6Walker;
options {
language=Python;
ASTLabelType=CommonTree;
tokenVocab=T6;
}
r returns [res]: ^(OP a=ID b=ID)
{ $res = "\%s \%s \%s" \% ($a.text, $OP.text, $b.text) }
;
''')
lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True)
walkerMod = self.compileInlineGrammar(treeGrammar, returnModule=True)
stdout = StringIO()
walkerMod.main(
['walker.py', '--rule', 'r', '--parser', 'T6Parser', '--parser-rule', 'r', '--lexer', 'T6Lexer'],
stdin=StringIO("a+b"),
stdout=stdout
)
stdout = stdout.getvalue().strip()
self.failUnlessEqual(stdout, "u'a + b'")
def testTreeParserRewrite(self):
grammar = textwrap.dedent(
r'''grammar T7;
options {
language = Python;
output = AST;
}
r: ID OP^ ID EOF!;
ID: 'a'..'z'+;
OP: '+';
WS: ' '+ { $channel = HIDDEN; };
''')
treeGrammar = textwrap.dedent(
r'''tree grammar T7Walker;
options {
language=Python;
ASTLabelType=CommonTree;
tokenVocab=T7;
output=AST;
}
tokens {
ARG;
}
r: ^(OP a=ID b=ID) -> ^(OP ^(ARG ID) ^(ARG ID));
''')
lexerMod, parserMod = self.compileInlineGrammar(grammar, returnModule=True)
walkerMod = self.compileInlineGrammar(treeGrammar, returnModule=True)
stdout = StringIO()
walkerMod.main(
['walker.py', '--rule', 'r', '--parser', 'T7Parser', '--parser-rule', 'r', '--lexer', 'T7Lexer'],
stdin=StringIO("a+b"),
stdout=stdout
)
stdout = stdout.getvalue().strip()
self.failUnlessEqual(stdout, "(+ (ARG a) (ARG b))")
def testGrammarImport(self):
slave = textwrap.dedent(
r'''
parser grammar T8S;
options {
language=Python;
}
a : B;
''')
parserName = self.writeInlineGrammar(slave)[0]
# slave parsers are imported as normal python modules
# to force reloading current version, purge module from sys.modules
try:
del sys.modules[parserName+'Parser']
except KeyError:
pass
master = textwrap.dedent(
r'''
grammar T8M;
options {
language=Python;
}
import T8S;
s returns [res]: a { $res = $a.text };
B : 'b' ; // defines B from inherited token space
WS : (' '|'\n') {self.skip()} ;
''')
stdout = StringIO()
lexerMod, parserMod = self.compileInlineGrammar(master, returnModule=True)
parserMod.main(
['import.py', '--rule', 's'],
stdin=StringIO("b"),
stdout=stdout
)
stdout = stdout.getvalue().strip()
self.failUnlessEqual(stdout, "u'b'")
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
si618/pi-time | node_modules/grunt-pylint/tasks/lib/pylint/test/functional/inherit_non_class.py | 3 | 1287 | """Test that inheriting from something which is not
a class emits a warning. """
# pylint: disable=no-init, import-error, invalid-name
# pylint: disable=missing-docstring, too-few-public-methods, no-absolute-import
from missing import Missing
if 1:
Ambiguous = None
else:
Ambiguous = int
class Empty(object):
""" Empty class. """
def return_class():
""" Return a class. """
return Good3
class Bad(1): # [inherit-non-class]
""" Can't inherit from instance. """
class Bad1(lambda abc: 42): # [inherit-non-class]
""" Can't inherit from lambda. """
class Bad2(object()): # [inherit-non-class]
""" Can't inherit from an instance of object. """
class Bad3(return_class): # [inherit-non-class]
""" Can't inherit from function. """
class Bad4(Empty()): # [inherit-non-class]
""" Can't inherit from instance. """
class Good(object):
pass
class Good1(int):
pass
class Good2(type):
pass
class Good3(type(int)):
pass
class Good4(return_class()):
pass
class Good5(Good4, int, object):
pass
class Good6(Ambiguous):
""" Inherits from something ambiguous.
This could emit a warning when we will have
flow detection.
"""
class Unknown(Missing):
pass
class Unknown1(Good5 if True else Bad1):
pass
| gpl-3.0 |
amyvmiwei/kbengine | kbe/src/lib/python/Lib/gettext.py | 90 | 17661 | """Internationalization and localization support.
This module provides internationalization (I18N) and localization (L10N)
support for your Python programs by providing an interface to the GNU gettext
message catalog library.
I18N refers to the operation by which a program is made aware of multiple
languages. L10N refers to the adaptation of your program, once
internationalized, to the local language and cultural habits.
"""
# This module represents the integration of work, contributions, feedback, and
# suggestions from the following people:
#
# Martin von Loewis, who wrote the initial implementation of the underlying
# C-based libintlmodule (later renamed _gettext), along with a skeletal
# gettext.py implementation.
#
# Peter Funk, who wrote fintl.py, a fairly complete wrapper around intlmodule,
# which also included a pure-Python implementation to read .mo files if
# intlmodule wasn't available.
#
# James Henstridge, who also wrote a gettext.py module, which has some
# interesting, but currently unsupported experimental features: the notion of
# a Catalog class and instances, and the ability to add to a catalog file via
# a Python API.
#
# Barry Warsaw integrated these modules, wrote the .install() API and code,
# and conformed all C and Python code to Python's coding standards.
#
# Francois Pinard and Marc-Andre Lemburg also contributed valuably to this
# module.
#
# J. David Ibanez implemented plural forms. Bruno Haible fixed some bugs.
#
# TODO:
# - Lazy loading of .mo files. Currently the entire catalog is loaded into
# memory, but that's probably bad for large translated programs. Instead,
# the lexical sort of original strings in GNU .mo files should be exploited
# to do binary searches and lazy initializations. Or you might want to use
# the undocumented double-hash algorithm for .mo files with hash tables, but
# you'll need to study the GNU gettext code to do this.
#
# - Support Solaris .mo file formats. Unfortunately, we've been unable to
# find this format documented anywhere.
import locale, copy, io, os, re, struct, sys
from errno import ENOENT
__all__ = ['NullTranslations', 'GNUTranslations', 'Catalog',
'find', 'translation', 'install', 'textdomain', 'bindtextdomain',
'dgettext', 'dngettext', 'gettext', 'ngettext',
]
_default_localedir = os.path.join(sys.base_prefix, 'share', 'locale')
def c2py(plural):
"""Gets a C expression as used in PO files for plural forms and returns a
Python lambda function that implements an equivalent expression.
"""
# Security check, allow only the "n" identifier
import token, tokenize
tokens = tokenize.generate_tokens(io.StringIO(plural).readline)
try:
danger = [x for x in tokens if x[0] == token.NAME and x[1] != 'n']
except tokenize.TokenError:
raise ValueError('plural forms expression error, maybe unbalanced parenthesis')
else:
if danger:
raise ValueError('plural forms expression could be dangerous')
# Replace some C operators by their Python equivalents
plural = plural.replace('&&', ' and ')
plural = plural.replace('||', ' or ')
expr = re.compile(r'\!([^=])')
plural = expr.sub(' not \\1', plural)
# Regular expression and replacement function used to transform
# "a?b:c" to "b if a else c".
expr = re.compile(r'(.*?)\?(.*?):(.*)')
def repl(x):
return "(%s if %s else %s)" % (x.group(2), x.group(1),
expr.sub(repl, x.group(3)))
# Code to transform the plural expression, taking care of parentheses
stack = ['']
for c in plural:
if c == '(':
stack.append('')
elif c == ')':
if len(stack) == 1:
# Actually, we never reach this code, because unbalanced
# parentheses get caught in the security check at the
# beginning.
raise ValueError('unbalanced parenthesis in plural form')
s = expr.sub(repl, stack.pop())
stack[-1] += '(%s)' % s
else:
stack[-1] += c
plural = expr.sub(repl, stack.pop())
return eval('lambda n: int(%s)' % plural)
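# Editor's illustrative sketch (not part of the stdlib module): c2py() in
# action on a germanic and on a three-form slavic Plural-Forms expression;
# this hypothetical helper is never called by the module itself.
def _c2py_demo():
    germanic = c2py('n != 1')
    assert [germanic(n) for n in (0, 1, 2)] == [1, 0, 1]
    slavic = c2py('(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 '
                  '&& (n%100<10 || n%100>=20) ? 1 : 2)')
    assert (slavic(1), slavic(3), slavic(5)) == (0, 1, 2)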
def _expand_lang(loc):
loc = locale.normalize(loc)
COMPONENT_CODESET = 1 << 0
COMPONENT_TERRITORY = 1 << 1
COMPONENT_MODIFIER = 1 << 2
# split up the locale into its base components
mask = 0
pos = loc.find('@')
if pos >= 0:
modifier = loc[pos:]
loc = loc[:pos]
mask |= COMPONENT_MODIFIER
else:
modifier = ''
pos = loc.find('.')
if pos >= 0:
codeset = loc[pos:]
loc = loc[:pos]
mask |= COMPONENT_CODESET
else:
codeset = ''
pos = loc.find('_')
if pos >= 0:
territory = loc[pos:]
loc = loc[:pos]
mask |= COMPONENT_TERRITORY
else:
territory = ''
language = loc
ret = []
for i in range(mask+1):
if not (i & ~mask): # if all components for this combo exist ...
val = language
if i & COMPONENT_TERRITORY: val += territory
if i & COMPONENT_CODESET: val += codeset
if i & COMPONENT_MODIFIER: val += modifier
ret.append(val)
ret.reverse()
return ret
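# Editor's note (illustrative, not part of the stdlib module): assuming
# locale.normalize() leaves 'de_DE.UTF-8@euro' unchanged, the expansion
# walks every component combination from most to least specific:
#
#   _expand_lang('de_DE.UTF-8@euro')
#   -> ['de_DE.UTF-8@euro', 'de_DE@euro', 'de.UTF-8@euro', 'de@euro',
#       'de_DE.UTF-8', 'de_DE', 'de.UTF-8', 'de']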
class NullTranslations:
def __init__(self, fp=None):
self._info = {}
self._charset = None
self._output_charset = None
self._fallback = None
if fp is not None:
self._parse(fp)
def _parse(self, fp):
pass
def add_fallback(self, fallback):
if self._fallback:
self._fallback.add_fallback(fallback)
else:
self._fallback = fallback
def gettext(self, message):
if self._fallback:
return self._fallback.gettext(message)
return message
def lgettext(self, message):
if self._fallback:
return self._fallback.lgettext(message)
return message
def ngettext(self, msgid1, msgid2, n):
if self._fallback:
return self._fallback.ngettext(msgid1, msgid2, n)
if n == 1:
return msgid1
else:
return msgid2
def lngettext(self, msgid1, msgid2, n):
if self._fallback:
return self._fallback.lngettext(msgid1, msgid2, n)
if n == 1:
return msgid1
else:
return msgid2
def info(self):
return self._info
def charset(self):
return self._charset
def output_charset(self):
return self._output_charset
def set_output_charset(self, charset):
self._output_charset = charset
def install(self, names=None):
import builtins
builtins.__dict__['_'] = self.gettext
if hasattr(names, "__contains__"):
if "gettext" in names:
builtins.__dict__['gettext'] = builtins.__dict__['_']
if "ngettext" in names:
builtins.__dict__['ngettext'] = self.ngettext
if "lgettext" in names:
builtins.__dict__['lgettext'] = self.lgettext
if "lngettext" in names:
builtins.__dict__['lngettext'] = self.lngettext
class GNUTranslations(NullTranslations):
# Magic number of .mo files
LE_MAGIC = 0x950412de
BE_MAGIC = 0xde120495
def _parse(self, fp):
"""Override this method to support alternative .mo formats."""
unpack = struct.unpack
filename = getattr(fp, 'name', '')
# Parse the .mo file header, which consists of 5 little endian 32
# bit words.
self._catalog = catalog = {}
self.plural = lambda n: int(n != 1) # germanic plural by default
buf = fp.read()
buflen = len(buf)
# Are we big endian or little endian?
magic = unpack('<I', buf[:4])[0]
if magic == self.LE_MAGIC:
version, msgcount, masteridx, transidx = unpack('<4I', buf[4:20])
ii = '<II'
elif magic == self.BE_MAGIC:
version, msgcount, masteridx, transidx = unpack('>4I', buf[4:20])
ii = '>II'
else:
raise OSError(0, 'Bad magic number', filename)
# Now put all messages from the .mo file buffer into the catalog
# dictionary.
for i in range(0, msgcount):
mlen, moff = unpack(ii, buf[masteridx:masteridx+8])
mend = moff + mlen
tlen, toff = unpack(ii, buf[transidx:transidx+8])
tend = toff + tlen
if mend < buflen and tend < buflen:
msg = buf[moff:mend]
tmsg = buf[toff:tend]
else:
raise OSError(0, 'File is corrupt', filename)
# See if we're looking at GNU .mo conventions for metadata
if mlen == 0:
# Catalog description
lastk = k = None
for b_item in tmsg.split('\n'.encode("ascii")):
item = b_item.decode().strip()
if not item:
continue
if ':' in item:
k, v = item.split(':', 1)
k = k.strip().lower()
v = v.strip()
self._info[k] = v
lastk = k
elif lastk:
self._info[lastk] += '\n' + item
if k == 'content-type':
self._charset = v.split('charset=')[1]
elif k == 'plural-forms':
v = v.split(';')
plural = v[1].split('plural=')[1]
self.plural = c2py(plural)
# Note: we unconditionally convert both msgids and msgstrs to
# Unicode using the character encoding specified in the charset
# parameter of the Content-Type header. The gettext documentation
# strongly encourages msgids to be us-ascii, but some applications
# require alternative encodings (e.g. Zope's ZCML and ZPT). For
# traditional gettext applications, the msgid conversion will
# cause no problems since us-ascii should always be a subset of
# the charset encoding. We may want to fall back to 8-bit msgids
# if the Unicode conversion fails.
charset = self._charset or 'ascii'
if b'\x00' in msg:
# Plural forms
msgid1, msgid2 = msg.split(b'\x00')
tmsg = tmsg.split(b'\x00')
msgid1 = str(msgid1, charset)
for i, x in enumerate(tmsg):
catalog[(msgid1, i)] = str(x, charset)
else:
catalog[str(msg, charset)] = str(tmsg, charset)
# advance to next entry in the seek tables
masteridx += 8
transidx += 8
def lgettext(self, message):
missing = object()
tmsg = self._catalog.get(message, missing)
if tmsg is missing:
if self._fallback:
return self._fallback.lgettext(message)
return message
if self._output_charset:
return tmsg.encode(self._output_charset)
return tmsg.encode(locale.getpreferredencoding())
def lngettext(self, msgid1, msgid2, n):
try:
tmsg = self._catalog[(msgid1, self.plural(n))]
if self._output_charset:
return tmsg.encode(self._output_charset)
return tmsg.encode(locale.getpreferredencoding())
except KeyError:
if self._fallback:
return self._fallback.lngettext(msgid1, msgid2, n)
if n == 1:
return msgid1
else:
return msgid2
def gettext(self, message):
missing = object()
tmsg = self._catalog.get(message, missing)
if tmsg is missing:
if self._fallback:
return self._fallback.gettext(message)
return message
return tmsg
def ngettext(self, msgid1, msgid2, n):
try:
tmsg = self._catalog[(msgid1, self.plural(n))]
except KeyError:
if self._fallback:
return self._fallback.ngettext(msgid1, msgid2, n)
if n == 1:
tmsg = msgid1
else:
tmsg = msgid2
return tmsg
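# Editor's illustrative sketch (not part of the stdlib module): the .mo
# header _parse() reads is five 32-bit words -- magic, version, message
# count, and the offsets of the two string tables.  A hypothetical
# standalone peek at the same layout (assumes a valid catalog file):
def _mo_header_demo(mofile):
    with open(mofile, 'rb') as fp:
        buf = fp.read(20)
    magic = struct.unpack('<I', buf[:4])[0]
    order = '<' if magic == GNUTranslations.LE_MAGIC else '>'
    version, msgcount, masteridx, transidx = struct.unpack(order + '4I',
                                                           buf[4:20])
    return version, msgcount, masteridx, transidx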
# Locate a .mo file using the gettext strategy
def find(domain, localedir=None, languages=None, all=False):
# Get some reasonable defaults for arguments that were not supplied
if localedir is None:
localedir = _default_localedir
if languages is None:
languages = []
for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
val = os.environ.get(envar)
if val:
languages = val.split(':')
break
if 'C' not in languages:
languages.append('C')
# now normalize and expand the languages
nelangs = []
for lang in languages:
for nelang in _expand_lang(lang):
if nelang not in nelangs:
nelangs.append(nelang)
# select a language
if all:
result = []
else:
result = None
for lang in nelangs:
if lang == 'C':
break
mofile = os.path.join(localedir, lang, 'LC_MESSAGES', '%s.mo' % domain)
if os.path.exists(mofile):
if all:
result.append(mofile)
else:
return mofile
return result
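# Editor's illustrative sketch (not part of the stdlib module): find()
# probes <localedir>/<lang>/LC_MESSAGES/<domain>.mo over the expanded
# language list and stops at 'C'; 'myapp' and 'locale' are made-up names.
def _find_demo():
    # returns a path string when a catalog exists, otherwise None
    return find('myapp', localedir='locale', languages=['de_DE.UTF-8'])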
# a mapping between absolute .mo file path and Translation object
_translations = {}
def translation(domain, localedir=None, languages=None,
class_=None, fallback=False, codeset=None):
if class_ is None:
class_ = GNUTranslations
mofiles = find(domain, localedir, languages, all=True)
if not mofiles:
if fallback:
return NullTranslations()
raise OSError(ENOENT, 'No translation file found for domain', domain)
# Avoid opening, reading, and parsing the .mo file after it's been done
# once.
result = None
for mofile in mofiles:
key = (class_, os.path.abspath(mofile))
t = _translations.get(key)
if t is None:
with open(mofile, 'rb') as fp:
t = _translations.setdefault(key, class_(fp))
# Copy the translation object to allow setting fallbacks and
# output charset. All other instance data is shared with the
# cached object.
t = copy.copy(t)
if codeset:
t.set_output_charset(codeset)
if result is None:
result = t
else:
result.add_fallback(t)
return result
def install(domain, localedir=None, codeset=None, names=None):
t = translation(domain, localedir, fallback=True, codeset=codeset)
t.install(names)
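# Editor's illustrative sketch (not part of the stdlib module): typical
# application wiring; 'myapp' and the 'locale' directory are made-up names,
# and fallback=True degrades gracefully to identity translation.
def _translation_demo():
    t = translation('myapp', localedir='locale', fallback=True)
    return t.gettext('Hello world')  # the msgid itself if no .mo was found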
# a mapping between domains and locale directories
_localedirs = {}
# a mapping between domains and codesets
_localecodesets = {}
# current global domain, `messages' used for compatibility with GNU gettext
_current_domain = 'messages'
def textdomain(domain=None):
global _current_domain
if domain is not None:
_current_domain = domain
return _current_domain
def bindtextdomain(domain, localedir=None):
global _localedirs
if localedir is not None:
_localedirs[domain] = localedir
return _localedirs.get(domain, _default_localedir)
def bind_textdomain_codeset(domain, codeset=None):
global _localecodesets
if codeset is not None:
_localecodesets[domain] = codeset
return _localecodesets.get(domain)
def dgettext(domain, message):
try:
t = translation(domain, _localedirs.get(domain, None),
codeset=_localecodesets.get(domain))
except OSError:
return message
return t.gettext(message)
def ldgettext(domain, message):
try:
t = translation(domain, _localedirs.get(domain, None),
codeset=_localecodesets.get(domain))
except OSError:
return message
return t.lgettext(message)
def dngettext(domain, msgid1, msgid2, n):
try:
t = translation(domain, _localedirs.get(domain, None),
codeset=_localecodesets.get(domain))
except OSError:
if n == 1:
return msgid1
else:
return msgid2
return t.ngettext(msgid1, msgid2, n)
def ldngettext(domain, msgid1, msgid2, n):
try:
t = translation(domain, _localedirs.get(domain, None),
codeset=_localecodesets.get(domain))
except OSError:
if n == 1:
return msgid1
else:
return msgid2
return t.lngettext(msgid1, msgid2, n)
def gettext(message):
return dgettext(_current_domain, message)
def lgettext(message):
return ldgettext(_current_domain, message)
def ngettext(msgid1, msgid2, n):
return dngettext(_current_domain, msgid1, msgid2, n)
def lngettext(msgid1, msgid2, n):
return ldngettext(_current_domain, msgid1, msgid2, n)
# dcgettext() has been deemed unnecessary and is not implemented.
# James Henstridge's Catalog constructor from GNOME gettext. Documented usage
# was:
#
# import gettext
# cat = gettext.Catalog(PACKAGE, localedir=LOCALEDIR)
# _ = cat.gettext
# print _('Hello World')
# The resulting catalog object currently doesn't support access through a
# dictionary API, which was supported (but apparently unused) in GNOME
# gettext.
Catalog = translation
| lgpl-3.0 |
fernandezcuesta/ansible | test/units/modules/network/netscaler/test_netscaler_lb_vserver.py | 7 | 33531 |
# Copyright (c) 2017 Citrix Systems
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from ansible.compat.tests.mock import patch, Mock, MagicMock, call
from .netscaler_module import TestModule, nitro_base_patcher, set_module_args
import sys
if sys.version_info[:2] != (2, 6):
import requests
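# Editor's note (illustrative, not part of the original suite): the tests
# below make the absent nitro SDK importable by planting MagicMocks in
# sys.modules before the module under test is imported (see setUpClass).
# A minimal standalone version of that trick, with a made-up package name:
def _sys_modules_stub_demo():
    with patch.dict(sys.modules, {'missing_sdk': MagicMock()}):
        import missing_sdk                   # succeeds despite no such package
        return missing_sdk.Client().login()  # every attribute chain is a Mock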
class TestNetscalerLBVServerModule(TestModule):
@classmethod
def setUpClass(cls):
class MockException(Exception):
pass
cls.MockException = MockException
m = MagicMock()
cls.server_mock = MagicMock()
cls.server_mock.__class__ = MagicMock(add=Mock())
nssrc_modules_mock = {
'nssrc.com.citrix.netscaler.nitro.resource.config.lb': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver.lbvserver': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_service_binding.lbvserver_service_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_servicegroup_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.lb.lbvserver_servicegroup_binding.lbvserver_servicegroup_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.ssl': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslvserver_sslcertkey_binding': m,
'nssrc.com.citrix.netscaler.nitro.resource.config.ssl.sslvserver_sslcertkey_binding.sslvserver_sslcertkey_binding': m,
}
cls.nitro_specific_patcher = patch.dict(sys.modules, nssrc_modules_mock)
cls.nitro_base_patcher = nitro_base_patcher
@classmethod
def tearDownClass(cls):
cls.nitro_base_patcher.stop()
cls.nitro_specific_patcher.stop()
def setUp(self):
self.nitro_base_patcher.start()
self.nitro_specific_patcher.start()
# Setup minimal required arguments to pass AnsibleModule argument parsing
def tearDown(self):
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
def test_graceful_nitro_api_import_error(self):
# Stop nitro api patching to cause ImportError
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
self.nitro_base_patcher.stop()
self.nitro_specific_patcher.stop()
from ansible.modules.network.netscaler import netscaler_lb_vserver
self.module = netscaler_lb_vserver
result = self.failed()
self.assertEqual(result['msg'], 'Could not load nitro python sdk')
def test_graceful_nitro_error_on_login(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
class MockException(Exception):
def __init__(self, *args, **kwargs):
self.errorcode = 0
self.message = ''
client_mock = Mock()
client_mock.login = Mock(side_effect=MockException)
m = Mock(return_value=client_mock)
with patch('ansible.modules.network.netscaler.netscaler_lb_vserver.get_nitro_client', m):
with patch('ansible.modules.network.netscaler.netscaler_lb_vserver.nitro_exception', MockException):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertTrue(result['msg'].startswith('nitro exception'), msg='nitro exception during login not handled properly')
def test_graceful_no_connection_error(self):
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.ConnectionError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=m,
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertTrue(result['msg'].startswith('Connection error'), msg='Connection error was not handled gracefully')
def test_graceful_login_error(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
if sys.version_info[:2] == (2, 6):
self.skipTest('requests library not available under python2.6')
client_mock = Mock()
attrs = {'login.side_effect': requests.exceptions.SSLError}
client_mock.configure_mock(**attrs)
m = Mock(return_value=client_mock)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=m,
nitro_exception=self.MockException,
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertTrue(result['msg'].startswith('SSL Error'), msg='SSL Error was not handled gracefully')
def test_save_config_called_on_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
m = Mock(return_value=client_mock)
lb_vserver_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=m,
lb_vserver_exists=Mock(side_effect=[False, True]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_lb_vserver
self.exited()
self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_called_on_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
m = Mock(return_value=client_mock)
lb_vserver_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=m,
lb_vserver_exists=Mock(side_effect=[True, False]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_lb_vserver
self.exited()
self.assertIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_present(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
m = Mock(return_value=client_mock)
lb_vserver_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=m,
lb_vserver_exists=Mock(side_effect=[False, True]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_lb_vserver
self.exited()
self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_save_config_not_called_on_state_absent(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
m = Mock(return_value=client_mock)
lb_vserver_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=m,
lb_vserver_exists=Mock(side_effect=[True, False]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_lb_vserver
self.exited()
self.assertNotIn(call.save_config(), client_mock.mock_calls)
def test_ensure_feature_is_enabled_called(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
lb_vserver_proxy_mock = Mock()
feature_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[True, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=feature_mock,
do_state_change=Mock(return_value=Mock(errorcode=0)),
):
self.module = netscaler_lb_vserver
self.exited()
feature_mock.assert_called_with(client_mock, 'LB')
def test_ensure_feature_is_enabled_nitro_exception_caught(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
client_mock = Mock()
lb_vserver_proxy_mock = Mock()
errorcode = 10
message = 'mock error'
class MockException(Exception):
def __init__(self):
self.errorcode = errorcode
self.message = message
feature_mock = Mock(side_effect=MockException)
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[True, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=feature_mock,
nitro_exception=MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
expected_msg = 'nitro exception errorcode=%s, message=%s' % (errorcode, message)
self.assertEqual(result['msg'], expected_msg, 'Failed to handle nitro exception')
def test_create_new_lb_vserver_workflow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=Mock()),
lb_vserver_exists=Mock(side_effect=[False, True]),
lb_vserver_identical=Mock(side_effect=[True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
do_state_change=Mock(return_value=Mock(errorcode=0)),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
):
self.module = netscaler_lb_vserver
result = self.exited()
lb_vserver_proxy_mock.assert_has_calls([call.add()])
self.assertTrue(result['changed'])
def test_update_lb_vserver_workflow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=Mock()),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=Mock(return_value=[]),
):
self.module = netscaler_lb_vserver
result = self.exited()
lb_vserver_proxy_mock.assert_has_calls([call.update()])
self.assertTrue(result['changed'])
def test_service_bindings_handling(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
configured_dict = {
'first': Mock(),
'second': Mock(has_equal_attributes=Mock(return_value=False)),
}
actual_dict = {
'second': Mock(),
'third': Mock(),
}
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[False, True]),
get_configured_service_bindings=Mock(return_value=configured_dict),
get_actual_service_bindings=Mock(return_value=actual_dict),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
):
self.module = netscaler_lb_vserver
result = self.exited()
configured_dict['first'].assert_has_calls([call.add()])
configured_dict['second'].assert_has_calls([call.has_equal_attributes(actual_dict['second']), call.add()])
actual_dict['second'].assert_has_calls([call.delete(client_mock, actual_dict['second'])])
actual_dict['third'].assert_has_calls([call.delete(client_mock, actual_dict['third'])])
self.assertTrue(result['changed'])
def test_servicegroup_bindings_handling(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
configured_dict = {
'first': Mock(),
'second': Mock(has_equal_attributes=Mock(return_value=False)),
}
actual_dict = {
'second': Mock(),
'third': Mock(),
}
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[False, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
get_configured_servicegroup_bindings=Mock(return_value=configured_dict),
get_actual_servicegroup_bindings=Mock(return_value=actual_dict),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
):
self.module = netscaler_lb_vserver
result = self.exited()
configured_dict['first'].assert_has_calls([call.add()])
configured_dict['second'].assert_has_calls([call.has_equal_attributes(actual_dict['second']), call.add()])
actual_dict['second'].assert_has_calls([call.delete(client_mock, actual_dict['second'])])
actual_dict['third'].assert_has_calls([call.delete(client_mock, actual_dict['third'])])
self.assertTrue(result['changed'])
def test_ssl_bindings_handling(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
servicetype='SSL',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
ssl_sync_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, True]),
ssl_certkey_bindings_sync=ssl_sync_mock,
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.exited()
self.assertTrue(len(ssl_sync_mock.mock_calls) > 0, msg='ssl cert_key bindings not called')
self.assertTrue(result['changed'])
def test_ssl_bindings_not_called_for_non_ssl_service(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
save_config=False,
servicetype='HTTP',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
ssl_sync_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, True]),
ssl_certkey_bindings_sync=ssl_sync_mock,
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
):
self.module = netscaler_lb_vserver
result = self.exited()
ssl_sync_mock.assert_not_called()
self.assertTrue(result['changed'])
def test_server_exists_sanity_check(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
ssl_sync_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[False, False]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, True]),
ssl_certkey_bindings_sync=ssl_sync_mock,
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertEqual(result['msg'], 'Did not create lb vserver')
def test_server_identical_sanity_check(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
ssl_sync_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, False]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[True, True]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, True]),
ssl_certkey_bindings_sync=ssl_sync_mock,
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertEqual(result['msg'], 'lb vserver is not configured correctly')
def test_service_bindings_sanity_check(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[True, True]),
service_bindings_identical=Mock(side_effect=[False, False]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, False]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertEqual(result['msg'], 'service bindings are not identical')
def test_servicegroup_bindings_sanity_check(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[False, False]),
service_bindings_identical=Mock(side_effect=[True, True]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, False]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertEqual(result['msg'], 'servicegroup bindings are not identical')
def test_server_servicegroup_bindings_sanity_check(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False, True]),
servicegroup_bindings_identical=Mock(side_effect=[False, False]),
service_bindings_identical=Mock(side_effect=[True, True]),
ssl_certkey_bindings_identical=Mock(side_effect=[False, False]),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=(Mock(return_value=[])),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertEqual(result['msg'], 'servicegroup bindings are not identical')
def test_absent_state_workflow(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
lb_vserver_exists=Mock(side_effect=[True, False]),
):
self.module = netscaler_lb_vserver
result = self.exited()
lb_vserver_proxy_mock.assert_has_calls([call.delete()])
self.assertTrue(result['changed'])
def test_absent_state_sanity_check(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='absent',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
lb_vserver_exists=Mock(side_effect=[True, True]),
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
lb_vserver_proxy_mock.assert_has_calls([call.delete()])
self.assertEqual(result['msg'], 'lb vserver still exists')
def test_disabled_state_change_called(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
do_state_change_mock = Mock(return_value=Mock(errorcode=0))
client_mock = Mock()
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(return_value=True),
lb_vserver_exists=Mock(side_effect=[True, True]),
do_state_change=do_state_change_mock,
):
self.module = netscaler_lb_vserver
self.exited()
self.assertTrue(len(do_state_change_mock.mock_calls) > 0, msg='Did not call state change')
def test_get_immutables_failure(self):
set_module_args(dict(
nitro_user='user',
nitro_pass='pass',
nsip='1.1.1.1',
state='present',
))
from ansible.modules.network.netscaler import netscaler_lb_vserver
lb_vserver_proxy_mock = Mock()
client_mock = Mock()
m = Mock(return_value=['some'])
with patch.multiple(
'ansible.modules.network.netscaler.netscaler_lb_vserver',
get_nitro_client=Mock(return_value=client_mock),
ConfigProxy=Mock(return_value=lb_vserver_proxy_mock),
ensure_feature_is_enabled=Mock(),
lb_vserver_exists=Mock(side_effect=[True, True]),
lb_vserver_identical=Mock(side_effect=[False]),
do_state_change=Mock(return_value=Mock(errorcode=0)),
get_immutables_intersection=m,
nitro_exception=self.MockException,
):
self.module = netscaler_lb_vserver
result = self.failed()
self.assertTrue(
result['msg'].startswith('Cannot update immutable attributes'),
msg='Did not handle immutables error correctly',
)
| gpl-3.0 |
fmacias64/keras | tests/auto/test_graph_model.py | 22 | 10705 | from __future__ import print_function
import unittest
import numpy as np
np.random.seed(1337)
from keras.models import Graph, Sequential
from keras.layers import containers
from keras.layers.core import Dense, Activation
from keras.utils.test_utils import get_test_data
X = np.random.random((100, 32))
X2 = np.random.random((100, 32))
y = np.random.random((100, 4))
y2 = np.random.random((100,))
(X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(32,),
classification=False, output_shape=(4,))
(X2_train, y2_train), (X2_test, y2_test) = get_test_data(nb_train=1000, nb_test=200, input_shape=(32,),
classification=False, output_shape=(1,))
class TestGraph(unittest.TestCase):
def test_1o_1i(self):
print('test a non-sequential graph with 1 input and 1 output')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(32, 4), name='dense2', input='input1')
graph.add_node(Dense(16, 4), name='dense3', input='dense1')
graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')
graph.compile('rmsprop', {'output1': 'mse'})
history = graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10)
out = graph.predict({'input1': X_test})
        assert(type(out) == dict)
assert(len(out) == 1)
loss = graph.test_on_batch({'input1': X_test, 'output1': y_test})
loss = graph.train_on_batch({'input1': X_test, 'output1': y_test})
loss = graph.evaluate({'input1': X_test, 'output1': y_test})
print(loss)
assert(loss < 2.5)
def test_1o_1i_2(self):
print('test a more complex non-sequential graph with 1 input and 1 output')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(32, 4), name='dense2-0', input='input1')
graph.add_node(Activation('relu'), name='dense2', input='dense2-0')
graph.add_node(Dense(4, 16), name='dense3', input='dense2')
graph.add_node(Dense(16, 4), name='dense4', inputs=['dense1', 'dense3'], merge_mode='sum')
graph.add_output(name='output1', inputs=['dense2', 'dense4'], merge_mode='sum')
graph.compile('rmsprop', {'output1': 'mse'})
history = graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10)
out = graph.predict({'input1': X_train})
        assert(type(out) == dict)
assert(len(out) == 1)
loss = graph.test_on_batch({'input1': X_test, 'output1': y_test})
loss = graph.train_on_batch({'input1': X_test, 'output1': y_test})
loss = graph.evaluate({'input1': X_test, 'output1': y_test})
print(loss)
assert(loss < 2.5)
graph.get_config(verbose=1)
def test_1o_2i(self):
print('test a non-sequential graph with 2 inputs and 1 output')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_input(name='input2', ndim=2)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(32, 4), name='dense2', input='input2')
graph.add_node(Dense(16, 4), name='dense3', input='dense1')
graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')
graph.compile('rmsprop', {'output1': 'mse'})
history = graph.fit({'input1': X_train, 'input2': X2_train, 'output1': y_train}, nb_epoch=10)
out = graph.predict({'input1': X_test, 'input2': X2_test})
        assert(type(out) == dict)
assert(len(out) == 1)
loss = graph.test_on_batch({'input1': X_test, 'input2': X2_test, 'output1': y_test})
loss = graph.train_on_batch({'input1': X_test, 'input2': X2_test, 'output1': y_test})
loss = graph.evaluate({'input1': X_test, 'input2': X2_test, 'output1': y_test})
print(loss)
assert(loss < 3.0)
graph.get_config(verbose=1)
def test_2o_1i_weights(self):
print('test a non-sequential graph with 1 input and 2 outputs')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(32, 4), name='dense2', input='input1')
graph.add_node(Dense(16, 1), name='dense3', input='dense1')
graph.add_output(name='output1', input='dense2')
graph.add_output(name='output2', input='dense3')
graph.compile('rmsprop', {'output1': 'mse', 'output2': 'mse'})
history = graph.fit({'input1': X_train, 'output1': y_train, 'output2': y2_train}, nb_epoch=10)
out = graph.predict({'input1': X_test})
        assert(type(out) == dict)
assert(len(out) == 2)
loss = graph.test_on_batch({'input1': X_test, 'output1': y_test, 'output2': y2_test})
loss = graph.train_on_batch({'input1': X_test, 'output1': y_test, 'output2': y2_test})
loss = graph.evaluate({'input1': X_test, 'output1': y_test, 'output2': y2_test})
print(loss)
assert(loss < 4.)
print('test weight saving')
graph.save_weights('temp.h5', overwrite=True)
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(32, 4), name='dense2', input='input1')
graph.add_node(Dense(16, 1), name='dense3', input='dense1')
graph.add_output(name='output1', input='dense2')
graph.add_output(name='output2', input='dense3')
graph.compile('rmsprop', {'output1': 'mse', 'output2': 'mse'})
graph.load_weights('temp.h5')
nloss = graph.evaluate({'input1': X_test, 'output1': y_test, 'output2': y2_test})
print(nloss)
assert(loss == nloss)
def test_2o_1i_sample_weights(self):
print('test a non-sequential graph with 1 input and 2 outputs with sample weights')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(32, 4), name='dense2', input='input1')
graph.add_node(Dense(16, 1), name='dense3', input='dense1')
graph.add_output(name='output1', input='dense2')
graph.add_output(name='output2', input='dense3')
weights1 = np.random.uniform(size=y_train.shape[0])
weights2 = np.random.uniform(size=y2_train.shape[0])
weights1_test = np.random.uniform(size=y_test.shape[0])
weights2_test = np.random.uniform(size=y2_test.shape[0])
graph.compile('rmsprop', {'output1': 'mse', 'output2': 'mse'})
history = graph.fit({'input1': X_train, 'output1': y_train, 'output2': y2_train}, nb_epoch=10,
sample_weight={'output1': weights1, 'output2': weights2})
out = graph.predict({'input1': X_test})
        assert(type(out) == dict)
assert(len(out) == 2)
loss = graph.test_on_batch({'input1': X_test, 'output1': y_test, 'output2': y2_test},
sample_weight={'output1': weights1_test, 'output2': weights2_test})
loss = graph.train_on_batch({'input1': X_train, 'output1': y_train, 'output2': y2_train},
sample_weight={'output1': weights1, 'output2': weights2})
loss = graph.evaluate({'input1': X_train, 'output1': y_train, 'output2': y2_train},
sample_weight={'output1': weights1, 'output2': weights2})
print(loss)
def test_recursive(self):
print('test layer-like API')
graph = containers.Graph()
graph.add_input(name='input1', ndim=2)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(32, 4), name='dense2', input='input1')
graph.add_node(Dense(16, 4), name='dense3', input='dense1')
graph.add_output(name='output1', inputs=['dense2', 'dense3'], merge_mode='sum')
seq = Sequential()
seq.add(Dense(32, 32, name='first_seq_dense'))
seq.add(graph)
seq.add(Dense(4, 4, name='last_seq_dense'))
seq.compile('rmsprop', 'mse')
history = seq.fit(X_train, y_train, batch_size=10, nb_epoch=10)
loss = seq.evaluate(X_test, y_test)
print(loss)
assert(loss < 2.5)
loss = seq.evaluate(X_test, y_test, show_accuracy=True)
pred = seq.predict(X_test)
seq.get_config(verbose=1)
def test_create_output(self):
print('test create_output argument')
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_node(Dense(32, 16), name='dense1', input='input1')
graph.add_node(Dense(32, 4), name='dense2', input='input1')
graph.add_node(Dense(16, 4), name='dense3', input='dense1')
graph.add_node(Dense(4, 4), name='output1', inputs=['dense2', 'dense3'], merge_mode='sum', create_output=True)
graph.compile('rmsprop', {'output1': 'mse'})
history = graph.fit({'input1': X_train, 'output1': y_train}, nb_epoch=10)
out = graph.predict({'input1': X_test})
        assert(type(out) == dict)
assert(len(out) == 1)
loss = graph.test_on_batch({'input1': X_test, 'output1': y_test})
loss = graph.train_on_batch({'input1': X_test, 'output1': y_test})
loss = graph.evaluate({'input1': X_test, 'output1': y_test})
print(loss)
assert(loss < 2.5)
def test_count_params(self):
print('test count params')
nb_units = 100
nb_classes = 2
graph = Graph()
graph.add_input(name='input1', ndim=2)
graph.add_input(name='input2', ndim=2)
graph.add_node(Dense(nb_units, nb_units),
name='dense1', input='input1')
graph.add_node(Dense(nb_units, nb_classes),
name='dense2', input='input2')
graph.add_node(Dense(nb_units, nb_classes),
name='dense3', input='dense1')
graph.add_output(name='output', inputs=['dense2', 'dense3'],
merge_mode='sum')
n = nb_units * nb_units + nb_units
n += nb_units * nb_classes + nb_classes
n += nb_units * nb_classes + nb_classes
self.assertEqual(n, graph.count_params())
graph.compile('rmsprop', {'output': 'binary_crossentropy'})
self.assertEqual(n, graph.count_params())
if __name__ == '__main__':
print('Test graph model')
unittest.main()
| mit |
pombredanne/rekall | rekall-core/rekall/addrspace_test.py | 3 | 2220 | from rekall import addrspace
import unittest
from rekall import obj
from rekall import testlib
from rekall import session
class CustomRunsAddressSpace(addrspace.RunBasedAddressSpace):
def __init__(self, runs=None, data=None, **kwargs):
super(CustomRunsAddressSpace, self).__init__(**kwargs)
self.base = addrspace.BufferAddressSpace(data=data,
session=self.session)
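        # Each run is a (virtual_start, file_offset, length) tuple mapping a
        # region of this address space onto the backing buffer -- inferred
        # from the assertions in the tests below (editor's note).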
for i in runs:
self.runs.insert(i)
class RunBasedTest(testlib.RekallBaseUnitTestCase):
"""Test the RunBasedAddressSpace implementation."""
def setUp(self):
self.session = session.Session()
self.contiguous_as = CustomRunsAddressSpace(session=self.session,
runs = [(1000, 0, 1), (1001, 1, 9)],
data="0123456789")
self.discontiguous_as = CustomRunsAddressSpace(session=self.session,
runs=[(1000, 0, 1), (1020, 1, 9)],
data="0123456789")
def testDiscontiguousRunsRead(self):
# Read from an address without data
self.assertEqual(self.discontiguous_as.read(0, 20),
"\x00" * 20)
# Read spanning two runs
self.assertEqual(self.discontiguous_as.read(1000, 30),
"0" + "\x00"*19 + "123456789" + "\x00")
# Read in the middle of a run
self.assertEqual(self.discontiguous_as.read(1025, 10),
"6789" + "\x00" * 6)
# Read past the end
self.assertEqual(self.discontiguous_as.read(2000, 10),
"\x00" * 10)
def testContiguousRunsRead(self):
# Read from an address without data
self.assertEqual(self.contiguous_as.read(0, 20),
"\x00" * 20)
# Read spanning two runs
self.assertEqual(self.contiguous_as.read(1000, 30),
"0123456789" + "\x00"*20)
# Read in the middle of a run
self.assertEqual(self.contiguous_as.read(1005, 10),
"56789" + "\x00" * 5)
# Read past the end
self.assertEqual(self.contiguous_as.read(2000, 10),
"\x00" * 10)
if __name__ == "__main__":
unittest.main()
| gpl-2.0 |
tux-00/ansible | lib/ansible/modules/system/mount.py | 60 | 21299 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Red Hat, inc
# Written by Seth Vidal
# based on the mount modules from salt and puppet
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: mount
short_description: Control active and configured mount points
description:
- This module controls active and configured mount points in C(/etc/fstab).
author:
- Ansible Core Team
- Seth Vidal
version_added: "0.6"
options:
path:
description:
- Path to the mount point (e.g. C(/mnt/files)).
- Before 2.3 this option was only usable as I(dest), I(destfile) and
I(name).
required: true
aliases: [ name ]
src:
description:
- Device to be mounted on I(path). Required when I(state) set to
C(present) or C(mounted).
required: false
default: null
fstype:
description:
- Filesystem type. Required when I(state) is C(present) or C(mounted).
required: false
default: null
opts:
description:
- Mount options (see fstab(5), or vfstab(4) on Solaris).
required: false
default: null
dump:
description:
- Dump (see fstab(5)). Note that if set to C(null) and I(state) set to
C(present), it will cease to work and duplicate entries will be made
with subsequent runs.
- Has no effect on Solaris systems.
required: false
default: 0
passno:
description:
- Passno (see fstab(5)). Note that if set to C(null) and I(state) set to
C(present), it will cease to work and duplicate entries will be made
with subsequent runs.
- Deprecated on Solaris systems.
required: false
default: 0
state:
description:
- If C(mounted) or C(unmounted), the device will be actively mounted or
unmounted as needed and appropriately configured in I(fstab).
- C(absent) and C(present) only deal with I(fstab) but will not affect
current mounting.
- If specifying C(mounted) and the mount point is not present, the mount
point will be created.
- Similarly, specifying C(absent) will remove the mount point directory.
required: true
choices: ["present", "absent", "mounted", "unmounted"]
fstab:
description:
- File to use instead of C(/etc/fstab). You shouldn't use this option
unless you really know what you are doing. This might be useful if
you need to configure mountpoints in a chroot environment. OpenBSD
does not allow specifying alternate fstab files with mount so do not
use this on OpenBSD with any state that operates on the live
filesystem.
required: false
default: /etc/fstab (/etc/vfstab on Solaris)
boot:
version_added: 2.2
description:
- Determines if the filesystem should be mounted on boot.
- Only applies to Solaris systems.
required: false
default: yes
choices: ["yes", "no"]
notes:
- As of Ansible 2.3, the I(name) option has been changed to I(path) as
default, but I(name) still works as well.
'''
EXAMPLES = '''
# Before 2.3, option 'name' was used instead of 'path'
- name: Mount DVD read-only
mount:
path: /mnt/dvd
src: /dev/sr0
fstype: iso9660
opts: ro
state: present
- name: Mount up device by label
mount:
path: /srv/disk
src: LABEL=SOME_LABEL
fstype: ext4
state: present
- name: Mount up device by UUID
mount:
path: /home
src: UUID=b3e48f45-f933-4c8e-a700-22a159ec9077
fstype: xfs
opts: noatime
state: present
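# The tasks below are illustrative sketches for the remaining states; the
# device names and paths are assumptions, so adjust them for your system.
- name: Mount a volume and create the fstab entry at the same time
  mount:
    path: /mnt/data
    src: /dev/sdb1
    fstype: ext4
    state: mounted
- name: Unmount a filesystem without changing fstab
  mount:
    path: /mnt/dvd
    state: unmounted
- name: Remove the fstab entry, unmounting and deleting the mount point
  mount:
    path: /mnt/dvd
    state: absent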
'''
import os
from ansible.module_utils.basic import AnsibleModule, get_platform
from ansible.module_utils.ismount import ismount
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native
def write_fstab(lines, path):
fs_w = open(path, 'w')
for l in lines:
fs_w.write(l)
fs_w.flush()
fs_w.close()
def _escape_fstab(v):
"""Escape invalid characters in fstab fields.
space (040)
ampersand (046)
backslash (134)
"""
if isinstance(v, int):
return v
else:
return(
v.
replace('\\', '\\134').
replace(' ', '\\040').
replace('&', '\\046'))
def set_mount(module, args):
"""Set/change a mount point location in fstab."""
to_write = []
exists = False
changed = False
escaped_args = dict([(k, _escape_fstab(v)) for k, v in iteritems(args)])
new_line = '%(src)s %(name)s %(fstype)s %(opts)s %(dump)s %(passno)s\n'
if get_platform() == 'SunOS':
new_line = (
'%(src)s - %(name)s %(fstype)s %(passno)s %(boot)s %(opts)s\n')
for line in open(args['fstab'], 'r').readlines():
if not line.strip():
to_write.append(line)
continue
if line.strip().startswith('#'):
to_write.append(line)
continue
# Check if we got a valid line for splitting
if (
get_platform() == 'SunOS' and len(line.split()) != 7 or
get_platform() != 'SunOS' and len(line.split()) != 6):
to_write.append(line)
continue
ld = {}
if get_platform() == 'SunOS':
(
ld['src'],
dash,
ld['name'],
ld['fstype'],
ld['passno'],
ld['boot'],
ld['opts']
) = line.split()
else:
(
ld['src'],
ld['name'],
ld['fstype'],
ld['opts'],
ld['dump'],
ld['passno']
) = line.split()
# Check if we found the correct line
if ld['name'] != escaped_args['name']:
to_write.append(line)
continue
# If we got here we found a match - let's check if there is any
# difference
exists = True
args_to_check = ('src', 'fstype', 'opts', 'dump', 'passno')
if get_platform() == 'SunOS':
args_to_check = ('src', 'fstype', 'passno', 'boot', 'opts')
for t in args_to_check:
if ld[t] != escaped_args[t]:
ld[t] = escaped_args[t]
changed = True
if changed:
to_write.append(new_line % ld)
else:
to_write.append(line)
if not exists:
to_write.append(new_line % escaped_args)
changed = True
if changed and not module.check_mode:
write_fstab(to_write, args['fstab'])
return (args['name'], changed)
def unset_mount(module, args):
"""Remove a mount point from fstab."""
to_write = []
changed = False
escaped_name = _escape_fstab(args['name'])
for line in open(args['fstab'], 'r').readlines():
if not line.strip():
to_write.append(line)
continue
if line.strip().startswith('#'):
to_write.append(line)
continue
# Check if we got a valid line for splitting
if (
get_platform() == 'SunOS' and len(line.split()) != 7 or
get_platform() != 'SunOS' and len(line.split()) != 6):
to_write.append(line)
continue
ld = {}
if get_platform() == 'SunOS':
(
ld['src'],
dash,
ld['name'],
ld['fstype'],
ld['passno'],
ld['boot'],
ld['opts']
) = line.split()
else:
(
ld['src'],
ld['name'],
ld['fstype'],
ld['opts'],
ld['dump'],
ld['passno']
) = line.split()
if ld['name'] != escaped_name:
to_write.append(line)
continue
# If we got here we found a match - continue and mark changed
changed = True
if changed and not module.check_mode:
write_fstab(to_write, args['fstab'])
return (args['name'], changed)
def _set_fstab_args(fstab_file):
result = []
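    # BSD mount selects an alternate fstab with -F, while util-linux mount
    # uses -T; both take the file path as the following argument.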
if fstab_file and fstab_file != '/etc/fstab':
if get_platform().lower().endswith('bsd'):
result.append('-F')
else:
result.append('-T')
result.append(fstab_file)
return result
def mount(module, args):
"""Mount up a path or remount if needed."""
mount_bin = module.get_bin_path('mount', required=True)
name = args['name']
cmd = [mount_bin]
if get_platform().lower() == 'openbsd':
# Use module.params['fstab'] here as args['fstab'] has been set to the
# default value.
if module.params['fstab'] is not None:
module.fail_json(
msg=(
'OpenBSD does not support alternate fstab files. Do not '
'specify the fstab parameter for OpenBSD hosts'))
else:
cmd += _set_fstab_args(args['fstab'])
cmd += [name]
rc, out, err = module.run_command(cmd)
if rc == 0:
return 0, ''
else:
return rc, out+err
def umount(module, path):
"""Unmount a path."""
umount_bin = module.get_bin_path('umount', required=True)
cmd = [umount_bin, path]
rc, out, err = module.run_command(cmd)
if rc == 0:
return 0, ''
else:
return rc, out+err
def remount(module, args):
"""Try to use 'remount' first and fallback to (u)mount if unsupported."""
mount_bin = module.get_bin_path('mount', required=True)
cmd = [mount_bin]
# Multiplatform remount opts
if get_platform().lower().endswith('bsd'):
cmd += ['-u']
else:
cmd += ['-o', 'remount']
if get_platform().lower() == 'openbsd':
# Use module.params['fstab'] here as args['fstab'] has been set to the
# default value.
if module.params['fstab'] is not None:
module.fail_json(
msg=(
'OpenBSD does not support alternate fstab files. Do not '
'specify the fstab parameter for OpenBSD hosts'))
else:
cmd += _set_fstab_args(args['fstab'])
cmd += [args['name']]
out = err = ''
try:
if get_platform().lower().endswith('bsd'):
# Note: Forcing BSDs to do umount/mount due to BSD remount not
# working as expected (suspect bug in the BSD mount command)
# Interested contributor could rework this to use mount options on
# the CLI instead of relying on fstab
# https://github.com/ansible/ansible-modules-core/issues/5591
rc = 1
else:
rc, out, err = module.run_command(cmd)
    except Exception:
rc = 1
msg = ''
if rc != 0:
msg = out + err
rc, msg = umount(module, args['name'])
if rc == 0:
rc, msg = mount(module, args)
return rc, msg
# Note if we wanted to put this into module_utils we'd have to get permission
# from @jupeter -- https://github.com/ansible/ansible-modules-core/pull/2923
# @jtyr -- https://github.com/ansible/ansible-modules-core/issues/4439
# and @abadger to relicense from GPLv3+
def is_bind_mounted(module, linux_mounts, dest, src=None, fstype=None):
"""Return whether the dest is bind mounted
    :arg module: The AnsibleModule (used for helper functions)
    :arg linux_mounts: Cached list of mounts for Linux, or None if
        /proc/self/mountinfo could not be read.
    :arg dest: The directory to be mounted under. This is the primary means
        of identifying whether the destination is mounted.
    :kwarg src: The source directory. If specified, this is used to help
        ensure that we are detecting that the correct source is mounted there.
    :kwarg fstype: The filesystem type. If specified this is also used to
        help ensure that we are detecting the right mount.
    :returns: True if the dest is mounted with src, otherwise False.
"""
is_mounted = False
if get_platform() == 'Linux' and linux_mounts is not None:
if src is None:
# That's for unmounted/absent
for m in linux_mounts:
if m['dst'] == dest:
is_mounted = True
else:
mounted_src = None
for m in linux_mounts:
if m['dst'] == dest:
mounted_src = m['src']
# That's for mounted
if mounted_src is not None and mounted_src == src:
is_mounted = True
else:
bin_path = module.get_bin_path('mount', required=True)
cmd = '%s -l' % bin_path
rc, out, err = module.run_command(cmd)
mounts = []
if len(out):
mounts = to_native(out).strip().split('\n')
for mnt in mounts:
arguments = mnt.split()
if (
(arguments[0] == src or src is None) and
arguments[2] == dest and
(arguments[4] == fstype or fstype is None)):
is_mounted = True
if is_mounted:
break
return is_mounted
def get_linux_mounts(module):
"""Gather mount information"""
mntinfo_file = "/proc/self/mountinfo"
try:
f = open(mntinfo_file)
except IOError:
return
lines = map(str.strip, f.readlines())
try:
f.close()
except IOError:
module.fail_json(msg="Cannot close file %s" % mntinfo_file)
mntinfo = []
for line in lines:
fields = line.split()
record = {
'id': int(fields[0]),
'parent_id': int(fields[1]),
'root': fields[3],
'dst': fields[4],
'opts': fields[5],
'fs': fields[-3],
'src': fields[-2]
}
mntinfo.append(record)
mounts = []
for mnt in mntinfo:
src = mnt['src']
if mnt['parent_id'] != 1:
# Find parent
for m in mntinfo:
if mnt['parent_id'] == m['id']:
if (
len(m['root']) > 1 and
mnt['root'].startswith("%s/" % m['root'])):
                        # Omit the parent's root from the child's root
# == Example:
# 204 136 253:2 /rootfs / rw - ext4 /dev/sdb2 rw
# 141 140 253:2 /rootfs/tmp/aaa /tmp/bbb rw - ext4 /dev/sdb2 rw
# == Expected result:
# src=/tmp/aaa
mnt['root'] = mnt['root'][len(m['root']) + 1:]
# Prepend the parent's dst to the child's root
# == Example:
# 42 60 0:35 / /tmp rw - tmpfs tmpfs rw
# 78 42 0:35 /aaa /tmp/bbb rw - tmpfs tmpfs rw
# == Expected result:
# src=/tmp/aaa
if m['dst'] != '/':
mnt['root'] = "%s%s" % (m['dst'], mnt['root'])
src = mnt['root']
break
record = {
'dst': mnt['dst'],
'src': src,
'opts': mnt['opts'],
'fs': mnt['fs']
}
mounts.append(record)
return mounts
def main():
module = AnsibleModule(
argument_spec=dict(
boot=dict(default='yes', choices=['yes', 'no']),
dump=dict(),
fstab=dict(default=None),
fstype=dict(),
path=dict(required=True, aliases=['name'], type='path'),
opts=dict(),
passno=dict(type='str'),
src=dict(type='path'),
state=dict(
required=True,
choices=['present', 'absent', 'mounted', 'unmounted']),
),
supports_check_mode=True,
required_if=(
['state', 'mounted', ['src', 'fstype']],
['state', 'present', ['src', 'fstype']]
)
)
# solaris args:
# name, src, fstype, opts, boot, passno, state, fstab=/etc/vfstab
# linux args:
# name, src, fstype, opts, dump, passno, state, fstab=/etc/fstab
# Note: Do not modify module.params['fstab'] as we need to know if the user
# explicitly specified it in mount() and remount()
if get_platform().lower() == 'sunos':
args = dict(
name=module.params['path'],
opts='-',
passno='-',
fstab=module.params['fstab'],
boot='yes'
)
if args['fstab'] is None:
args['fstab'] = '/etc/vfstab'
else:
args = dict(
name=module.params['path'],
opts='defaults',
dump='0',
passno='0',
fstab=module.params['fstab']
)
if args['fstab'] is None:
args['fstab'] = '/etc/fstab'
# FreeBSD doesn't have any 'default' so set 'rw' instead
if get_platform() == 'FreeBSD':
args['opts'] = 'rw'
linux_mounts = []
    # Cache all mounts here so that we get consistent results if we need to
    # call is_bind_mounted() multiple times
if get_platform() == 'Linux':
linux_mounts = get_linux_mounts(module)
if linux_mounts is None:
args['warnings'] = (
'Cannot open file /proc/self/mountinfo. '
'Bind mounts might be misinterpreted.')
# Override defaults with user specified params
for key in ('src', 'fstype', 'passno', 'opts', 'dump', 'fstab'):
if module.params[key] is not None:
args[key] = module.params[key]
# If fstab file does not exist, we first need to create it. This mainly
# happens when fstab option is passed to the module.
if not os.path.exists(args['fstab']):
if not os.path.exists(os.path.dirname(args['fstab'])):
os.makedirs(os.path.dirname(args['fstab']))
open(args['fstab'], 'a').close()
# absent:
# Remove from fstab and unmounted.
# unmounted:
# Do not change fstab state, but unmount.
# present:
# Add to fstab, do not change mount state.
# mounted:
# Add to fstab if not there and make sure it is mounted. If it has
# changed in fstab then remount it.
state = module.params['state']
name = module.params['path']
changed = False
if state == 'absent':
name, changed = unset_mount(module, args)
if changed and not module.check_mode:
if ismount(name) or is_bind_mounted(module, linux_mounts, name):
res, msg = umount(module, name)
if res:
module.fail_json(
msg="Error unmounting %s: %s" % (name, msg))
if os.path.exists(name):
try:
os.rmdir(name)
except (OSError, IOError):
e = get_exception()
module.fail_json(msg="Error rmdir %s: %s" % (name, str(e)))
elif state == 'unmounted':
if ismount(name) or is_bind_mounted(module, linux_mounts, name):
if not module.check_mode:
res, msg = umount(module, name)
if res:
module.fail_json(
msg="Error unmounting %s: %s" % (name, msg))
changed = True
elif state == 'mounted':
if not os.path.exists(name) and not module.check_mode:
try:
os.makedirs(name)
except (OSError, IOError):
e = get_exception()
module.fail_json(
msg="Error making dir %s: %s" % (name, str(e)))
name, changed = set_mount(module, args)
res = 0
if (
ismount(name) or
is_bind_mounted(
module, linux_mounts, name, args['src'], args['fstype'])):
if changed and not module.check_mode:
res, msg = remount(module, args)
changed = True
else:
changed = True
if not module.check_mode:
res, msg = mount(module, args)
if res:
module.fail_json(msg="Error mounting %s: %s" % (name, msg))
elif state == 'present':
name, changed = set_mount(module, args)
else:
module.fail_json(msg='Unexpected position reached')
module.exit_json(changed=changed, **args)
if __name__ == '__main__':
main()
| gpl-3.0 |
erikdejonge/youtube-dl | youtube_dl/extractor/hidive.py | 23 | 4113 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
ExtractorError,
int_or_none,
url_or_none,
urlencode_postdata,
)
class HiDiveIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?hidive\.com/stream/(?P<title>[^/]+)/(?P<key>[^/?#&]+)'
    # Using X-Forwarded-For results in a 403 HTTP error for HLS fragments,
    # so geo bypass is disabled completely
_GEO_BYPASS = False
_NETRC_MACHINE = 'hidive'
_LOGIN_URL = 'https://www.hidive.com/account/login'
_TESTS = [{
'url': 'https://www.hidive.com/stream/the-comic-artist-and-his-assistants/s01e001',
'info_dict': {
'id': 'the-comic-artist-and-his-assistants/s01e001',
'ext': 'mp4',
'title': 'the-comic-artist-and-his-assistants/s01e001',
'series': 'the-comic-artist-and-his-assistants',
'season_number': 1,
'episode_number': 1,
},
'params': {
'skip_download': True,
},
'skip': 'Requires Authentication',
}]
def _real_initialize(self):
email, password = self._get_login_info()
if email is None:
return
webpage = self._download_webpage(self._LOGIN_URL, None)
form = self._search_regex(
r'(?s)<form[^>]+action="/account/login"[^>]*>(.+?)</form>',
webpage, 'login form')
data = self._hidden_inputs(form)
data.update({
'Email': email,
'Password': password,
})
self._download_webpage(
self._LOGIN_URL, None, 'Logging in', data=urlencode_postdata(data))
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
title, key = mobj.group('title', 'key')
video_id = '%s/%s' % (title, key)
settings = self._download_json(
'https://www.hidive.com/play/settings', video_id,
data=urlencode_postdata({
'Title': title,
'Key': key,
'PlayerId': 'f4f895ce1ca713ba263b91caeb1daa2d08904783',
}))
restriction = settings.get('restrictionReason')
if restriction == 'RegionRestricted':
self.raise_geo_restricted()
if restriction and restriction != 'None':
raise ExtractorError(
'%s said: %s' % (self.IE_NAME, restriction), expected=True)
formats = []
subtitles = {}
for rendition_id, rendition in settings['renditions'].items():
bitrates = rendition.get('bitrates')
if not isinstance(bitrates, dict):
continue
m3u8_url = url_or_none(bitrates.get('hls'))
if not m3u8_url:
continue
formats.extend(self._extract_m3u8_formats(
m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
m3u8_id='%s-hls' % rendition_id, fatal=False))
cc_files = rendition.get('ccFiles')
if not isinstance(cc_files, list):
continue
for cc_file in cc_files:
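                # Each ccFiles entry appears to be [language, label, url];
                # only the language and the URL are used here.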
if not isinstance(cc_file, list) or len(cc_file) < 3:
continue
cc_lang = cc_file[0]
cc_url = url_or_none(cc_file[2])
if not isinstance(cc_lang, compat_str) or not cc_url:
continue
subtitles.setdefault(cc_lang, []).append({
'url': cc_url,
})
self._sort_formats(formats)
season_number = int_or_none(self._search_regex(
r's(\d+)', key, 'season number', default=None))
episode_number = int_or_none(self._search_regex(
r'e(\d+)', key, 'episode number', default=None))
return {
'id': video_id,
'title': video_id,
'subtitles': subtitles,
'formats': formats,
'series': title,
'season_number': season_number,
'episode_number': episode_number,
}
| unlicense |
secondscoin/secondscoin | contrib/pyminer/pyminer.py | 385 | 6434 | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
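# Example CONFIG-FILE contents (key=value lines; lines starting with '#' are
# comments). The values below are illustrative only:
#   host = 127.0.0.1
#   port = 8332
#   rpcuser = someuser
#   rpcpass = somepass
#   threads = 2
#   hashmeter = 1
#   scantime = 30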
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
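# bytereverse flips the byte order of one 32-bit word (e.g. 0x12345678
# becomes 0x78563412); bufreverse applies that to every word in a buffer,
# while wordreverse reverses the order of the 32-bit words themselves.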
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
	m = re.search(r'^\s*#', line)
if m:
continue
# parse key=value lines
	m = re.search(r'^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| mit |
stefanreuther/bob | test/test_input_recipeset.py | 1 | 41799 | # Bob build tool
# Copyright (C) 2016 Jan Klötzke
#
# SPDX-License-Identifier: GPL-3.0-or-later
from tempfile import NamedTemporaryFile, TemporaryDirectory
from unittest import TestCase
from unittest.mock import Mock
import os
import textwrap
import yaml
from bob import DEBUG
from bob.input import RecipeSet
from bob.errors import ParseError, BobError
DEBUG['ngd'] = True
def pruneBuiltin(env):
return { k : v for k,v in env.items() if not k.startswith("BOB_") }
class RecipesTmp:
def setUp(self):
self.cwd = os.getcwd()
self.tmpdir = TemporaryDirectory()
os.chdir(self.tmpdir.name)
os.mkdir("recipes")
os.mkdir("classes")
def tearDown(self):
self.tmpdir.cleanup()
os.chdir(self.cwd)
def writeRecipe(self, name, content, layer=[]):
path = os.path.join("",
*(os.path.join("layers", l) for l in layer),
"recipes")
if path: os.makedirs(path, exist_ok=True)
with open(os.path.join(path, name+".yaml"), "w") as f:
f.write(textwrap.dedent(content))
def writeClass(self, name, content, layer=[]):
path = os.path.join("",
*(os.path.join("layers", l) for l in layer),
"classes")
if path: os.makedirs(path, exist_ok=True)
with open(os.path.join(path, name+".yaml"), "w") as f:
f.write(textwrap.dedent(content))
def writeConfig(self, content, layer=[]):
path = os.path.join("", *(os.path.join("layers", l) for l in layer))
if path: os.makedirs(path, exist_ok=True)
with open(os.path.join(path, "config.yaml"), "w") as f:
f.write(yaml.dump(content))
def generate(self, sandboxEnabled=False):
recipes = RecipeSet()
recipes.parse()
return recipes.generatePackages(lambda x,y: "unused",
sandboxEnabled=sandboxEnabled)
class TestUserConfig(TestCase):
def setUp(self):
self.cwd = os.getcwd()
def tearDown(self):
os.chdir(self.cwd)
def testEmptyTree(self):
"""Test parsing an empty receipe tree"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
recipeSet = RecipeSet()
recipeSet.parse()
def testDefaultEmpty(self):
"""Test parsing an empty default.yaml"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
with open("default.yaml", "w") as f:
f.write(" ")
recipeSet = RecipeSet()
recipeSet.parse()
def testDefaultValidation(self):
"""Test that default.yaml is validated with a schema"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
with open("default.yaml", "w") as f:
f.write("wrongkey: foo\n")
recipeSet = RecipeSet()
self.assertRaises(ParseError, recipeSet.parse)
def testDefaultInclude(self):
"""Test parsing default.yaml including another file"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
with open("default.yaml", "w") as f:
f.write("include:\n")
f.write(" - user\n")
with open("user.yaml", "w") as f:
f.write("whitelist: [FOO]\n")
recipeSet = RecipeSet()
recipeSet.parse()
assert "FOO" in recipeSet.envWhiteList()
def testDefaultIncludeMissing(self):
"""Test that default.yaml can include missing files"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
with open("default.yaml", "w") as f:
f.write("include:\n")
f.write(" - user\n")
recipeSet = RecipeSet()
recipeSet.parse()
self.assertEqual(pruneBuiltin(recipeSet.defaultEnv()), {})
def testDefaultIncludeOverrides(self):
"""Test that included files override settings of default.yaml"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
with open("default.yaml", "w") as f:
f.write("include:\n")
f.write(" - user\n")
f.write("environment:\n")
f.write(" FOO: BAR\n")
f.write(" BAR: BAZ\n")
with open("user.yaml", "w") as f:
f.write("environment:\n")
f.write(" FOO: BAZ\n")
recipeSet = RecipeSet()
recipeSet.parse()
self.assertEqual(pruneBuiltin(recipeSet.defaultEnv()),
{ "FOO":"BAZ", "BAR":"BAZ" })
def testUserConfigMissing(self):
"""Test that missing user config fails parsing"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
recipeSet = RecipeSet()
recipeSet.setConfigFiles(["user"])
self.assertRaises(ParseError, recipeSet.parse)
def testUserConfigOverrides(self):
"""Test that user configs override default.yaml w/ includes"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
with open("default.yaml", "w") as f:
f.write("include:\n")
f.write(" - included\n")
f.write("environment:\n")
f.write(" FOO: BAR\n")
with open("included.yaml", "w") as f:
f.write("environment:\n")
f.write(" FOO: BAZ\n")
with open("user.yaml", "w") as f:
f.write("environment:\n")
f.write(" FOO: USER\n")
recipeSet = RecipeSet()
recipeSet.setConfigFiles(["user"])
recipeSet.parse()
self.assertEqual(pruneBuiltin(recipeSet.defaultEnv()),
{ "FOO":"USER"})
def testDefaultRequire(self):
"""Test parsing default.yaml requiring another file"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
with open("default.yaml", "w") as f:
f.write("require:\n")
f.write(" - user\n")
with open("user.yaml", "w") as f:
f.write("whitelist: [FOO]\n")
recipeSet = RecipeSet()
recipeSet.parse()
assert "FOO" in recipeSet.envWhiteList()
def testDefaultRequireMissing(self):
"""Test that default.yaml barfs on required missing files"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
with open("default.yaml", "w") as f:
f.write("require:\n")
f.write(" - user\n")
recipeSet = RecipeSet()
self.assertRaises(ParseError, recipeSet.parse)
def testDefaultRequireLowerPrecedence(self):
"""Test that 'require' has lower precedence than 'include'"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
with open("default.yaml", "w") as f:
f.write("include:\n")
f.write(" - higher\n")
f.write("require:\n")
f.write(" - lower\n")
f.write("environment:\n")
f.write(" FOO: default\n")
f.write(" BAR: default\n")
f.write(" BAZ: default\n")
with open("lower.yaml", "w") as f:
f.write("environment:\n")
f.write(" BAR: lower\n")
f.write(" BAZ: lower\n")
with open("higher.yaml", "w") as f:
f.write("environment:\n")
f.write(" BAZ: higher\n")
recipeSet = RecipeSet()
recipeSet.parse()
self.assertEqual(pruneBuiltin(recipeSet.defaultEnv()),
{'FOO' : 'default', 'BAR' : 'lower', 'BAZ' : 'higher' })
def testDefaultRelativeIncludes(self):
"""Test relative includes work"""
with TemporaryDirectory() as tmp:
os.chdir(tmp)
os.mkdir("recipes")
os.makedirs("some/sub/dirs")
os.makedirs("other/directories")
with open("config.yaml", "w") as f:
f.write("policies:\n")
f.write(" relativeIncludes: True\n")
with open("default.yaml", "w") as f:
f.write("include:\n")
f.write(" - some/first\n")
f.write("require:\n")
f.write(" - other/second\n")
f.write("environment:\n")
f.write(" FOO: default\n")
f.write(" BAR: default\n")
f.write(" BAZ: default\n")
with open("other/second.yaml", "w") as f:
f.write('require: ["directories/lower"]')
with open("other/directories/lower.yaml", "w") as f:
f.write("environment:\n")
f.write(" BAR: lower\n")
f.write(" BAZ: lower\n")
with open("some/first.yaml", "w") as f:
f.write('include: ["sub/dirs/higher"]')
with open("some/sub/dirs/higher.yaml", "w") as f:
f.write("environment:\n")
f.write(" BAZ: higher\n")
recipeSet = RecipeSet()
recipeSet.parse()
self.assertEqual(pruneBuiltin(recipeSet.defaultEnv()),
{'FOO' : 'default', 'BAR' : 'lower', 'BAZ' : 'higher' })
class TestDependencies(RecipesTmp, TestCase):
def testDuplicateRemoval(self):
"""Test that provided dependencies do not replace real dependencies"""
self.writeRecipe("root", """\
root: True
depends: [a, b]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("a", """\
depends: [b]
provideDeps: [b]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("b", """\
buildScript: "true"
packageScript: "true"
""")
recipes = RecipeSet()
recipes.parse()
packages = recipes.generatePackages(lambda x,y: "unused")
# make sure "b" is addressable
p = packages.walkPackagePath("root/b")
self.assertEqual(p.getName(), "b")
def testIncompatible(self):
"""Incompatible provided dependencies must raise an error"""
self.writeRecipe("root", """\
root: True
depends: [a, b]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("a", """\
depends:
-
name: c
environment: { FOO: A }
provideDeps: [c]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("b", """\
depends:
-
name: c
environment: { FOO: B }
provideDeps: [c]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("c", """\
buildVars: [FOO]
buildScript: "true"
packageScript: "true"
""")
recipes = RecipeSet()
recipes.parse()
packages = recipes.generatePackages(lambda x,y: "unused")
self.assertRaises(ParseError, packages.getRootPackage)
def testCyclic(self):
"""Cyclic dependencies must be detected during parsing"""
self.writeRecipe("a", """\
root: True
depends: [b]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("b", """\
depends: [c]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("c", """\
depends: [a]
buildScript: "true"
packageScript: "true"
""")
recipes = RecipeSet()
recipes.parse()
packages = recipes.generatePackages(lambda x,y: "unused")
self.assertRaises(ParseError, packages.getRootPackage)
def testCyclicSpecial(self):
"""Make sure cycles are detected on common sub-trees too"""
self.writeRecipe("root1", """\
root: True
depends: [b]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("root2", """\
root: True
depends:
- name: b
if: "${TERMINATE:-1}"
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("b", """\
environment:
TERMINATE: "0"
depends: [c]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("c", """\
depends: [root2]
buildScript: "true"
packageScript: "true"
""")
recipes = RecipeSet()
recipes.parse()
packages = recipes.generatePackages(lambda x,y: "unused")
self.assertRaises(ParseError, packages.getRootPackage)
def testIncompatibleNamedTwice(self):
"""Test that it is impossible to name the same dependency twice with
different variants."""
self.writeRecipe("root", """\
multiPackage:
"":
root: True
depends:
- name: root-lib
environment:
FOO: bar
- name: root-lib
use: [tools]
environment:
FOO: baz
buildScript: "true"
packageScript: "true"
lib:
packageVars: [FOO]
packageScript: "true"
provideTools:
t: "."
""")
recipes = RecipeSet()
recipes.parse()
packages = recipes.generatePackages(lambda x,y: "unused")
self.assertRaises(ParseError, packages.getRootPackage)
class TestNetAccess(RecipesTmp, TestCase):
def testOldPolicy(self):
"""Test that network access is enbled by default for old projects"""
self.writeRecipe("root", """\
root: True
""")
p = self.generate().walkPackagePath("root")
self.assertTrue(p.getBuildStep().hasNetAccess())
self.assertTrue(p.getPackageStep().hasNetAccess())
def testNewPolicy(self):
"""Test that network access is disabled by default"""
self.writeConfig({
"bobMinimumVersion" : "0.15",
})
self.writeRecipe("root", """\
root: True
""")
p = self.generate().walkPackagePath("root")
self.assertFalse(p.getBuildStep().hasNetAccess())
self.assertFalse(p.getPackageStep().hasNetAccess())
def testBuildNetAccess(self):
"""Test that a recipe can request network access for build step"""
self.writeConfig({
"bobMinimumVersion" : "0.15",
})
self.writeRecipe("root1", """\
root: True
buildNetAccess: True
buildScript: "true"
""")
self.writeRecipe("root2", """\
root: True
packageNetAccess: True
""")
packages = self.generate()
root1 = packages.walkPackagePath("root1")
self.assertTrue(root1.getBuildStep().hasNetAccess())
self.assertFalse(root1.getPackageStep().hasNetAccess())
root2 = packages.walkPackagePath("root2")
self.assertFalse(root2.getBuildStep().hasNetAccess())
self.assertTrue(root2.getPackageStep().hasNetAccess())
def testToolAccessBuild(self):
"""Test that a tool can force network access for build step."""
self.writeConfig({
"bobMinimumVersion" : "0.15",
})
self.writeRecipe("root", """\
root: True
depends:
- name: tool
use: [tools]
buildTools: [compiler]
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("tool", """\
provideTools:
compiler:
path: "."
netAccess: True
""")
p = self.generate().walkPackagePath("root")
self.assertTrue(p.getBuildStep().hasNetAccess())
self.assertTrue(p.getPackageStep().hasNetAccess())
def testToolAccessPackage(self):
"""Test that a tool can force network access for package step."""
self.writeConfig({
"bobMinimumVersion" : "0.15",
})
self.writeRecipe("root", """\
root: True
depends:
- name: tool
use: [tools]
buildScript: "true"
packageTools: [compiler]
packageScript: "true"
""")
self.writeRecipe("tool", """\
provideTools:
compiler:
path: "."
netAccess: True
""")
p = self.generate().walkPackagePath("root")
self.assertFalse(p.getBuildStep().hasNetAccess())
self.assertTrue(p.getPackageStep().hasNetAccess())
class TestToolEnvironment(RecipesTmp, TestCase):
def testEnvDefine(self):
"""Test that a tool can set environment."""
self.writeRecipe("root", """\
root: True
depends:
- name: tool
use: [tools]
environment:
FOO: unset
BAR: unset
packageTools: [compiler]
packageVars: [FOO, BAR]
packageScript: "true"
""")
self.writeRecipe("tool", """\
environment:
LOCAL: "foo"
provideTools:
compiler:
path: "."
environment:
FOO: "${LOCAL}"
BAR: "bar"
""")
p = self.generate().walkPackagePath("root")
self.assertEqual(p.getPackageStep().getEnv(),
{"FOO":"foo", "BAR":"bar"})
def testEnvCollides(self):
"""Test that colliding tool environment definitions are detected."""
self.writeRecipe("root", """\
root: True
depends:
- name: tool
use: [tools]
packageTools: [t1, t2]
packageScript: "true"
""")
self.writeRecipe("tool", """\
provideTools:
t1:
path: "."
environment:
FOO: "foo"
BAR: "bar"
t2:
path: "."
environment:
BAR: "bar"
BAZ: "baz"
""")
packages = self.generate()
self.assertRaises(ParseError, packages.getRootPackage)
class TestFingerprints(RecipesTmp, TestCase):
"""Test fingerprint impact.
    Everything is done with a sandbox. Without a sandbox the handling moves to
    the build-id that is implemented in the build backend. That case should be
    covered by the 'fingerprints' black box test.
"""
def setUp(self):
super().setUp()
self.writeRecipe("sandbox", """\
provideSandbox:
paths: ["/"]
""")
def testCheckoutNotFingerprinted(self):
"""Checkout steps are independent of fingerprints"""
self.writeRecipe("root", """\
root: True
depends:
- name: sandbox
use: [sandbox]
checkoutScript: "true"
buildScript: "true"
packageScript: "true"
multiPackage:
"1": { }
"2":
fingerprintScript: "echo bob"
fingerprintIf: True
""")
packages = self.generate(True)
r1 = packages.walkPackagePath("root-1")
r2 = packages.walkPackagePath("root-2")
self.assertEqual(r1.getCheckoutStep().getVariantId(),
r2.getCheckoutStep().getVariantId())
self.assertNotEqual(r1.getBuildStep().getVariantId(),
r2.getBuildStep().getVariantId())
self.assertNotEqual(r1.getPackageStep().getVariantId(),
r2.getPackageStep().getVariantId())
    def testCheckoutToolFingerprintIndependent(self):
        """Checkout steps are not influenced by tool fingerprint scripts.
        The build and package steps, however, must still be affected.
        """
common = textwrap.dedent("""\
root: True
depends:
- name: sandbox
use: [sandbox]
forward: True
- name: tool
use: [tools]
checkoutScript: "true"
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("root1", common + "checkoutTools: [plainTool]\n")
self.writeRecipe("root2", common + "checkoutTools: [fingerprintedTool]\n")
self.writeRecipe("tool", """\
provideTools:
plainTool:
path: "."
fingerprintedTool:
path: "."
fingerprintScript: "echo bob"
fingerprintIf: True
""")
packages = self.generate(True)
r1 = packages.walkPackagePath("root1")
r2 = packages.walkPackagePath("root2")
self.assertEqual(r1.getCheckoutStep().getVariantId(),
r2.getCheckoutStep().getVariantId())
self.assertNotEqual(r1.getBuildStep().getVariantId(),
r2.getBuildStep().getVariantId())
self.assertNotEqual(r1.getPackageStep().getVariantId(),
r2.getPackageStep().getVariantId())
def testResultTransitive(self):
"""Fingerprint is transitive when using a tainted result"""
self.writeRecipe("root", """\
root: True
depends:
- name: sandbox
use: [sandbox]
forward: True
buildScript: "true"
multiPackage:
clean:
depends:
- dep-clean
tainted:
depends:
- dep-tainted
""")
self.writeRecipe("dep", """\
packageScript: "true"
multiPackage:
clean: { }
tainted:
fingerprintScript: "echo bob"
fingerprintIf: True
""")
packages = self.generate(True)
r1 = packages.walkPackagePath("root-clean")
r2 = packages.walkPackagePath("root-tainted")
self.assertNotEqual(r1.getPackageStep().getVariantId(),
r2.getPackageStep().getVariantId())
def testToolNotTransitive(self):
"""Using a fingerprinted tool does not influence digest"""
self.writeRecipe("root", """\
root: True
depends:
- name: sandbox
use: [sandbox]
forward: True
buildTools: [ tool ]
buildScript: "true"
multiPackage:
clean:
depends:
- name: tools-clean
use: [tools]
tainted:
depends:
- name: tools-tainted
use: [tools]
""")
self.writeRecipe("tools", """\
packageScript: "true"
provideTools:
tool: "."
multiPackage:
clean: { }
tainted:
fingerprintScript: "echo bob"
fingerprintIf: True
""")
packages = self.generate(True)
r1 = packages.walkPackagePath("root-clean")
r2 = packages.walkPackagePath("root-tainted")
self.assertEqual(r1.getPackageStep().getVariantId(),
r2.getPackageStep().getVariantId())
self.assertFalse(packages.walkPackagePath("root-clean/tools-clean")
.getPackageStep()._isFingerprinted())
self.assertTrue(packages.walkPackagePath("root-tainted/tools-tainted")
.getPackageStep()._isFingerprinted())
def testSandboxNotTransitive(self):
"""Using a fingerprinted sandbox does not influence digest"""
self.writeRecipe("root", """\
root: True
multiPackage:
clean:
depends:
- name: sandbox-clean
use: [tools]
tainted:
depends:
- name: sandbox-tainted
use: [tools]
""")
self.writeRecipe("sandbox", """\
packageScript: "true"
provideSandbox:
paths: ["/"]
multiPackage:
clean: { }
tainted:
fingerprintScript: "echo bob"
fingerprintIf: True
""")
packages = self.generate(True)
r1 = packages.walkPackagePath("root-clean")
r2 = packages.walkPackagePath("root-tainted")
self.assertEqual(r1.getPackageStep().getVariantId(),
r2.getPackageStep().getVariantId())
self.assertFalse(packages.walkPackagePath("root-clean/sandbox-clean")
.getPackageStep()._isFingerprinted())
self.assertTrue(packages.walkPackagePath("root-tainted/sandbox-tainted")
.getPackageStep()._isFingerprinted())
def testByDefaultIncluded(self):
"""If no 'fingerprintIf' is given the 'fingerprintScript' must be evaluated.
Parsed without sandbox to make sure fingerprint scripts are considered.
"""
self.writeRecipe("root", """\
root: True
fingerprintScript: |
must-be-included
multiPackage:
clean: { }
tainted:
fingerprintScript: |
taint-script
fingerprintIf: True
""")
packages = self.generate()
ps = packages.walkPackagePath("root-clean").getPackageStep()
self.assertFalse(ps._isFingerprinted())
self.assertFalse("must-be-included" in ps._getFingerprintScript())
ps = packages.walkPackagePath("root-tainted").getPackageStep()
self.assertTrue(ps._isFingerprinted())
self.assertTrue("must-be-included" in ps._getFingerprintScript())
self.assertTrue("taint-script" in ps._getFingerprintScript())
def testToolCanEnable(self):
"""Tools must be able to amend and enable fingerprinting."""
self.writeRecipe("root", """\
root: True
depends:
- name: tools
use: [tools]
fingerprintIf: False
fingerprintScript: |
must-not-be-included
packageTools: [tool]
""")
self.writeRecipe("tools", """\
packageScript: "true"
provideTools:
tool:
path: "."
fingerprintScript: "tool-script"
fingerprintIf: True
""")
packages = self.generate()
ps = packages.walkPackagePath("root").getPackageStep()
self.assertTrue(ps._isFingerprinted())
self.assertFalse("must-not-be-included" in ps._getFingerprintScript())
self.assertTrue("tool-script" in ps._getFingerprintScript())
def testDisabledNotIncluded(self):
"""The 'fingerprintScript' must not be included if 'fingerprintIf' is False."""
self.writeClass("unspecified", """\
fingerprintScript: |
unspecified
""")
self.writeClass("static-disabled", """\
fingerprintIf: False
fingerprintScript: |
static-disabled
""")
self.writeClass("static-enabled", """\
fingerprintIf: True
fingerprintScript: |
static-enabled
""")
self.writeClass("dynamic", """\
fingerprintIf: "${ENABLE_FINGERPRINTING}"
fingerprintScript: |
dynamic
""")
self.writeRecipe("root", """\
root: True
inherit:
- unspecified
- static-disabled
- static-enabled
- dynamic
multiPackage:
dyn-enabled:
environment:
ENABLE_FINGERPRINTING: "true"
dyn-disabled:
environment:
ENABLE_FINGERPRINTING: "false"
""")
packages = self.generate()
ps = packages.walkPackagePath("root-dyn-enabled").getPackageStep()
self.assertTrue(ps._isFingerprinted())
self.assertTrue("unspecified" in ps._getFingerprintScript())
self.assertFalse("static-disabled" in ps._getFingerprintScript())
self.assertTrue("static-enabled" in ps._getFingerprintScript())
self.assertTrue("dynamic" in ps._getFingerprintScript())
ps = packages.walkPackagePath("root-dyn-disabled").getPackageStep()
self.assertTrue(ps._isFingerprinted())
self.assertTrue("unspecified" in ps._getFingerprintScript())
self.assertFalse("static-disabled" in ps._getFingerprintScript())
self.assertTrue("static-enabled" in ps._getFingerprintScript())
self.assertFalse("dynamic" in ps._getFingerprintScript())
class TestLayers(RecipesTmp, TestCase):
"""Test layer support.
Test the various properties of layers and their error handling.
"""
def setUp(self):
super().setUp()
self.writeConfig({
"bobMinimumVersion" : "0.15",
"layers" : [ "l1_n1", "l1_n2" ],
})
self.writeRecipe("root", """\
root: True
depends:
- foo
- bar
buildScript: "true"
packageScript: "true"
""")
self.writeConfig({
"bobMinimumVersion" : "0.15",
"layers" : [ "l2" ],
}, layer=["l1_n1"])
self.writeRecipe("foo", """\
depends:
- baz
buildScript: "true"
packageScript: "true"
""",
layer=["l1_n1"])
self.writeRecipe("baz", """\
buildScript: "true"
packageScript: "true"
""",
layer=["l1_n1", "l2"])
self.writeRecipe("bar", """\
buildScript: "true"
packageScript: "true"
""",
layer=["l1_n2"])
def testRegular(self):
"""Test that layers can be parsed"""
self.generate()
def testRecipeObstruction(self):
"""Test that layers must not provide identical recipes"""
self.writeRecipe("foo", """\
depends:
- baz
buildScript: "true"
packageScript: "true"
""",
layer=["l1_n2"])
self.assertRaises(ParseError, self.generate)
def testClassObstruction(self):
"""Test that layers must not provide identical classes"""
self.writeClass("c", "", layer=["l1_n1", "l2"])
self.writeClass("c", "", layer=["l1_n2"])
self.assertRaises(ParseError, self.generate)
def testMinimumVersion(self):
"""Test that (sub-)layers cannot request a higher minimum version"""
self.writeConfig({
"bobMinimumVersion" : "0.14",
"layers" : [ "l1_n1", "l1_n2" ],
})
self.assertRaises(ParseError, self.generate)
class TestIfExpression(RecipesTmp, TestCase):
""" Test if expressions """
def setUp(self):
super().setUp()
self.writeRecipe("root", """\
root: True
depends:
- if: !expr |
"${USE_DEPS}" == "1"
depends:
- bar-1
- name: bar-2
if: !expr |
"${BAR}" == "bar2"
buildScript: "true"
packageScript: "true"
""")
self.writeRecipe("bar", """\
multiPackage:
"1":
buildScript: "true"
"2":
buildScript: "true"
packageScript: "true"
""")
def testRegular(self):
"""Test that if expressions can be parsed"""
self.generate()
def testNested(self):
"""Test that nested if expressions are working"""
recipes = RecipeSet()
recipes.parse()
ps = recipes.generatePackages(lambda x,y: "unused",
envOverrides={"USE_DEPS" : "0", "BAR" : "bar2"})
self.assertRaises(BobError, ps.walkPackagePath, "root/bar-1")
self.assertRaises(BobError, ps.walkPackagePath, "root/bar-2")
ps = recipes.generatePackages(lambda x,y: "unused",
envOverrides={"USE_DEPS" : "1"})
ps.walkPackagePath("root/bar-1")
self.assertRaises(BobError, ps.walkPackagePath, "root/bar-2")
ps = recipes.generatePackages(lambda x,y: "unused",
envOverrides={"USE_DEPS" : "1", "BAR" : "bar2"})
ps.walkPackagePath("root/bar-1")
ps.walkPackagePath("root/bar-2")
class TestNoUndefinedToolsPolicy(RecipesTmp, TestCase):
""" Test behaviour of noUndefinedTools policy"""
def setUp(self):
super().setUp()
self.writeRecipe("root", """\
root: True
packageTools: ["undefined"]
packageScript: "true"
""")
def testOldBehaviour(self):
"""Test that undefined tools are permissable on old policy setting.
The tool is silently ignored and dropped.
"""
self.writeConfig({
"bobMinimumVersion" : "0.17",
"policies" : { "noUndefinedTools" : False },
})
packages = self.generate()
ps = packages.walkPackagePath("root").getPackageStep()
self.assertEqual(list(ps.getTools().keys()), [])
def testNewBehaviour(self):
"""Test that undefined tools generate a parsing error on new policy setting"""
self.writeConfig({
"bobMinimumVersion" : "0.17",
"policies" : { "noUndefinedTools" : True },
})
with self.assertRaises(ParseError):
packages = self.generate()
packages.walkPackagePath("root").getPackageStep()
class TestToolsWeak(RecipesTmp, TestCase):
"""Test behaviour or weak tools"""
def setUp(self):
super().setUp()
self.writeConfig({
"bobMinimumVersion" : "0.17",
"policies" : { "noUndefinedTools" : False },
})
self.writeRecipe("tool", """\
multiPackage:
"1":
provideTools:
tool: "."
packageScript: "foo"
"2":
provideTools:
tool: "."
packageScript: "bat"
""")
def testWeak(self):
"""Weak tools have no impact on package"""
self.writeRecipe("r1", """\
root: True
depends:
- name: tool-1
use: [tools]
packageToolsWeak: [tool]
""")
self.writeRecipe("r2", """\
root: True
depends:
- name: tool-2
use: [tools]
packageToolsWeak: [tool]
""")
packages = self.generate()
r1 = packages.walkPackagePath("r1").getPackageStep()
r2 = packages.walkPackagePath("r2").getPackageStep()
self.assertEqual(r1.getVariantId(), r2.getVariantId())
self.assertNotEqual(r1.getTools()["tool"].getStep().getVariantId(),
r2.getTools()["tool"].getStep().getVariantId())
def testWeakMissing(self):
"""Weak tools that are missing still make a difference"""
self.writeRecipe("r1", """\
root: True
depends:
- name: tool-1
use: [tools]
packageTools: [tool]
""")
self.writeRecipe("r2", """\
root: True
packageTools: [tool]
""")
packages = self.generate()
r1 = packages.walkPackagePath("r1").getPackageStep()
r2 = packages.walkPackagePath("r2").getPackageStep()
self.assertNotEqual(r1.getVariantId(), r2.getVariantId())
def testStrongOverride(self):
"""A weak and strong tool refence is treated as strong"""
self.writeRecipe("r1", """\
root: True
depends:
- name: tool-1
use: [tools]
packageTools: [tool]
packageToolsWeak: [tool]
""")
self.writeRecipe("r2", """\
root: True
depends:
- name: tool-2
use: [tools]
packageTools: [tool]
packageToolsWeak: [tool]
""")
packages = self.generate()
r1 = packages.walkPackagePath("r1").getPackageStep()
r2 = packages.walkPackagePath("r2").getPackageStep()
self.assertNotEqual(r1.getVariantId(), r2.getVariantId())
class TestScmIgnoreUserPolicy(RecipesTmp, TestCase):
""" Test behaviour of scmIgnoreUser policy"""
def setUp(self):
super().setUp()
self.writeRecipe("git", """\
root: True
buildScript: "true"
packageScript: "true"
multiPackage:
a:
checkoutSCM:
scm: git
url: [email protected]:path/to/repo.git
b:
checkoutSCM:
scm: git
url: [email protected]:path/to/repo.git
""")
self.writeRecipe("url", """\
root: True
buildScript: "true"
packageScript: "true"
multiPackage:
a:
checkoutSCM:
scm: url
url: https://[email protected]/file
b:
checkoutSCM:
scm: url
url: https://[email protected]/file
""")
def testOldBehaviour(self):
"""Test that user name of URL is part of the variantId"""
self.writeConfig({
"bobMinimumVersion" : "0.17",
"policies" : { "scmIgnoreUser" : False },
})
packages = self.generate()
git_a = packages.walkPackagePath("git-a").getPackageStep()
git_b = packages.walkPackagePath("git-b").getPackageStep()
self.assertNotEqual(git_a.getVariantId(), git_b.getVariantId())
url_a = packages.walkPackagePath("url-a").getPackageStep()
url_b = packages.walkPackagePath("url-b").getPackageStep()
self.assertNotEqual(url_a.getVariantId(), url_b.getVariantId())
def testNewBehaviour(self):
"""Test that user name in URL is not part of variantId on new policy setting"""
self.writeConfig({
"bobMinimumVersion" : "0.17",
"policies" : { "scmIgnoreUser" : True },
})
packages = self.generate()
git_a = packages.walkPackagePath("git-a").getPackageStep()
git_b = packages.walkPackagePath("git-b").getPackageStep()
self.assertEqual(git_a.getVariantId(), git_b.getVariantId())
url_a = packages.walkPackagePath("url-a").getPackageStep()
url_b = packages.walkPackagePath("url-b").getPackageStep()
self.assertEqual(url_a.getVariantId(), url_b.getVariantId())
class TestPruneImportScmPolicy(RecipesTmp, TestCase):
""" Test behaviour of pruneImportScm policy"""
def setUp(self):
super().setUp()
self.writeRecipe("root", """\
root: True
checkoutSCM:
scm: import
url: ./recipes
buildScript: "true"
packageScript: "true"
""")
def testOldBehaviour(self):
"""Test that prune was disabled in the past by default"""
self.writeConfig({
"bobMinimumVersion" : "0.17",
"policies" : { "pruneImportScm" : False },
})
pkg = self.generate().walkPackagePath("root")
self.assertFalse(pkg.getCheckoutStep().getScmList()[0].getProperties(False)["prune"])
def testNewBehaviour(self):
"""Test that prune is the new default"""
self.writeConfig({
"bobMinimumVersion" : "0.17",
"policies" : { "pruneImportScm" : True },
})
pkg = self.generate().walkPackagePath("root")
self.assertTrue(pkg.getCheckoutStep().getScmList()[0].getProperties(False)["prune"])
| gpl-3.0 |
usc-isi/essex-baremetal-support | nova/db/sqlalchemy/migrate_repo/versions/074_change_flavor_local_gb.py | 5 | 4484 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import select, Column, Integer, MetaData, Table
from nova import exception
from nova import flags
FLAGS = flags.FLAGS
def upgrade_libvirt(instances, instance_types):
# Update instance_types first
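    # The libvirt driver historically used a fixed 10G root disk for every
    # flavor except m1.tiny, which had no root disk; the old local_gb value
    # therefore becomes the ephemeral size.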
tiny = None
for inst_type in instance_types.select().execute():
if inst_type['name'] == 'm1.tiny':
tiny = inst_type['id']
root_gb = 0
else:
root_gb = 10
instance_types.update()\
.values(root_gb=root_gb,
ephemeral_gb=inst_type['local_gb'])\
.where(instance_types.c.id == inst_type['id'])\
.execute()
    # then update instances following the same pattern
instances.update()\
.values(root_gb=10,
ephemeral_gb=instances.c.local_gb)\
.execute()
if tiny is not None:
instances.update()\
.values(root_gb=0,
ephemeral_gb=instances.c.local_gb)\
.where(instances.c.instance_type_id == tiny)\
.execute()
def upgrade_other(instances, instance_types):
for table in (instances, instance_types):
table.update().values(root_gb=table.c.local_gb,
ephemeral_gb=0).execute()
def check_instance_presence(migrate_engine, instances_table):
result = migrate_engine.execute(instances_table.select().limit(1))
return result.fetchone() is not None
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
data_present = check_instance_presence(migrate_engine, instances)
if data_present and not FLAGS.connection_type:
msg = ("Found instance records in database. You must specify "
"connection_type to run migration migration")
raise exception.Error(msg)
instance_types = Table('instance_types', meta, autoload=True)
for table in (instances, instance_types):
root_gb = Column('root_gb', Integer)
root_gb.create(table)
ephemeral_gb = Column('ephemeral_gb', Integer)
ephemeral_gb.create(table)
# Since this migration is part of the work to get all drivers
# working the same way, we need to treat the new root_gb and
# ephemeral_gb columns differently depending on what the
# driver implementation used to behave like.
if FLAGS.connection_type == 'libvirt':
upgrade_libvirt(instances, instance_types)
else:
upgrade_other(instances, instance_types)
default_local_device = instances.c.default_local_device
default_local_device.alter(name='default_ephemeral_device')
for table in (instances, instance_types):
table.drop_column('local_gb')
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
instances = Table('instances', meta, autoload=True)
instance_types = Table('instance_types', meta, autoload=True)
for table in (instances, instance_types):
local_gb = Column('local_gb', Integer)
local_gb.create(table)
try:
for table in (instances, instance_types):
if FLAGS.connection_type == 'libvirt':
column = table.c.ephemeral_gb
else:
column = table.c.root_gb
table.update().values(local_gb=column).execute()
except Exception:
for table in (instances, instance_types):
table.drop_column('local_gb')
raise
default_ephemeral_device = instances.c.default_ephemeral_device
default_ephemeral_device.alter(name='default_local_device')
for table in (instances, instance_types):
table.drop_column('root_gb')
table.drop_column('ephemeral_gb')
| apache-2.0 |
bbbLinux/kernel | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
Antreasgr/ol3 | bin/check-whitespace.py | 29 | 1633 | import logging
import re
import sys
logging.basicConfig(format='%(asctime)s %(name)s: %(message)s',
level=logging.INFO)
logger = logging.getLogger('check-whitespace')
CR_RE = re.compile(r'\r')
LEADING_WHITESPACE_RE = re.compile(r'\s+')
TRAILING_WHITESPACE_RE = re.compile(r'\s+\n\Z')
NO_NEWLINE_RE = re.compile(r'[^\n]\Z')
ALL_WHITESPACE_RE = re.compile(r'\s+\Z')
def check_whitespace(*filenames):
errors = 0
for filename in sorted(filenames):
whitespace = False
for lineno, line in enumerate(open(filename, 'rU')):
if lineno == 0 and LEADING_WHITESPACE_RE.match(line):
logger.info('%s:%d: leading whitespace', filename, lineno + 1)
errors += 1
if CR_RE.search(line):
logger.info('%s:%d: carriage return character in line',
filename, lineno + 1)
errors += 1
if TRAILING_WHITESPACE_RE.search(line):
logger.info('%s:%d: trailing whitespace', filename, lineno + 1)
errors += 1
if NO_NEWLINE_RE.search(line):
logger.info('%s:%d: no newline at end of file', filename,
lineno + 1)
errors += 1
whitespace = ALL_WHITESPACE_RE.match(line)
if whitespace:
logger.info('%s: trailing whitespace at end of file', filename)
errors += 1
return errors
if __name__ == "__main__":
errors = check_whitespace(*sys.argv[1:])
if errors > 0:
logger.error('%d whitespace errors' % (errors,))
sys.exit(1)
| bsd-2-clause |
stuyCTF/stuyCTF-Platform | api/api/common.py | 11 | 5349 | """ The common module contains general-purpose functions potentially used by multiple modules in the system."""
import uuid
from pymongo import MongoClient
from pymongo.errors import ConnectionFailure, InvalidName
from werkzeug.contrib.cache import SimpleCache
from voluptuous import Invalid, MultipleInvalid
from hashlib import md5
allowed_protocols = []
allowed_ports = []
cache = SimpleCache()
admin_emails = None
__connection = None
__client = None
mongo_addr = "127.0.0.1"
mongo_port = 27017
mongo_db_name = ""
external_client = None
def get_conn():
"""
Get a database connection
Ensures that only one global database connection exists per thread.
If the connection does not exist a new one is created and returned.
"""
if external_client is not None:
return external_client
global __client, __connection
if not __connection:
try:
__client = MongoClient(mongo_addr, mongo_port)
__connection = __client[mongo_db_name]
except ConnectionFailure:
raise SevereInternalException("Could not connect to mongo database {} at {}:{}".format(mongo_db_name, mongo_addr, mongo_port))
except InvalidName as error:
raise SevereInternalException("Database {} is invalid! - {}".format(mongo_db_name, error))
return __connection
def esc(s):
"""
Escapes a string to prevent html injection
Returns a string with special HTML characters replaced.
Used to sanitize output to prevent XSS. We looked at
alternatives, but none we could find had an appropriate
scope. In the long term this should be
replaced with a proper sanitization function written by
someone else."""
return s\
.replace('&', '&')\
.replace('<', '<')\
.replace('>', '>')\
.replace('"', '"')\
.replace("'", ''')
def token():
"""
Generate a token; it should be random but does not necessarily have to be secure. Speed is a priority.
Returns:
The randomly generated token
"""
return str(uuid.uuid4().hex)
def hash(string):
"""
Hashes a string
Args:
string: string to be hashed.
Returns:
The hex digest of the string.
"""
return md5(string.encode("utf-8")).hexdigest()
class APIException(Exception):
"""
Exception thrown by the API.
"""
data = {}
def WebSuccess(message=None, data=None):
"""
Successful web request wrapper.
"""
return {
"status": 1,
"message": message,
"data": data
}
def WebError(message=None, data=None):
"""
Unsuccessful web request wrapper.
"""
return {
"status": 0,
"message": message,
"data": data
}
class WebException(APIException):
"""
Errors that are thrown that need to be displayed to the end user.
"""
pass
class InternalException(APIException):
"""
Exceptions thrown by the API constituting mild errors.
"""
pass
class SevereInternalException(InternalException):
"""
Exceptions thrown by the API constituting critical errors.
"""
pass
def flat_multi(multidict):
"""
Flattens any single element lists in a multidict.
Args:
multidict: multidict to be flattened.
Returns:
Partially flattened dictionary.
"""
flat = {}
for key, values in multidict.items():
flat[key] = values[0] if type(values) == list and len(values) == 1 \
else values
return flat
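# e.g. flat_multi({"user": ["bob"], "tags": ["a", "b"]})
# returns {"user": "bob", "tags": ["a", "b"]}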
def check(*callback_tuples):
"""
Voluptuous wrapper function to raise our APIException
Args:
callback_tuples: a callback_tuple should contain (status, msg, callbacks)
Returns:
Returns a function callback for the Schema
"""
def v(value):
"""
Tries to validate the value with the given callbacks.
Args:
value: the item to validate
Raises:
APIException with the given error code and msg.
Returns:
The value if the validation callbacks are satisfied.
"""
for msg, callbacks in callback_tuples:
for callback in callbacks:
try:
result = callback(value)
if not result and type(result) == bool:
raise Invalid()
except Exception:
raise WebException(msg)
return value
return v
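# Usage sketch (hypothetical field name; Schema is voluptuous.Schema):
#     schema = Schema({
#         "username": check(("Usernames must be alphanumeric", [str.isalnum]))
#     })
#     validate(schema, {"username": "alice"})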
def validate(schema, data):
"""
A wrapper around the call to voluptuous schema to raise the proper exception.
Args:
schema: The voluptuous Schema object
data: The validation data for the schema object
Raises:
APIException with status 0 and the voluptuous error message
"""
try:
schema(data)
except MultipleInvalid as error:
raise APIException(0, None, error.msg)
def safe_fail(f, *args, **kwargs):
"""
Safely calls a function that can raise an APIException.
Args:
f: function to call
*args: positional arguments
**kwargs: keyword arguments
Returns:
The function result or None if an exception was raised.
"""
try:
return f(*args, **kwargs)
except APIException:
return None
| mit |
liberation/sesql | sesql/management/commands/build_search_query_index.py | 1 | 4441 | # -*- coding: utf-8 -*-
# Copyright (c) Pilot Systems and Libération, 2011
# This file is part of SeSQL.
# SeSQL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# SeSQL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SeSQL. If not, see <http://www.gnu.org/licenses/>.
"""
This should be run from cron to process search histories and compute stats
"""
from optparse import make_option
from django.core.management.base import BaseCommand
from sesql import config
from sesql.lemmatize import lemmatize
from sesql.models import SearchHit
from sesql.models import SearchQuery
from sesql.models import SearchHitHistoric
from sesql.suggest import phonex
class Command(BaseCommand):
help = """Build SearchQuery index"""
option_list = BaseCommand.option_list + (
make_option('-e','--erode',
action='store_true',
dest='erode',
help = 'tell whether we must erode results or not'),
make_option('-f','--filter',
dest ='filter',
type='int',
default=config.HISTORY_DEFAULT_FILTER,
help = 'how many times a search must occur to be processed'))
def handle(self, *apps, **options):
self.process_hits(options['filter'])
if options['erode']:
self.erode()
def erode(self):
for search_query in SearchQuery.objects.all():
search_query.pondered_search_nb = (config.HISTORY_ALPHA
* search_query.pondered_search_nb
+ (1-config.HISTORY_ALPHA)
* search_query.nb_recent_search)
search_query.nb_recent_search = 0
search_query.save()
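# Sketch of the erosion step: with HISTORY_ALPHA = 0.9 (hypothetical value), a
# query with pondered_search_nb = 10 and nb_recent_search = 5 becomes
# 0.9 * 10 + 0.1 * 5 = 9.5, i.e. an exponential moving average of search activity.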
def process_hits(self, filter_nb):
last_hits = SearchHit.objects.all()
processed_hits = []
for hit in last_hits:
query = hit.query
# blacklist
if query in config.HISTORY_BLACKLIST:
continue
if hit.nb_results < filter_nb:
SearchHitHistoric(query=hit.query,
nb_results=hit.nb_results,
date=hit.date).save()
hit.delete()
continue
# manual get_or_create
try:
search_query = SearchQuery.objects.get(query=query)
created = False
except SearchQuery.DoesNotExist:
search_query = SearchQuery(query=query)
created = True
# if it's a new one, initialize it
if created:
search_query.phonex = phonex(query)
# clean the query; the '_' char causes a buggy clean_query
query = query.replace('_', '')
lems = lemmatize(query.split())
clean_query = [lem for lem in lems if lem]
clean_query = ' '.join(clean_query)
clean_phonex = phonex(clean_query)
search_query.clean_query = clean_query
search_query.clean_phonex = clean_phonex
search_query.nb_total_search = 0
search_query.pondered_search_nb = 0
search_query.nb_recent_search = 0
search_query.nb_results = hit.nb_results
search_query.nb_total_search += 1
search_query.pondered_search_nb += 1
search_query.nb_recent_search += 1
weight = (search_query.pondered_search_nb * config.HISTORY_BETA +
search_query.nb_results * config.HISTORY_GAMMA)
search_query.weight = weight
search_query.save()
# we can now create SearchHitHistoric
SearchHitHistoric(query=hit.query,
nb_results=hit.nb_results,
date=hit.date).save()
hit.delete()
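# e.g. with HISTORY_BETA = 1 and HISTORY_GAMMA = 0.5 (hypothetical values), a query
# with pondered_search_nb = 10 and nb_results = 4 gets weight 10 * 1 + 4 * 0.5 = 12.0.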
| gpl-2.0 |
Antiun/yelizariev-addons | web_sessions_management/main.py | 16 | 3811 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# ThinkOpen Solutions Brasil
# Copyright (C) Thinkopen Solutions <http://www.tkobr.com>.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import openerp
from openerp.osv import fields, osv, orm
from datetime import date, datetime, time, timedelta
from openerp.addons.base.ir.ir_cron import _intervalTypes
from openerp import SUPERUSER_ID
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.http import request
from openerp.tools.translate import _
from openerp import http
_logger = logging.getLogger(__name__)
class Home_tkobr(openerp.addons.web.controllers.main.Home):
@http.route('/web/login', type='http', auth="none")
def web_login(self, redirect=None, **kw):
openerp.addons.web.controllers.main.ensure_db()
if request.httprequest.method == 'GET' and redirect and request.session.uid:
return http.redirect_with_hash(redirect)
if not request.uid:
request.uid = openerp.SUPERUSER_ID
values = request.params.copy()
if not redirect:
redirect = '/web?' + request.httprequest.query_string
values['redirect'] = redirect
try:
values['databases'] = http.db_list()
except openerp.exceptions.AccessDenied:
values['databases'] = None
if request.httprequest.method == 'POST':
old_uid = request.uid
uid = request.session.authenticate(request.session.db,
request.params['login'], request.params['password'])
if uid is not False:
self.save_session(request.cr, uid, request.context)
return http.redirect_with_hash(redirect)
request.uid = old_uid
values['error'] = 'Login failed for one of the following reasons:'
values['reason1'] = '- Wrong login/password'
values['reason2'] = '- User not allowed to have multiple logins'
values['reason3'] = '- User not allowed to login at this specific time or day'
return request.render('web.login', values)
def save_session(self, cr, uid, context=None):
if not request.uid:
request.uid = openerp.SUPERUSER_ID
sid = request.httprequest.session.sid
uid = request.httprequest.session.uid
session_obj = request.registry.get('ir.sessions')
user_obj = request.registry.get('res.users')
u_exp_date, seconds = user_obj.get_expiring_date(cr, request.uid,
uid, context)
return session_obj.create(cr, SUPERUSER_ID, {'user_id': uid,
'session_id': sid,
'expiration_seconds': seconds,
'date_login': fields.datetime.now(),
'date_last_activity': fields.datetime.now(),
'logged_in': True},
context=context)
| lgpl-3.0 |
hieupham007/Titanium_Mobile | apidoc/generators/jsduck_generator.py | 1 | 19686 | #!/usr/bin/env python
#
# Copyright (c) 2011 Appcelerator, Inc. All Rights Reserved.
# Licensed under the Apache Public License (version 2)
import os, sys, re
this_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.abspath(os.path.join(this_dir, "..")))
from common import dict_has_non_empty_member
# We already package the python markdown module in /support/module/support/markdown.
module_support_dir = os.path.abspath(os.path.join(this_dir, "..", "..", "support", "module", "support"))
sys.path.append(module_support_dir)
import markdown
android_support_dir = os.path.abspath(os.path.join(this_dir, "..", "..", "support", "android"))
sys.path.append(android_support_dir)
from tilogger import *
log = TiLogger(None)
all_annotated_apis = None
apis = None
# Avoid obliterating our four spaces pattern with a careless %s:/ /^I/
FOUR_SPACES=' ' + ' '
# compiling REs ahead of time, since we use them heavily.
link_parts_re = re.compile(r"(?:\[([^\]]+?)\]\(([^\)\s]+?)\)|\<([^\>\s]+)\>)", re.MULTILINE)
find_links_re = re.compile(r"(\[[^\]]+?\]\([^\)\s]+?\)|\<[^\>\s]+\>)", re.MULTILINE)
html_scheme_re = re.compile(r"^http:|^https:")
doc_site_url_re = re.compile(r"http://docs.appcelerator.com/titanium/.*(#!.*)")
# we use this to distinguish inline HTML tags from Markdown links. Not foolproof, and
# we should probably find a better technique in the long run.
html_element_re = re.compile("([a-z]|\/)")
try:
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import get_lexer_by_name
except:
print >> sys.stderr, "You don't have Pygments!\n"
print >> sys.stderr, "You can install it with:\n"
print >> sys.stderr, "> easy_install Pygments"
print ""
sys.exit(1)
# write unicode strings safely
def write_utf8(file, string):
file.write(string.encode('utf8', 'replace'))
def convert_string_to_jsduck_link(obj_specifier):
global all_annotated_apis
if obj_specifier in all_annotated_apis:
return obj_specifier
else:
# Maybe a method, property or event
parts = obj_specifier.split(".")
if len(parts) > 1:
parent = ".".join(parts[:-1])
member_name = parts[-1]
if parent in all_annotated_apis:
obj = all_annotated_apis[parent]
list_names = {
"methods": 'method-',
"properties": 'property-',
"events": 'event-'
}
for list_name in list_names.keys():
if hasattr(obj, list_name) and type(getattr(obj, list_name)) == list:
for m in getattr(obj, list_name):
if m.name == member_name:
return parent + '#' + list_names[list_name] + member_name
else:
return "#" + obj_specifier
return obj_specifier
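# e.g. "Titanium.UI.View.add" becomes "Titanium.UI.View#method-add" when "add" is
# a method of a known class; bare class names pass through unchanged.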
def process_markdown_links(s):
new_string = s
results = find_links_re.findall(new_string)
if results is not None and len(results) > 0:
for link in results:
match = link_parts_re.match(link)
if match == None:
print "no match:" + link
continue
# Process links with a defined name [foo](url)
if match.group(1) != None and match.group(2)!= None:
url = match.group(2)
name = match.group(1)
# For simple markdown links, such as <Titanium.Analytics> or <www.google.com>
# skip links that look like HTML elements (<span>).
elif match.group(3) != None and not html_element_re.match(link, 1):
url = match.group(3)
name = None
# Otherwise, our "link" was probably an HTML tag, so we leave it alone
else:
continue
# Process URLs
docs_site_link = False
api_link = False
# For links back to the doc site -- guides pages, videos, etc.
# extract just the part following the hash, to avoid re-loading the site
# [Quick Start](http://docs.appcelerator.com/titanium/2.1/index.html#!/guide/Quick_Start) ->
# [Quick Start](#!/guide/Quick_Start Quick Start)
#
# Generic absolute URLs pass through unchanged
# [Facebook Graph API](http://developers.facebook.com/docs/reference/api/) -> unchanged
if url.startswith("http"):
url_match = doc_site_url_re.match(url)
if url_match:
url = url_match.group(1)
docs_site_link = True
if not name:
name = url
new_string = new_string.replace(link, "[%s](%s)" % (name, url))
else:
# Reformat API object links so jsduck can process them.
# [systemId](Titanium.XML.Entity.systemId -> {@link Titanium.XML.Entity#systemId systemId}
url = convert_string_to_jsduck_link(url)
if name:
new_string = new_string.replace(link, "{@link %s %s}" % (url, name))
else:
new_string = new_string.replace(link, "{@link %s}" % url)
return new_string
def markdown_to_html(s, obj=None):
if s is None or len(s) == 0:
return ""
if "<" in s or "[" in s:
s = process_markdown_links(s)
return markdown.markdown(s)
# remove <p> and </p> if a string is enclosed with them
def remove_p_tags(str):
if str is None or len(str) == 0:
return ""
if str.startswith("<p>"):
str = str[3:]
if str.endswith("</p>"):
str = str[:-4]
return str
# Print two digit version if third digit is 0.
def format_version(version_str):
digits = version_str.split(".")
if len(digits) <= 2:
return version_str
else:
if digits[2] == '0':
return ".".join(digits[0:2])
else:
return ".".join(digits)
def output_properties_for_obj(annotated_obj):
obj = annotated_obj.api_obj
res = []
# Only output platforms if platforms or since versions are different from
# containing object.
if obj.has_key("platforms") or obj.has_key("since"):
for platform in annotated_obj.platforms:
res.append("@platform %s %s" % (platform["name"], format_version(platform["since"])))
if obj.has_key("availability") and obj['availability'] == 'creation':
res.append("@creationOnly")
if obj.has_key("availability") and obj['availability'] == 'not-creation':
res.append("@nonCreation")
if obj.has_key("extends"):
res.append("@extends %s" % (obj["extends"]))
if(len(res) == 0):
return ""
return "\t * " + "\n\t * ".join(res) + "\n"
# @deprecated and @removed are multi-line tags, so this must be
# inserted after the summary and description, or the summary will get
# included as part of the deprecation.
def output_deprecation_for_obj(annotated_obj):
obj = annotated_obj.api_obj
if obj.has_key("deprecated"):
if obj["deprecated"].has_key("removed"):
str = "@removed %s" % (obj["deprecated"]["removed"])
else:
str = "@deprecated %s" % (obj["deprecated"]["since"])
if obj["deprecated"].has_key("notes"):
str += " %s" % markdown_to_html(obj["deprecated"]["notes"])
str = str.replace("\n", "\n\t * ")
return "\t * %s\n" % str
else:
return ""
def output_example(desc, code, convert_empty_code):
if len(desc) == 0 and len(code) == 0:
return None
# sometimes if there is only one example
if len(code) == 0 and convert_empty_code == True:
# no code? probably desc contains the code
code = desc
desc = []
# determine if we need to remove leading spaces from all code lines
need_strip = True
for line in code:
if len(line) > 0 and line[0:4] != FOUR_SPACES:
need_strip = False
break
if need_strip:
stripped_code = []
for line in code:
stripped_code.append(line[4:])
code = stripped_code
# hack - insert &shy; to avoid having the closing comment sign within JSDuck markup
code = "\n".join(code).replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;").replace("*/", "*&shy;/")
desc = "\n".join(desc)
if len(desc) > 0 and len(code) > 0:
return "<p>%s</p><pre>%s</pre>" % (markdown_to_html(desc), code)
elif len(desc) == 0 and len(code) > 0:
return "<pre>%s</pre>" % (code)
elif len(desc) > 0 and len(code) == 0:
return "<p>%s</p>" % markdown_to_html(desc)
def output_examples_for_obj(obj):
res = []
if obj.has_key("examples"):
if len(obj['examples']) == 1:
res.append("<h3>Example</h3>")
else:
res.append("<h3>Examples</h3>")
for example in obj['examples']:
res.append("<h4>%s</h4>" % (example['title']))
body = example['example']
code = []
desc = []
desc_finished = False
prev_line_empty = False
first_code_block = True
for line in body.splitlines():
# parse description part until code starts
# skip empty string between desc and code
if not desc_finished:
if prev_line_empty == True and (line.find(FOUR_SPACES) == 0 or line.find('\t') == 0):
desc_finished = True
else:
# parsing code until code finishes or another description starts
if line.find(FOUR_SPACES) != 0 and line.find('\t') != 0 and len(line) != 0:
# code block finished - another description started - flush content
desc_finished = False
res.append(output_example(desc, code, first_code_block))
first_code_block = False
code = []
desc = []
if not desc_finished:
desc.append(line)
else:
code.append(line)
prev_line_empty = len(line.strip()) == 0
res.append(output_example(desc, code, first_code_block))
res = filter(None, res)
if(len(res) == 0):
return ""
return "\t * " + "\n\t * ".join(res) + "\n"
def transform_type(type):
if isinstance(type, list):
# type consist of more then one type
return "/".join(map((lambda typ: transform_type(typ)), type))
if type.startswith("Array<"):
type = re.sub(r'Array<(.*?)>', r'\1', type)
type = transform_type(type) + "[]"
elif type == "Dictionary":
type = "Dictionary"
elif type.startswith("Dictionary<"):
type = re.sub(r'Dictionary<(.*?)>', r'\1', type)
type = "Dictionary<%s>" % (type)
elif type == 'Callback':
type = "Function"
elif type.startswith("Callback<"):
type = re.sub(r'Callback<(.*?)>', r'\1', type)
type = "Callback<%s>" % (type)
return type
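# Examples derived from the rules above:
#     transform_type("Array<Titanium.UI.View>") -> "Titanium.UI.View[]"
#     transform_type("Callback<Object>")        -> "Callback<Object>"
#     transform_type(["Number", "String"])      -> "Number/String"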
def has_ancestor(one_type, ancestor_name):
if one_type["name"] == ancestor_name:
return True
if "extends" in one_type and one_type["extends"] == ancestor_name:
return True
elif "extends" not in one_type:
if ancestor_name == 'Global':
# special case for "Global" types - they do not have @extends statement
return one_type["name"].find('Global') == 0
return False
else:
parent_type_name = one_type["extends"]
if (parent_type_name is None or not isinstance(parent_type_name, basestring) or
parent_type_name.lower() == "object"):
return False
if not parent_type_name in apis:
log.warn("%s extends %s but %s type information not found" % (one_type["name"],
parent_type_name, parent_type_name))
return False
return has_ancestor(apis[parent_type_name], ancestor_name)
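# e.g. has_ancestor(apis["Titanium.UI.Label"], "Titanium.Proxy") walks the
# "extends" chain upward (hypothetical type name; depends on the loaded apis dict).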
def get_summary_and_description(api_obj):
summary = None
desc = None
if api_obj.has_key("summary"):
summary = markdown_to_html(api_obj["summary"])
if api_obj.has_key("description"):
desc = markdown_to_html(api_obj["description"])
res = u""
if summary != None:
res = u"\t * " + summary + "\n"
if desc != None:
res += u"\t * @description " + desc + "\n"
elif desc != None:
# use description if there is no summary
res = u"\t * " + desc
return res
# Side effect of hiding properties is that the accessors do not get hidden
# Explicitly hide accessors for JSDuck
def hide_accessors(parent_name, property_name):
res = ""
parent_obj = all_annotated_apis[parent_name].api_obj
if "properties" in parent_obj:
parent_properties = parent_obj["properties"]
property_dict = dict((p["name"], p) for p in parent_properties)
if property_name in property_dict:
setter = True;
getter = True;
if "accessors" in property_dict[property_name] and not property_dict[property_name]["accessors"]:
return res
if "availability" in property_dict[property_name] and property_dict[property_name]["availability"] == "creation":
setter = False;
if "permission" in property_dict[property_name]:
if property_dict[property_name]["permission"] == "read-only":
setter = False;
elif property_dict[property_name]["permission"] == "write-only":
getter = False;
upperFirst = property_name[0].upper() + property_name[1:]
if getter:
getter = "get" + upperFirst
res += "/**\n\t * @method " + getter + " \n\t * @hide\n*/\n"
if setter:
setter = "set" + upperFirst
res += "/**\n\t * @method " + setter + " \n\t * @hide\n*/\n"
if "extends" in parent_obj:
parent_name = parent_obj["extends"]
return res + hide_accessors(parent_name, property_name)
else:
return res
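# e.g. excluding an inherited read-write property "visible" also emits
# "@method getVisible @hide" and "@method setVisible @hide" records.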
def generate(raw_apis, annotated_apis, options):
global all_annotated_apis, apis
all_annotated_apis = annotated_apis
apis = raw_apis
if options is not None and (not hasattr(options, "output") or options.output is None or len(options.output) == 0):
log.error ("'output' option not provided")
if options is not None and not os.path.exists(options.output):
os.makedirs(options.output)
# Write the output files
if options is not None:
log.info("Creating titanium.js in %s" % options.output)
output = open(os.path.join(options.output, "titanium.js"), "w")
for name in annotated_apis:
annotated_obj = annotated_apis[name]
write_utf8(output, "/**\n\t * @class %s\n" % (annotated_obj.name))
if annotated_obj.typestr == "module" and annotated_obj.parent is None:
write_utf8(output, '\t * @typestr Module\n')
else:
typestr = ''
if annotated_obj.typestr == "module":
typestr = "Submodule"
elif annotated_obj.typestr == "proxy":
typestr = "Object"
elif annotated_obj.typestr == "method":
typestr = "Function"
elif annotated_obj.typestr == "property":
typestr = "Property"
elif annotated_obj.typestr == "event":
typestr = "Event"
elif annotated_obj.typestr == "parameter":
typestr = "Parameter"
if len(typestr) > 0 and annotated_obj.parent is not None:
write_utf8(output, '\t * @typestr %s of %s\n' % (typestr, annotated_obj.parent.name))
else:
write_utf8(output, '\t * @typestr %s\n' % (typestr))
if not (has_ancestor(raw_apis[name], "Titanium.Proxy") or has_ancestor(raw_apis[name], "Global")):
write_utf8(output, "\t * @pseudo\n")
write_utf8(output, output_properties_for_obj(annotated_obj))
write_utf8(output, get_summary_and_description(annotated_obj.api_obj))
write_utf8(output, output_examples_for_obj(annotated_obj.api_obj))
write_utf8(output, output_deprecation_for_obj(annotated_obj))
write_utf8(output, "\t */\n\n")
p = annotated_obj.properties
for k in p:
# Do not insert records for inherited members
if k.inherited_from:
continue
obj = k.api_obj
getter_ok = True
setter_ok = True
if k.permission == "read-only" or k.availability == "creation":
setter_ok = False
if k.permission == "write-only":
getter_ok = False
if "accessors" in obj and not obj["accessors"]:
getter_ok = setter_ok = False
if k.default is not None:
default_val = remove_p_tags(markdown_to_html(str(k.default)))
write_utf8(output, '/**\n\t * @property [%s=%s]\n' % (k.name, default_val))
else:
write_utf8(output, "/**\n\t * @property %s\n" % (k.name))
if obj.has_key('type'):
write_utf8(output, "\t * @type %s\n" % (transform_type(obj["type"])))
if obj.has_key('permission') and obj["permission"] == "read-only":
write_utf8(output, "\t * @readonly\n")
write_utf8(output, output_properties_for_obj(k))
write_utf8(output, get_summary_and_description(obj))
write_utf8(output, output_examples_for_obj(obj))
write_utf8(output, output_deprecation_for_obj(k))
write_utf8(output, " */\n\n")
p = annotated_obj.methods
for k in p:
# Do not insert records for inherited members
if k.inherited_from:
continue
obj = k.api_obj
write_utf8(output, "/**\n\t * @method %s\n" % (k.name))
write_utf8(output, get_summary_and_description(obj))
write_utf8(output, output_examples_for_obj(obj))
write_utf8(output, output_deprecation_for_obj(k))
if obj.has_key("parameters"):
for param in obj["parameters"]:
if "summary" in param:
summary = param["summary"]
if "repeatable" in param and param["repeatable"]:
repeatable = "..."
else:
repeatable = ""
type = "{" + transform_type(param["type"]) + repeatable + "}" if param.has_key("type") else ""
optional = "(optional)" if param.has_key('optional') and param["optional"] == True else ""
if param.has_key('default'):
default_val = remove_p_tags(markdown_to_html(str(param['default'])))
write_utf8(output, "\t * @param %s [%s=%s] %s\n\t * %s\n" % (type, param['name'], default_val, optional, markdown_to_html(summary)))
else:
write_utf8(output, "\t * @param %s %s %s\n\t * %s\n" % (type, param['name'], optional, markdown_to_html(summary)))
if obj.has_key("returns"):
returntypes = obj["returns"]
summary = ""
# check for the object form first
if "type" in returntypes:
type = "{" + transform_type(returntypes["type"]) + "}"
summary = returntypes["summary"] if "summary" in returntypes else ""
else:
# could be an array, check if it's iterable
if hasattr(returntypes, "__getitem__") or hasattr(returntypes, "__iter__"):
type = ""
for one_returntype in returntypes:
if type == "":
type = "{" + transform_type(one_returntype["type"])
else:
type = type + "/" + transform_type(one_returntype["type"])
# Can't handle multiple summaries, only take one.
if summary == "" and summary in one_returntype:
summary = one_returntype["summary"]
type = type + "}"
else:
log.warn("returns for %s should be an array or a dict." % obj["name"]);
write_utf8(output, "\t * @return %s %s\n" % (type, markdown_to_html(summary)))
else:
write_utf8(output, "\t * @return void\n")
write_utf8(output, output_properties_for_obj(k))
write_utf8(output, "\t*/\n\n")
p = annotated_obj.events
for k in p:
# Do not insert records for inherited members
if k.inherited_from:
continue
obj = k.api_obj
write_utf8(output, "/**\n\t * @event %s\n" % (k.name))
write_utf8(output, get_summary_and_description(obj))
write_utf8(output, output_examples_for_obj(obj))
if k.properties is not None:
for param in k.properties:
if "deprecated" in param.api_obj:
deprecated = "(deprecated)"
else:
deprecated = ""
platforms = "("+" ".join(param.api_obj['platforms'])+")" if param.api_obj.has_key('platforms') and param.api_obj["platforms"] else ""
if param.api_obj.has_key('type'):
write_utf8(output, "\t * @param {%s} %s %s %s\n" % (transform_type(param.api_obj['type']), deprecated, platforms, param.name))
else:
write_utf8(output, "\t * @param %s %s %s\n" % (deprecated, platforms, param.name))
write_utf8(output, get_summary_and_description(param.api_obj))
write_utf8(output, output_properties_for_obj(k))
write_utf8(output, "\t*/\n\n")
# handle excluded members
api_obj = annotated_obj.api_obj
if "excludes" in api_obj:
for member_type in [ "properties", "methods", "events" ]:
if member_type in api_obj["excludes"]:
annotation_string = { "properties":"@property", "methods":"@method",
"events":"@event" }[member_type]
excluded_members = api_obj["excludes"][member_type]
for one_member in excluded_members:
write_utf8(output, "/**\n\t * %s %s \n\t * @hide\n*/\n" % (annotation_string, one_member))
# Explicitly hide accessors
if member_type == "properties" and "extends" in api_obj:
parent_name = api_obj["extends"]
hide_methods = hide_accessors(parent_name, one_member)
if hide_methods:
write_utf8(output, "%s" % (hide_methods))
output.close()
| apache-2.0 |
peoplepower/botengine | com.ppc.Bot/domain.py | 1 | 2296 | '''
Created on May 25, 2017
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', which is part of this source code package.
@author: David Moss
'''
# Organization short name, which allows us to send emails to this organization's administrators
ORGANIZATION_SHORT_NAME = "family"
# NOTE: Name of the service
SERVICE_NAME = "People Power Family"
# Name of the pack of products people can purchase
PACK_NAME = "Family Pack"
# URL to purchase the pack of products
PACK_PURCHASE_URL = ""
# Professional Monitoring Subscription Name
PROFESSIONAL_MONITORING_SUBSCRIPTION_NAME = "Avantguard"
# Professional Monitoring Callback Number
PROFESSIONAL_MONITORING_CALLBACK_NUMBER = "1-844-950-0582"
# Notification case-sensitive brand, which can be different from the organization short name. Use this to force a specific branded template.
# See https://presence.atlassian.net/wiki/spaces/CLOUD/pages/23385710/Branding+Configuration
ORGANIZATION_BRAND = ""
# Default language for this brand
DEFAULT_LANGUAGE = 'en'
# Default timezone for this brand
DEFAULT_TIMEZONE = 'US/Pacific'
# MixPanel token
MIXPANEL_TOKEN = None
# Amplitude tokens
AMPLITUDE_TOKENS = {
"app.presencepro.com": "",
"sboxall.presencepro.com": ""
}
# iOS download URL
APP_IOS_URL = ""
# Android download URL
APP_ANDROID_URL = ""
# Customer support scheduling URL
CUSTOMER_SUPPORT_URL = ""
# True to declare that the people who run this service are part of a "drug trial" and in the control group
DRUG_TESTING_CONTROL_GROUP = False
# True to allow security events to escalate to professional monitoring. False to keep it trusted circle monitored.
ALLOW_PROFESSIONAL_MONITORING_SECURITY = False
# True to allow the emergency call center to be contacted twice if the action plan calls for it, usually to dispatch.
ALLOW_SECONDARY_PROFESSIONAL_MONITORING = True
# Available services to tailor messaging to the users
CARE_SERVICES = True
ENERGY_SERVICES = True
SECURITY_SERVICES = True
# True if this brand can support a siren.
HAS_SIREN = True
# Automatic tagging for people who run this service.
ADD_TAGS = []
REMOVE_TAGS = []
# User facing modes. { "MODE": "User Facing Name" }
USER_FACING_MODES = {
"HOME": "OFF",
"AWAY": "AWAY",
"STAY": "STAY",
"TEST": "TEST"
}
| apache-2.0 |
thebongy/MakeMyOutputs | docx/styles/styles.py | 12 | 5625 | # encoding: utf-8
"""
Styles object, container for all objects in the styles part.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from warnings import warn
from . import BabelFish
from .latent import LatentStyles
from ..shared import ElementProxy
from .style import BaseStyle, StyleFactory
class Styles(ElementProxy):
"""
A collection providing access to the styles defined in a document.
Accessed using the :attr:`.Document.styles` property. Supports ``len()``,
iteration, and dictionary-style access by style name.
"""
__slots__ = ()
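# Usage sketch (style name is illustrative): given a Document instance `document`,
#     styles = document.styles
#     normal = styles['Normal']   # dictionary-style access by UI name
#     count = len(styles)         # number of defined styles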
def __contains__(self, name):
"""
Enables `in` operator on style name.
"""
internal_name = BabelFish.ui2internal(name)
for style in self._element.style_lst:
if style.name_val == internal_name:
return True
return False
def __getitem__(self, key):
"""
Enables dictionary-style access by UI name. Lookup by style id is
deprecated, triggers a warning, and will be removed in a near-future
release.
"""
style_elm = self._element.get_by_name(BabelFish.ui2internal(key))
if style_elm is not None:
return StyleFactory(style_elm)
style_elm = self._element.get_by_id(key)
if style_elm is not None:
msg = (
'style lookup by style_id is deprecated. Use style name as '
'key instead.'
)
warn(msg, UserWarning)
return StyleFactory(style_elm)
raise KeyError("no style with name '%s'" % key)
def __iter__(self):
return (StyleFactory(style) for style in self._element.style_lst)
def __len__(self):
return len(self._element.style_lst)
def add_style(self, name, style_type, builtin=False):
"""
Return a newly added style object of *style_type* and identified
by *name*. A builtin style can be defined by passing True for the
optional *builtin* argument.
"""
style_name = BabelFish.ui2internal(name)
if style_name in self:
raise ValueError("document already contains style '%s'" % name)
style = self._element.add_style_of_type(
style_name, style_type, builtin
)
return StyleFactory(style)
def default(self, style_type):
"""
Return the default style for *style_type* or |None| if no default is
defined for that type (not common).
"""
style = self._element.default_for(style_type)
if style is None:
return None
return StyleFactory(style)
def get_by_id(self, style_id, style_type):
"""
Return the style of *style_type* matching *style_id*. Returns the
default for *style_type* if *style_id* is not found or is |None|, or
if the style having *style_id* is not of *style_type*.
"""
if style_id is None:
return self.default(style_type)
return self._get_by_id(style_id, style_type)
def get_style_id(self, style_or_name, style_type):
"""
Return the id of the style corresponding to *style_or_name*, or
|None| if *style_or_name* is |None|. If *style_or_name* is not
a style object, the style is looked up using *style_or_name* as
a style name, raising |ValueError| if no style with that name is
defined. Raises |ValueError| if the target style is not of
*style_type*.
"""
if style_or_name is None:
return None
elif isinstance(style_or_name, BaseStyle):
return self._get_style_id_from_style(style_or_name, style_type)
else:
return self._get_style_id_from_name(style_or_name, style_type)
@property
def latent_styles(self):
"""
A |LatentStyles| object providing access to the default behaviors for
latent styles and the collection of |_LatentStyle| objects that
define overrides of those defaults for a particular named latent
style.
"""
return LatentStyles(self._element.get_or_add_latentStyles())
def _get_by_id(self, style_id, style_type):
"""
Return the style of *style_type* matching *style_id*. Returns the
default for *style_type* if *style_id* is not found or if the style
having *style_id* is not of *style_type*.
"""
style = self._element.get_by_id(style_id)
if style is None or style.type != style_type:
return self.default(style_type)
return StyleFactory(style)
def _get_style_id_from_name(self, style_name, style_type):
"""
Return the id of the style of *style_type* corresponding to
*style_name*. Returns |None| if that style is the default style for
*style_type*. Raises |ValueError| if the named style is not found in
the document or does not match *style_type*.
"""
return self._get_style_id_from_style(self[style_name], style_type)
def _get_style_id_from_style(self, style, style_type):
"""
Return the id of *style*, or |None| if it is the default style of
*style_type*. Raises |ValueError| if style is not of *style_type*.
"""
if style.type != style_type:
raise ValueError(
"assigned style is type %s, need type %s" %
(style.type, style_type)
)
if style == self.default(style_type):
return None
return style.style_id
| mit |
lubomir/django-rest-framework | setup.py | 47 | 2970 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
import shutil
import sys
from setuptools import setup
def get_version(package):
"""
Return package version as listed in `__version__` in `__init__.py`.
"""
init_py = open(os.path.join(package, '__init__.py')).read()
return re.search("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
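# e.g. if rest_framework/__init__.py contains __version__ = '3.0.0' (illustrative),
# get_version('rest_framework') returns '3.0.0'.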
def get_packages(package):
"""
Return root package and all sub-packages.
"""
return [dirpath
for dirpath, dirnames, filenames in os.walk(package)
if os.path.exists(os.path.join(dirpath, '__init__.py'))]
def get_package_data(package):
"""
Return all files under the root package, that are not in a
package themselves.
"""
walk = [(dirpath.replace(package + os.sep, '', 1), filenames)
for dirpath, dirnames, filenames in os.walk(package)
if not os.path.exists(os.path.join(dirpath, '__init__.py'))]
filepaths = []
for base, filenames in walk:
filepaths.extend([os.path.join(base, filename)
for filename in filenames])
return {package: filepaths}
version = get_version('rest_framework')
if sys.argv[-1] == 'publish':
if os.system("pip freeze | grep wheel"):
print("wheel not installed.\nUse `pip install wheel`.\nExiting.")
sys.exit()
if os.system("pip freeze | grep twine"):
print("twine not installed.\nUse `pip install twine`.\nExiting.")
sys.exit()
os.system("python setup.py sdist bdist_wheel")
os.system("twine upload dist/*")
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
shutil.rmtree('dist')
shutil.rmtree('build')
shutil.rmtree('djangorestframework.egg-info')
sys.exit()
setup(
name='djangorestframework',
version=version,
url='http://www.django-rest-framework.org',
license='BSD',
description='Web APIs for Django, made easy.',
author='Tom Christie',
author_email='[email protected]', # SEE NOTE BELOW (*)
packages=get_packages('rest_framework'),
package_data=get_package_data('rest_framework'),
install_requires=[],
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP',
]
)
# (*) Please direct queries to the discussion group, rather than to me directly
# Doing so helps ensure your question is helpful to other users.
# Queries directly to my email are likely to receive a canned response.
#
# Many thanks for your understanding.
| bsd-2-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/sklearn/examples/datasets/plot_iris_dataset.py | 1 | 2738 |
"""
=========================================================
The Iris Dataset
=========================================================
This data set consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows are the samples and the columns are:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# nodebox section
if __name__ == '__builtin__':
# were in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Set1,
edgecolor='k')
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=y,
cmap=plt.cm.Set1, edgecolor='k', s=40)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
#plt.show()
pltshow(plt)
| mit |
le9i0nx/ansible | test/units/modules/network/nxos/test_nxos_config.py | 5 | 5079 | #!/usr/bin/env python
#
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_config
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosConfigModule(TestNxosModule):
module = nxos_config
def setUp(self):
super(TestNxosConfigModule, self).setUp()
self.mock_get_config = patch('ansible.modules.network.nxos.nxos_config.get_config')
self.get_config = self.mock_get_config.start()
self.mock_load_config = patch('ansible.modules.network.nxos.nxos_config.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestNxosConfigModule, self).tearDown()
self.mock_get_config.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None, device=''):
self.get_config.return_value = load_fixture('nxos_config', 'config.cfg')
self.load_config.return_value = None
def test_nxos_config_no_change(self):
args = dict(lines=['hostname localhost'])
set_module_args(args)
result = self.execute_module()
def test_nxos_config_src(self):
args = dict(src=load_fixture('nxos_config', 'candidate.cfg'))
set_module_args(args)
result = self.execute_module(changed=True)
config = ['hostname switch01', 'interface Ethernet1',
'description test interface', 'no shutdown', 'ip routing']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
def test_nxos_config_lines(self):
args = dict(lines=['hostname switch01', 'ip domain-name eng.ansible.com'])
set_module_args(args)
result = self.execute_module(changed=True)
config = ['hostname switch01']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
def test_nxos_config_before(self):
args = dict(lines=['hostname switch01', 'ip domain-name eng.ansible.com'],
before=['before command'])
set_module_args(args)
result = self.execute_module(changed=True)
config = ['before command', 'hostname switch01']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
self.assertEqual('before command', result['commands'][0])
def test_nxos_config_after(self):
args = dict(lines=['hostname switch01', 'ip domain-name eng.ansible.com'],
after=['after command'])
set_module_args(args)
result = self.execute_module(changed=True)
config = ['after command', 'hostname switch01']
self.assertEqual(sorted(config), sorted(result['commands']), result['commands'])
self.assertEqual('after command', result['commands'][-1])
def test_nxos_config_parents(self):
args = dict(lines=['ip address 1.2.3.4/5', 'no shutdown'], parents=['interface Ethernet10'])
set_module_args(args)
result = self.execute_module(changed=True)
config = ['interface Ethernet10', 'ip address 1.2.3.4/5', 'no shutdown']
self.assertEqual(config, result['commands'], result['commands'])
def test_nxos_config_src_and_lines_fails(self):
args = dict(src='foo', lines='foo')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_match_exact_requires_lines(self):
args = dict(match='exact')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_match_strict_requires_lines(self):
args = dict(match='strict')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_replace_block_requires_lines(self):
args = dict(replace='block')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_replace_config_requires_src(self):
args = dict(replace='config')
set_module_args(args)
result = self.execute_module(failed=True)
def test_nxos_config_backup_returns__backup__(self):
args = dict(backup=True)
set_module_args(args)
result = self.execute_module()
self.assertIn('__backup__', result)
| gpl-3.0 |
KarolBedkowski/photomagic | photomagick/filters/exposure.py | 1 | 1270 | #!/usr/bin/python
# -*- coding: utf-8 -*-
__plugins__ = ('LowContrast', 'HiContrast', 'OverExposed', 'UnderExposed')
__version__ = '2011-03-20'
__author__ = 'Karol Będkowski'
__copyright__ = "Copyright (c) Karol Będkowski, 2011"
import ImageEnhance
from photomagick.common.base_filter import BaseFilter
from photomagick.common.const import CATEGORY_BASE
class LowContrast(BaseFilter):
NAME = _('Low Contrast')
STEPS = 2
CATEGORY = CATEGORY_BASE
def process(self, image):
yield 'Contrast...', image
image = ImageEnhance.Contrast(image).enhance(0.8)
yield 'Done', image
class HiContrast(BaseFilter):
NAME = _('Hi Contrast')
STEPS = 2
CATEGORY = CATEGORY_BASE
def process(self, image):
yield 'Contrast...', image
image = ImageEnhance.Contrast(image).enhance(1.4)
yield 'Done', image
class OverExposed(BaseFilter):
NAME = _('Over Exposed')
STEPS = 2
CATEGORY = CATEGORY_BASE
def process(self, image):
yield 'Brightness...', image
image = ImageEnhance.Brightness(image).enhance(1.4)
yield 'Done', image
class UnderExposed(BaseFilter):
NAME = _('Under Exposed')
STEPS = 2
CATEGORY = CATEGORY_BASE
def process(self, image):
yield 'Brightness...', image
image = ImageEnhance.Brightness(image).enhance(0.8)
yield 'Done', image
| gpl-2.0 |
hasgeek/coaster | coaster/views/classview.py | 1 | 33503 | """
Class-based views
-----------------
Group related views into a class for easier management.
"""
from functools import update_wrapper, wraps
from typing import Any, Dict, List, Optional, Tuple
from urllib.parse import urlsplit, urlunsplit
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy.orm.descriptor_props import SynonymProperty
from sqlalchemy.orm.mapper import Mapper
from sqlalchemy.orm.properties import RelationshipProperty
from sqlalchemy.orm.query import Query
# mypy can't find _request_ctx_stack in flask
from flask import ( # type: ignore[attr-defined]
Blueprint,
_request_ctx_stack,
abort,
has_request_context,
make_response,
redirect,
request,
)
from werkzeug.local import LocalProxy
from werkzeug.routing import parse_rule
from ..auth import add_auth_attribute, current_auth
from ..typing import SimpleDecorator
from ..utils import InspectableSet
__all__ = [
'rulejoin',
'current_view', # Functions
'ClassView',
'ModelView', # View base classes
'route',
'viewdata',
'url_change_check',
'requires_roles', # View decorators
'UrlChangeCheck',
'UrlForView',
'InstanceLoader', # Mixin classes
]
#: Type for URL rules in classviews
RouteRuleOptions = Dict[str, Any]
#: A proxy object that holds the currently executing :class:`ClassView` instance,
#: for use in templates as context. Exposed to templates by
#: :func:`coaster.app.init_app`. Note that the current view handler method within the
#: class is named :attr:`~current_view.current_handler`, so to examine it, use
#: :attr:`current_view.current_handler`.
current_view = LocalProxy(
lambda: getattr(_request_ctx_stack.top, 'current_view', None)
if has_request_context()
else None
)
# :func:`route` wraps :class:`ViewHandler` so that it can have an independent __doc__
def route(rule, **options):
"""
Decorator for defining routes on a :class:`ClassView` and its methods.
Accepts the same parameters that Flask's ``app.``:meth:`~flask.Flask.route`
accepts. See :class:`ClassView` for usage notes.
"""
return ViewHandler(rule, rule_options=options)
def viewdata(**kwargs):
"""
Decorator for adding additional data to a view method, to be used
alongside :func:`route`. This data is accessible as the ``data``
attribute on the view handler.
"""
return ViewHandler(None, viewdata=kwargs)
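# Minimal usage sketch (class and rule names are illustrative):
#
#     @route('/doc')
#     class DocumentView(ClassView):
#         @route('')
#         @viewdata(title="Index")
#         def index(self):
#             return 'index'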
def rulejoin(class_rule, method_rule):
"""
Join class and method rules. Used internally by :class:`ClassView` to
combine rules from the :func:`route` decorators on the class and on the
individual view handler methods::
>>> rulejoin('/', '')
'/'
>>> rulejoin('/', 'first')
'/first'
>>> rulejoin('/first', '/second')
'/second'
>>> rulejoin('/first', 'second')
'/first/second'
>>> rulejoin('/first/', 'second')
'/first/second'
>>> rulejoin('/first/<second>', '')
'/first/<second>'
>>> rulejoin('/first/<second>', 'third')
'/first/<second>/third'
"""
if method_rule.startswith('/'):
return method_rule
return (
class_rule
+ ('' if class_rule.endswith('/') or not method_rule else '/')
+ method_rule
)
class ViewHandler:
"""
Internal object created by the :func:`route` and :func:`viewdata` functions.
"""
def __init__(
self,
rule,
rule_options=None,
viewdata=None, # skipcq: PYL-W0621
requires_roles=None, # skipcq: PYL-W0621
):
if rule is not None:
self.routes = [(rule, rule_options or {})]
else:
self.routes = []
self.data = viewdata or {}
self.requires_roles = requires_roles or {}
self.endpoints = set()
# Stubs for the decorator to fill
self.name = None
self.endpoint = None
self.func = None
def reroute(self, f):
# Use type(self) instead of ViewHandler so this works for (future) subclasses
# of ViewHandler
r = type(self)(None)
r.routes = self.routes
r.data = self.data
return r.__call__(f)
def copy_for_subclass(self):
# Like reroute, but just a copy
r = type(self)(None)
r.routes = self.routes
r.data = self.data
r.func = (
self.func
) # Copy func but not wrapped_func, as it will be re-wrapped by init_app
r.name = self.name
r.endpoint = self.endpoint
r.__doc__ = self.__doc__
r.endpoints = set()
return r
def __call__(self, decorated):
# Are we decorating a ClassView? If so, annotate the ClassView and return it
if type(decorated) is type and issubclass(decorated, ClassView):
if '__routes__' not in decorated.__dict__:
decorated.__routes__ = []
decorated.__routes__.extend(self.routes)
return decorated
# Are we decorating another ViewHandler? If so, copy routes and
# wrapped method from it.
if isinstance(decorated, (ViewHandler, ViewHandlerWrapper)):
self.routes.extend(decorated.routes)
newdata = dict(decorated.data)
newdata.update(self.data)
self.data = newdata
self.func = decorated.func
# If neither ClassView nor ViewHandler, assume it's a callable method
else:
self.func = decorated
self.name = self.func.__name__
# self.endpoint will change once init_app calls __set_name__
self.endpoint = self.name
self.__doc__ = self.func.__doc__ # skipcq: PYL-W0201
return self
# Normally Python 3.6+, but called manually by :meth:`ClassView.init_app`
def __set_name__(self, owner, name):
self.name = name
self.endpoint = owner.__name__ + '_' + self.name
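# e.g. a handler method named "index" on class MyView gets endpoint "MyView_index".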
def __get__(self, obj, cls=None):
return ViewHandlerWrapper(self, obj, cls)
def init_app(self, app, cls, callback=None):
"""
Register routes for a given app and :class:`ClassView` class. At the
time of this call, we will always be in the view class even if we were
originally defined in a base class. :meth:`ClassView.init_app`
ensures this. :meth:`init_app` therefore takes the liberty of adding
additional attributes to ``self``:
* :attr:`wrapped_func`: The function wrapped with all decorators added by the
class
* :attr:`view_func`: The view function registered as a Flask view handler
* :attr:`endpoints`: The URL endpoints registered to this view handler
"""
def view_func(**view_args):
# view_func does not make any reference to variables from init_app to avoid
# creating a closure. Instead, the code further below sticks all relevant
# variables into view_func's namespace.
# Instantiate the view class. We depend on its __init__ requiring no
# parameters
viewinst = view_func.view_class()
# Declare ourselves (the ViewHandler) as the current view. The wrapper makes
# equivalence tests possible, such as ``self.current_handler == self.index``
viewinst.current_handler = ViewHandlerWrapper(
view_func.view, viewinst, view_func.view_class
)
# Place view arguments in the instance, in case they are needed outside the
# dispatch process
viewinst.view_args = view_args
# Place the view instance on the request stack for :obj:`current_view` to
# discover
_request_ctx_stack.top.current_view = viewinst
# Call the view instance's dispatch method. View classes can customise this
# for desired behaviour.
return viewinst.dispatch_request(view_func.wrapped_func, view_args)
# Decorate the wrapped view function with the class's desired decorators.
# Mixin classes may provide their own decorators, and all of them will be
# applied. The oldest defined decorators (from mixins) will be applied first,
# and the class's own decorators last. Within the list of decorators, we reverse
# the list again, so that a list specified like this:
#
# __decorators__ = [first, second]
#
# Has the same effect as writing this:
#
# @first
# @second
# def myview(self):
# pass
wrapped_func = self.func
for base in reversed(cls.__mro__):
if '__decorators__' in base.__dict__:
for decorator in reversed(base.__dict__['__decorators__']):
wrapped_func = decorator(wrapped_func)
wrapped_func.__name__ = self.name # See below
# Make view_func resemble the underlying view handler method...
view_func = update_wrapper(view_func, wrapped_func)
# ...but give view_func the name of the method in the class (self.name),
# self.name will differ from __name__ only if the view handler method
# was defined outside the class and then added to the class with a
# different name.
view_func.__name__ = self.name
# Stick `wrapped_func` and `cls` into view_func to avoid creating a closure.
view_func.wrapped_func = wrapped_func
view_func.view_class = cls
view_func.view = self
# Keep a copy of these functions (we already have self.func)
self.wrapped_func = wrapped_func # skipcq: PYL-W0201
self.view_func = view_func # skipcq: PYL-W0201
for class_rule, class_options in cls.__routes__:
for method_rule, method_options in self.routes:
use_options = dict(method_options)
use_options.update(class_options)
endpoint = use_options.pop('endpoint', self.endpoint)
self.endpoints.add(endpoint)
use_rule = rulejoin(class_rule, method_rule)
app.add_url_rule(use_rule, endpoint, view_func, **use_options)
if callback:
callback(use_rule, endpoint, view_func, **use_options)
class ViewHandlerWrapper:
"""Wrapper for a view at runtime"""
def __init__(self, viewh, obj, cls=None):
# obj is the ClassView instance
self._viewh = viewh
self._obj = obj
self._cls = cls
def __call__(self, *args, **kwargs):
"""Treat this like a call to the method (and not to the view)"""
# As per the __decorators__ spec, we call .func, not .wrapped_func
return self._viewh.func(self._obj, *args, **kwargs)
def __getattr__(self, name):
return getattr(self._viewh, name)
def __eq__(self, other):
return (
isinstance(other, ViewHandlerWrapper)
and self._viewh == other._viewh
and self._obj == other._obj
and self._cls == other._cls
)
def __ne__(self, other): # pragma: no cover
return not self.__eq__(other)
def is_available(self):
"""Indicates whether this view is available in the current context"""
if hasattr(self._viewh.wrapped_func, 'is_available'):
return self._viewh.wrapped_func.is_available(self._obj)
return True
class ClassView:
"""
Base class for defining a collection of views that are related to each
other. Subclasses may define methods decorated with :func:`route`. When
:meth:`init_app` is called, these will be added as routes to the app.
Typical use::
@route('/')
class IndexView(ClassView):
@viewdata(title="Homepage")
@route('')
            def index(self):
return render_template('index.html.jinja2')
@route('about')
@viewdata(title="About us")
            def about(self):
return render_template('about.html.jinja2')
IndexView.init_app(app)
The :func:`route` decorator on the class specifies the base rule, which is
prefixed to the rule specified on each view method. This example produces
two view handlers, for ``/`` and ``/about``. Multiple :func:`route`
decorators may be used in both places.
The :func:`viewdata` decorator can be used to specify additional data, and
may appear either before or after the :func:`route` decorator, but only
adjacent to it. Data specified here is available as the :attr:`data`
attribute on the view handler, or at runtime in templates as
``current_view.current_handler.data``.
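    In the ``IndexView`` example above, ``self.current_handler.data`` inside
    the ``index`` handler would be ``{'title': "Homepage"}``.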
A rudimentary CRUD view collection can be assembled like this::
@route('/doc/<name>')
class DocumentView(ClassView):
@route('')
@render_with('mydocument.html.jinja2', json=True)
def view(self, name):
document = MyDocument.query.filter_by(name=name).first_or_404()
return document.current_access()
@route('edit', methods=['POST'])
@requestform('title', 'content')
def edit(self, name, title, content):
document = MyDocument.query.filter_by(name=name).first_or_404()
document.title = title
document.content = content
return 'edited!'
DocumentView.init_app(app)
See :class:`ModelView` for a better way to build views around a model.
"""
# If the class did not get a @route decorator, provide a fallback route
__routes__: List[Tuple[str, RouteRuleOptions]] = [('', {})]
#: Track all the views registered in this class
__views__ = ()
#: Subclasses may define decorators here. These will be applied to every
#: view handler in the class, but only when called as a view and not
#: as a Python method call.
__decorators__: List[SimpleDecorator] = []
    #: Indicates whether :meth:`is_available` should simply return `True`
#: without conducting a test. Subclasses should not set this flag. It will
#: be set by :meth:`init_app` if any view handler is missing an
#: ``is_available`` method, as it implies that view is always available.
is_always_available = False
#: When a view is called, this will point to the current view handler,
#: an instance of :class:`ViewHandler`.
current_handler = None
#: When a view is called, this will be replaced with a dictionary of
#: arguments to the view.
view_args: Optional[dict] = None
def __eq__(self, other):
return type(other) is type(self)
def dispatch_request(self, view, view_args):
"""
View dispatcher that calls before_request, the view, and then after_request.
Subclasses may override this to provide a custom flow. :class:`ModelView`
does this to insert a model loading phase.
:param view: View method wrapped in specified decorators. The dispatcher
must call this
:param dict view_args: View arguments, to be passed on to the view method
"""
# Call the :meth:`before_request` method
resp = self.before_request()
if resp:
return self.after_request(make_response(resp))
        # Call the view handler method, then pass the response to :meth:`after_request`
return self.after_request(make_response(view(self, **view_args)))
def before_request(self):
"""
This method is called after the app's ``before_request`` handlers, and
before the class's view method. Subclasses and mixin classes may define
their own :meth:`before_request` to pre-process requests. This method
receives context via `self`, in particular via :attr:`current_handler`
and :attr:`view_args`.
"""
return None
def after_request(self, response):
"""
This method is called with the response from the view handler method.
It must return a valid response object. Subclasses and mixin classes
may override this to perform any necessary post-processing::
class MyView(ClassView):
...
def after_request(self, response):
response = super().after_request(response)
... # Process here
return response
:param response: Response from the view handler method
:return: Response object
"""
return response
def is_available(self):
"""
Returns `True` if *any* view handler in the class is currently
available via its `is_available` method.
"""
if self.is_always_available:
return True
for viewname in self.__views__:
if getattr(self, viewname).is_available():
return True
return False
@classmethod
def __get_raw_attr(cls, name):
for base in cls.__mro__:
if name in base.__dict__:
return base.__dict__[name]
raise AttributeError(name)
@classmethod
def add_route_for(cls, _name, rule, **options):
"""
Add a route for an existing method or view. Useful for modifying routes
that a subclass inherits from a base class::
class BaseView(ClassView):
def latent_view(self):
return 'latent-view'
@route('other')
def other_view(self):
return 'other-view'
@route('/path')
class SubView(BaseView):
pass
SubView.add_route_for('latent_view', 'latent')
SubView.add_route_for('other_view', 'another')
SubView.init_app(app)
# Created routes:
            # /path/latent -> SubView.latent_view (added)
            # /path/other -> SubView.other_view (inherited)
            # /path/another -> SubView.other_view (added)
:param _name: Name of the method or view on the class
:param rule: URL rule to be added
:param options: Additional options for :meth:`~flask.Flask.add_url_rule`
"""
setattr(cls, _name, route(rule, **options)(cls.__get_raw_attr(_name)))
@classmethod
def init_app(cls, app, callback=None):
"""
        Register views on an app. If `callback` is specified, it will
be called after ``app.``:meth:`~flask.Flask.add_url_rule`, with the same
parameters.
"""
processed = set()
cls.__views__ = set()
cls.is_always_available = False
for base in cls.__mro__:
for name, attr in base.__dict__.items():
if name in processed:
continue
processed.add(name)
if isinstance(attr, ViewHandler):
if base != cls: # Copy ViewHandler instances into subclasses
# TODO: Don't do this during init_app. Use a metaclass
# and do this when the class is defined.
attr = attr.copy_for_subclass()
setattr(cls, name, attr)
attr.__set_name__(cls, name) # Required for Python < 3.6
cls.__views__.add(name)
attr.init_app(app, cls, callback=callback)
if not hasattr(attr.wrapped_func, 'is_available'):
cls.is_always_available = True
class ModelView(ClassView):
"""
Base class for constructing views around a model. Functionality is provided
via mixin classes that must precede :class:`ModelView` in base class order.
Two mixins are provided: :class:`UrlForView` and :class:`InstanceLoader`.
Sample use::
@route('/doc/<document>')
class DocumentView(UrlForView, InstanceLoader, ModelView):
model = Document
route_model_map = {
'document': 'name'
}
@route('')
@render_with(json=True)
def view(self):
return self.obj.current_access()
Document.views.main = DocumentView
DocumentView.init_app(app)
Views will not receive view arguments, unlike in :class:`ClassView`. If
necessary, they are available as `self.view_args`.
"""
#: The model that this view class represents, to be specified by subclasses.
model: Optional[Any] = None
#: A base query to use if the model needs special handling.
query: Optional[Query] = None
#: A mapping of URL rule variables to attributes on the model. For example,
#: if the URL rule is ``/<parent>/<document>``, the attribute map can be::
#:
#: model = MyModel
#: route_model_map = {
#: 'document': 'name', # Map 'document' in URL to MyModel.name
#: 'parent': 'parent.name', # Map 'parent' to MyModel.parent.name
#: }
#:
#: The :class:`InstanceLoader` mixin class will convert this mapping into
#: SQLAlchemy attribute references to load the instance object.
route_model_map: Dict[str, str] = {}
def __init__(self, obj=None):
super().__init__()
self.obj = obj
def __eq__(self, other):
return type(other) is type(self) and other.obj == self.obj
def dispatch_request(self, view, view_args):
"""
View dispatcher that calls :meth:`before_request`, :meth:`loader`,
:meth:`after_loader`, the view, and then :meth:`after_request`.
:param view: View method wrapped in specified decorators.
:param dict view_args: View arguments, to be passed on to the view method
"""
# Call the :meth:`before_request` method
resp = self.before_request()
if resp:
return self.after_request(make_response(resp))
# Load the database model
self.obj = self.loader(**view_args)
# Trigger pre-view processing of the loaded object
resp = self.after_loader()
if resp:
return self.after_request(make_response(resp))
# Call the view handler method, then pass the response to :meth:`after_response`
return self.after_request(make_response(view(self)))
def loader(self, **view_args): # pragma: no cover
"""
Subclasses or mixin classes may override this method to provide a model
instance loader. The return value of this method will be placed at
``self.obj``.
:return: Object instance loaded from database
"""
raise NotImplementedError("View class is missing a loader method")
def after_loader(self):
# Determine permissions available on the object for the current actor,
# but only if the view method has a requires_permission decorator
if hasattr(self.current_handler.wrapped_func, 'requires_permission'):
if isinstance(self.obj, tuple):
perms = None
for subobj in self.obj:
if hasattr(subobj, 'permissions'):
perms = subobj.permissions(current_auth.actor, perms)
perms = InspectableSet(perms or set())
elif hasattr(self.obj, 'current_permissions'):
# current_permissions always returns an InspectableSet
perms = self.obj.current_permissions
else:
perms = InspectableSet()
add_auth_attribute('permissions', perms)
return None
def requires_roles(roles):
"""
Decorator for :class:`ModelView` views that limits access to the specified
roles.
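    Usage (a minimal sketch; the ``'owner'`` role name is illustrative)::
        @route('delete', methods=['POST'])
        @requires_roles({'owner'})
        def delete(self):
            ...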
"""
def inner(f):
def is_available_here(context):
return context.obj.roles_for(current_auth.actor).has_any(roles)
def is_available(context):
result = is_available_here(context)
if result and hasattr(f, 'is_available'):
# We passed, but we're wrapping another test, so ask there as well
return f.is_available(context)
return result
@wraps(f)
def wrapper(self, *args, **kwargs):
add_auth_attribute('login_required', True)
if not is_available_here(self):
abort(403)
return f(self, *args, **kwargs)
wrapper.requires_roles = roles
wrapper.is_available = is_available
return wrapper
return inner
class UrlForView:
"""
Mixin class for :class:`ModelView` that registers view handler methods with
:class:`~coaster.sqlalchemy.mixins.UrlForMixin`'s
:meth:`~coaster.sqlalchemy.mixins.UrlForMixin.is_url_for`.
"""
@classmethod
def init_app(cls, app, callback=None):
def register_view_on_model(rule, endpoint, view_func, **options):
# Only pass in the attrs that are included in the rule.
# 1. Extract list of variables from the rule
rulevars = [v for c, a, v in parse_rule(rule)]
if options.get('host'):
rulevars.extend(v for c, a, v in parse_rule(options['host']))
if options.get('subdomain'):
rulevars.extend(v for c, a, v in parse_rule(options['subdomain']))
# Make a subset of cls.route_model_map with the required variables
params = {
v: cls.route_model_map[v] for v in rulevars if v in cls.route_model_map
}
# Register endpoint with the view function's name, endpoint name and
# parameters. Register the view for a specific app, unless we're in a
# Blueprint, in which case it's not an app.
# FIXME: The behaviour of a Blueprint + multi-app combo is unknown and needs
# tests.
if isinstance(app, Blueprint):
prefix = app.name + '.'
reg_app = None
else:
prefix = ''
reg_app = app
cls.model.register_endpoint(
action=view_func.__name__,
endpoint=prefix + endpoint,
app=reg_app,
roles=getattr(view_func, 'requires_roles', None),
paramattrs=params,
)
cls.model.register_view_for(
app=reg_app,
action=view_func.__name__,
classview=cls,
attr=view_func.__name__,
)
if callback: # pragma: no cover
callback(rule, endpoint, view_func, **options)
super().init_app(app, callback=register_view_on_model)
def url_change_check(f):
"""
View method decorator that checks the URL of the loaded object in
``self.obj`` against the URL in the request (using
``self.obj.url_for(__name__)``). If the URLs do not match,
and the request is a ``GET``, it issues a redirect to the correct URL.
Usage::
@route('/doc/<document>')
class MyModelView(UrlForView, InstanceLoader, ModelView):
model = MyModel
route_model_map = {'document': 'url_id_name'}
@route('')
@url_change_check
@render_with(json=True)
def view(self):
return self.obj.current_access()
If the decorator is required for all view handlers in the class, use
:class:`UrlChangeCheck`.
This decorator will only consider the URLs to be different if:
* Schemes differ (``http`` vs ``https`` etc)
* Hostnames differ (apart from a case difference, as user agents use lowercase)
* Paths differ
The current URL's query will be copied to the redirect URL. The URL fragment
(``#target_id``) is not available to the server and will be lost.
"""
@wraps(f)
def wrapper(self, *args, **kwargs):
if request.method == 'GET' and self.obj is not None:
correct_url = self.obj.url_for(f.__name__, _external=True)
if correct_url != request.base_url:
# What's different? If it's a case difference in hostname, or different
# port number, username, password, query or fragment, ignore. For any
# other difference (scheme, hostname or path), do a redirect.
correct_url_parts = urlsplit(correct_url)
request_url_parts = urlsplit(request.base_url)
reconstructed_url = urlunsplit(
(
correct_url_parts.scheme,
correct_url_parts.hostname.lower(), # Replace netloc
correct_url_parts.path,
'', # Drop query
'', # Drop fragment
)
)
reconstructed_ref = urlunsplit(
(
request_url_parts.scheme,
request_url_parts.hostname.lower(), # Replace netloc
request_url_parts.path,
'', # Drop query
'', # Drop fragment
)
)
if reconstructed_url != reconstructed_ref:
if request.query_string:
correct_url = urlunsplit(
correct_url_parts._replace(
query=request.query_string.decode('utf-8')
)
)
return redirect(
correct_url
) # TODO: Decide if this should be 302 (default) or 301
return f(self, *args, **kwargs)
return wrapper
class UrlChangeCheck(UrlForView):
"""
Mixin class for :class:`ModelView` and
:class:`~coaster.sqlalchemy.mixins.UrlForMixin` that applies the
:func:`url_change_check` decorator to all view handler methods. Subclasses
:class:`UrlForView`, which it depends on to register the view with the
model so that URLs can be generated. Usage::
@route('/doc/<document>')
class MyModelView(UrlChangeCheck, InstanceLoader, ModelView):
model = MyModel
route_model_map = {'document': 'url_id_name'}
@route('')
@render_with(json=True)
def view(self):
return self.obj.current_access()
"""
__decorators__ = [url_change_check]
class InstanceLoader:
"""
Mixin class for :class:`ModelView` that provides a :meth:`loader` that
attempts to load an instance of the model based on attributes in the
:attr:`~ModelView.route_model_map` dictionary.
:class:`InstanceLoader` will traverse relationships (many-to-one or
one-to-one) and perform a SQL ``JOIN`` with the target class.
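    For example (illustrative names), with ``route_model_map = {'parent':
    'parent.name'}`` the loader joins the related ``parent`` model into the
    query and filters on its ``name`` column.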
"""
def loader(self, **view_args):
        if any(name in self.route_model_map for name in view_args):
# We have a URL route attribute that matches one of the model's attributes.
# Attempt to load the model instance
filters = {
self.route_model_map[key]: value
for key, value in view_args.items()
if key in self.route_model_map
}
query = self.query or self.model.query
joined_models = set()
for name, value in filters.items():
if '.' in name:
# Did we get something like `parent.name`?
# Dig into it to find the source column
source = self.model
for subname in name.split('.'):
attr = relattr = getattr(source, subname)
# Did we get to something like 'parent'?
# 1. If it's a synonym, get the attribute it is a synonym for
# 2. If it's a relationship, find the source class, join it to
# the query, and then continue looking for attributes over there
if hasattr(attr, 'original_property') and isinstance(
attr.original_property, SynonymProperty
):
attr = getattr(source, attr.original_property.name)
if isinstance(attr, InstrumentedAttribute) and isinstance(
attr.property, RelationshipProperty
):
if isinstance(attr.property.argument, Mapper):
attr = (
attr.property.argument.class_
) # Unlikely to be used. pragma: no cover
else:
attr = attr.property.argument
if attr not in joined_models:
# SQL JOIN the other model on the basis of
# the relationship that led us to this join
query = query.join(attr, relattr)
# But ensure we don't JOIN twice
joined_models.add(attr)
source = attr
query = query.filter(source == value)
else:
query = query.filter(getattr(self.model, name) == value)
obj = query.one_or_404()
return obj
| bsd-2-clause |
CalHoll/SoundMoose | server/project/conf/base.py | 3 | 5770 | import os
import sys
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(
os.path.join(BASE_DIR, 'apps')
)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'core',
'rest_framework',
'rest_framework_swagger',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'core.middleware.corsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.normpath(os.path.join(BASE_DIR, 'templates')),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'soundmoose',
'USER': 'postgres',
'PASSWORD': 'hrr20soundmoose',
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
# STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = os.path.join(ROOT_DIR, 'assets')
# See:
# https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
# END STATIC FILE CONFIGURATION
# MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = os.path.normpath(os.path.join(ROOT_DIR, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
# END MEDIA CONFIGURATION
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
},
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%d/%b/%Y %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
},
'file': {
'level': 'ERROR',
'class': 'logging.FileHandler',
'filters': ['require_debug_false'],
'filename': 'log/error.log',
'formatter': 'verbose'
},
},
'loggers': {
'django.db.backends': {
'level': 'DEBUG',
'handlers': ['console'],
},
'django.request': {
'handlers': ['file'],
'level': 'ERROR',
'propagate': True,
},
}
}
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',)
}
TEMPLATE_LOADERS = [
'django.template.loaders.eggs.Loader',
]
# CORS Middleware https://github.com/ottoyiu/django-cors-headers/
CORS_ORIGIN_ALLOW_ALL = True
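# Note: with CORS_ORIGIN_ALLOW_ALL enabled, django-cors-headers accepts all
# origins and the whitelist below is not consulted; it takes effect only if
# ALLOW_ALL is disabled.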
CORS_ORIGIN_WHITELIST = (
'soundmoose.com',
'www.soundmoose.com',
'localhost:3000',
)
CORS_ALLOW_METHODS = (
'DELETE',
'GET',
'OPTIONS',
'PATCH',
'POST',
'PUT',
)
| mit |
Y3K/django | django/middleware/locale.py | 358 | 2983 | "This is the locale selecting middleware that will look at accept headers"
from django.conf import settings
from django.core.urlresolvers import (
LocaleRegexURLResolver, get_resolver, get_script_prefix, is_valid_path,
)
from django.http import HttpResponseRedirect
from django.utils import translation
from django.utils.cache import patch_vary_headers
from django.utils.functional import cached_property
class LocaleMiddleware(object):
"""
This is a very simple middleware that parses a request
and decides what translation object to install in the current
thread context. This allows pages to be dynamically
translated to the language the user desires (if the language
is available, of course).
"""
response_redirect_class = HttpResponseRedirect
def process_request(self, request):
language = translation.get_language_from_request(
request, check_path=self.is_language_prefix_patterns_used)
translation.activate(language)
request.LANGUAGE_CODE = translation.get_language()
def process_response(self, request, response):
language = translation.get_language()
language_from_path = translation.get_language_from_path(request.path_info)
if (response.status_code == 404 and not language_from_path
and self.is_language_prefix_patterns_used):
urlconf = getattr(request, 'urlconf', None)
language_path = '/%s%s' % (language, request.path_info)
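            # e.g. a 404 on '/about/' is rechecked as '/en/about/' when the
            # active language is 'en'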
path_valid = is_valid_path(language_path, urlconf)
path_needs_slash = (
not path_valid and (
settings.APPEND_SLASH and not language_path.endswith('/')
and is_valid_path('%s/' % language_path, urlconf)
)
)
if path_valid or path_needs_slash:
script_prefix = get_script_prefix()
# Insert language after the script prefix and before the
# rest of the URL
language_url = request.get_full_path(force_append_slash=path_needs_slash).replace(
script_prefix,
'%s%s/' % (script_prefix, language),
1
)
return self.response_redirect_class(language_url)
if not (self.is_language_prefix_patterns_used
and language_from_path):
patch_vary_headers(response, ('Accept-Language',))
if 'Content-Language' not in response:
response['Content-Language'] = language
return response
@cached_property
def is_language_prefix_patterns_used(self):
"""
Returns `True` if the `LocaleRegexURLResolver` is used
at root level of the urlpatterns, else it returns `False`.
"""
for url_pattern in get_resolver(None).url_patterns:
if isinstance(url_pattern, LocaleRegexURLResolver):
return True
return False
| bsd-3-clause |
DoubleNegativeVisualEffects/cortex | test/IECoreRI/DoubleSided.py | 7 | 2640 | ##########################################################################
#
# Copyright (c) 2008-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import IECore
import IECoreRI
import os.path
import os
class DoubleSidedTest( IECoreRI.TestCase ) :
def test( self ) :
r = IECoreRI.Renderer( "test/IECoreRI/output/testDoubleSided.rib" )
self.assertEqual( r.getAttribute( "doubleSided" ), IECore.BoolData( True ) )
r.setAttribute( "doubleSided", IECore.BoolData( False ) )
self.assertEqual( r.getAttribute( "doubleSided" ), IECore.BoolData( False ) )
del r
l = "".join( file( "test/IECoreRI/output/testDoubleSided.rib" ).readlines() )
self.assert_( "Sides 1" in l )
r = IECoreRI.Renderer( "test/IECoreRI/output/testDoubleSided.rib" )
r.setAttribute( "doubleSided", IECore.BoolData( True ) )
del r
l = "".join( file( "test/IECoreRI/output/testDoubleSided.rib" ).readlines() )
self.assert_( "Sides 2" in l )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
vishesh92/redash | old_migrations/0011_migrate_bigquery_to_json.py | 20 | 1391 | from base64 import b64encode
import json
from redash.models import DataSource
def convert_p12_to_pem(p12file):
from OpenSSL import crypto
with open(p12file, 'rb') as f:
p12 = crypto.load_pkcs12(f.read(), "notasecret")
return crypto.dump_privatekey(crypto.FILETYPE_PEM, p12.get_privatekey())
if __name__ == '__main__':
for ds in DataSource.select(DataSource.id, DataSource.type, DataSource.options):
if ds.type == 'bigquery':
options = json.loads(ds.options)
if 'jsonKeyFile' in options:
continue
new_options = {
'projectId': options['projectId'],
'jsonKeyFile': b64encode(json.dumps({
'client_email': options['serviceAccount'],
'private_key': convert_p12_to_pem(options['privateKey'])
}))
}
ds.options = json.dumps(new_options)
ds.save(only=ds.dirty_fields)
elif ds.type == 'google_spreadsheets':
options = json.loads(ds.options)
if 'jsonKeyFile' in options:
continue
with open(options['credentialsFilePath']) as f:
new_options = {
'jsonKeyFile': b64encode(f.read())
}
ds.options = json.dumps(new_options)
ds.save(only=ds.dirty_fields)
| bsd-2-clause |
lache/RacingKingLee | monitor/engine.win64/2.74/python/lib/site-packages/numpy/f2py/auxfuncs.py | 75 | 19979 | #!/usr/bin/env python
"""
Auxiliary functions for f2py2e.
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <[email protected]>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy (BSD style) LICENSE.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/07/24 19:01:55 $
Pearu Peterson
"""
from __future__ import division, absolute_import, print_function
import pprint
import sys
import types
from functools import reduce
from . import __version__
from . import cfuncs
f2py_version = __version__.version
errmess=sys.stderr.write
#outmess=sys.stdout.write
show=pprint.pprint
options={}
debugoptions=[]
wrapfuncs = 1
def outmess(t):
if options.get('verbose', 1):
sys.stdout.write(t)
def debugcapi(var):
return 'capi' in debugoptions
def _isstring(var):
return 'typespec' in var and var['typespec']=='character' and (not isexternal(var))
def isstring(var):
return _isstring(var) and not isarray(var)
def ischaracter(var):
return isstring(var) and 'charselector' not in var
def isstringarray(var):
return isarray(var) and _isstring(var)
def isarrayofstrings(var):
# leaving out '*' for now so that
# `character*(*) a(m)` and `character a(m,*)`
# are treated differently. Luckily `character**` is illegal.
return isstringarray(var) and var['dimension'][-1]=='(*)'
def isarray(var):
return 'dimension' in var and (not isexternal(var))
def isscalar(var):
return not (isarray(var) or isstring(var) or isexternal(var))
def iscomplex(var):
return isscalar(var) and var.get('typespec') in ['complex', 'double complex']
def islogical(var):
return isscalar(var) and var.get('typespec')=='logical'
def isinteger(var):
return isscalar(var) and var.get('typespec')=='integer'
def isreal(var):
return isscalar(var) and var.get('typespec')=='real'
def get_kind(var):
try:
return var['kindselector']['*']
except KeyError:
try:
return var['kindselector']['kind']
except KeyError:
pass
def islong_long(var):
if not isscalar(var):
return 0
if var.get('typespec') not in ['integer', 'logical']:
return 0
return get_kind(var)=='8'
def isunsigned_char(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-1'
def isunsigned_short(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-2'
def isunsigned(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-4'
def isunsigned_long_long(var):
if not isscalar(var):
return 0
if var.get('typespec') != 'integer':
return 0
return get_kind(var)=='-8'
def isdouble(var):
if not isscalar(var):
return 0
if not var.get('typespec')=='real':
return 0
return get_kind(var)=='8'
def islong_double(var):
if not isscalar(var):
return 0
if not var.get('typespec')=='real':
return 0
return get_kind(var)=='16'
def islong_complex(var):
if not iscomplex(var):
return 0
return get_kind(var)=='32'
def iscomplexarray(var):
return isarray(var) and var.get('typespec') in ['complex', 'double complex']
def isint1array(var):
return isarray(var) and var.get('typespec')=='integer' \
and get_kind(var)=='1'
def isunsigned_chararray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-1'
def isunsigned_shortarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-2'
def isunsignedarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-4'
def isunsigned_long_longarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='-8'
def issigned_chararray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='1'
def issigned_shortarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='2'
def issigned_array(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='4'
def issigned_long_longarray(var):
return isarray(var) and var.get('typespec') in ['integer', 'logical']\
and get_kind(var)=='8'
def isallocatable(var):
return 'attrspec' in var and 'allocatable' in var['attrspec']
def ismutable(var):
return not (not 'dimension' in var or isstring(var))
def ismoduleroutine(rout):
return 'modulename' in rout
def ismodule(rout):
return ('block' in rout and 'module'==rout['block'])
def isfunction(rout):
return ('block' in rout and 'function'==rout['block'])
#def isfunction_wrap(rout):
# return wrapfuncs and (iscomplexfunction(rout) or isstringfunction(rout)) and (not isexternal(rout))
def isfunction_wrap(rout):
if isintent_c(rout):
return 0
return wrapfuncs and isfunction(rout) and (not isexternal(rout))
def issubroutine(rout):
return ('block' in rout and 'subroutine'==rout['block'])
def issubroutine_wrap(rout):
if isintent_c(rout):
return 0
return issubroutine(rout) and hasassumedshape(rout)
def hasassumedshape(rout):
if rout.get('hasassumedshape'):
return True
for a in rout['args']:
for d in rout['vars'].get(a, {}).get('dimension', []):
if d==':':
rout['hasassumedshape'] = True
return True
return False
def isroutine(rout):
return isfunction(rout) or issubroutine(rout)
def islogicalfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return islogical(rout['vars'][a])
return 0
def islong_longfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return islong_long(rout['vars'][a])
return 0
def islong_doublefunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return islong_double(rout['vars'][a])
return 0
def iscomplexfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return iscomplex(rout['vars'][a])
return 0
def iscomplexfunction_warn(rout):
if iscomplexfunction(rout):
outmess("""\
**************************************************************
Warning: code with a function returning complex value
may not work correctly with your Fortran compiler.
Run the following test before using it in your applications:
$(f2py install dir)/test-site/{b/runme_scalar,e/runme}
When using GNU gcc/g77 compilers, codes should work correctly.
**************************************************************\n""")
return 1
return 0
def isstringfunction(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return isstring(rout['vars'][a])
return 0
def hasexternals(rout):
return 'externals' in rout and rout['externals']
def isthreadsafe(rout):
return 'f2pyenhancements' in rout and 'threadsafe' in rout['f2pyenhancements']
def hasvariables(rout):
return 'vars' in rout and rout['vars']
def isoptional(var):
return ('attrspec' in var and 'optional' in var['attrspec'] and 'required' not in var['attrspec']) and isintent_nothide(var)
def isexternal(var):
return ('attrspec' in var and 'external' in var['attrspec'])
def isrequired(var):
return not isoptional(var) and isintent_nothide(var)
def isintent_in(var):
if 'intent' not in var:
return 1
if 'hide' in var['intent']:
return 0
if 'inplace' in var['intent']:
return 0
if 'in' in var['intent']:
return 1
if 'out' in var['intent']:
return 0
if 'inout' in var['intent']:
return 0
if 'outin' in var['intent']:
return 0
return 1
def isintent_inout(var):
    return ('intent' in var
            and ('inout' in var['intent'] or 'outin' in var['intent'])
            and 'in' not in var['intent']
            and 'hide' not in var['intent']
            and 'inplace' not in var['intent'])
def isintent_out(var):
return 'out' in var.get('intent', [])
def isintent_hide(var):
return ('intent' in var and ('hide' in var['intent'] or ('out' in var['intent'] and 'in' not in var['intent'] and (not l_or(isintent_inout, isintent_inplace)(var)))))
def isintent_nothide(var):
return not isintent_hide(var)
def isintent_c(var):
return 'c' in var.get('intent', [])
# def isintent_f(var):
# return not isintent_c(var)
def isintent_cache(var):
return 'cache' in var.get('intent', [])
def isintent_copy(var):
return 'copy' in var.get('intent', [])
def isintent_overwrite(var):
return 'overwrite' in var.get('intent', [])
def isintent_callback(var):
return 'callback' in var.get('intent', [])
def isintent_inplace(var):
return 'inplace' in var.get('intent', [])
def isintent_aux(var):
return 'aux' in var.get('intent', [])
def isintent_aligned4(var):
return 'aligned4' in var.get('intent', [])
def isintent_aligned8(var):
return 'aligned8' in var.get('intent', [])
def isintent_aligned16(var):
return 'aligned16' in var.get('intent', [])
isintent_dict = {isintent_in: 'INTENT_IN', isintent_inout: 'INTENT_INOUT',
isintent_out: 'INTENT_OUT', isintent_hide: 'INTENT_HIDE',
isintent_cache: 'INTENT_CACHE',
isintent_c: 'INTENT_C', isoptional: 'OPTIONAL',
isintent_inplace: 'INTENT_INPLACE',
isintent_aligned4: 'INTENT_ALIGNED4',
isintent_aligned8: 'INTENT_ALIGNED8',
isintent_aligned16: 'INTENT_ALIGNED16',
}
def isprivate(var):
return 'attrspec' in var and 'private' in var['attrspec']
def hasinitvalue(var):
return '=' in var
def hasinitvalueasstring(var):
if not hasinitvalue(var):
return 0
return var['='][0] in ['"', "'"]
def hasnote(var):
return 'note' in var
def hasresultnote(rout):
if not isfunction(rout):
return 0
if 'result' in rout:
a=rout['result']
else:
a=rout['name']
if a in rout['vars']:
return hasnote(rout['vars'][a])
return 0
def hascommon(rout):
return 'common' in rout
def containscommon(rout):
if hascommon(rout):
return 1
if hasbody(rout):
for b in rout['body']:
if containscommon(b):
return 1
return 0
def containsmodule(block):
if ismodule(block):
return 1
if not hasbody(block):
return 0
for b in block['body']:
if containsmodule(b):
return 1
return 0
def hasbody(rout):
return 'body' in rout
def hascallstatement(rout):
return getcallstatement(rout) is not None
def istrue(var):
return 1
def isfalse(var):
return 0
class F2PYError(Exception):
pass
class throw_error:
def __init__(self, mess):
self.mess = mess
def __call__(self, var):
mess = '\n\n var = %s\n Message: %s\n' % (var, self.mess)
raise F2PYError(mess)
def l_and(*f):
l, l2='lambda v', []
for i in range(len(f)):
l='%s,f%d=f[%d]'%(l, i, i)
l2.append('f%d(v)'%(i))
return eval('%s:%s'%(l, ' and '.join(l2)))
def l_or(*f):
l, l2='lambda v', []
for i in range(len(f)):
l='%s,f%d=f[%d]'%(l, i, i)
l2.append('f%d(v)'%(i))
return eval('%s:%s'%(l, ' or '.join(l2)))
def l_not(f):
return eval('lambda v,f=f:not f(v)')
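# Example: l_and(isintent_in, isscalar) builds, via string construction and
# eval, a predicate equivalent to lambda v: isintent_in(v) and isscalar(v).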
def isdummyroutine(rout):
try:
return rout['f2pyenhancements']['fortranname']==''
except KeyError:
return 0
def getfortranname(rout):
try:
name = rout['f2pyenhancements']['fortranname']
if name=='':
raise KeyError
if not name:
errmess('Failed to use fortranname from %s\n'%(rout['f2pyenhancements']))
raise KeyError
except KeyError:
name = rout['name']
return name
def getmultilineblock(rout,blockname,comment=1,counter=0):
try:
r = rout['f2pyenhancements'].get(blockname)
except KeyError:
return
if not r: return
if counter > 0 and isinstance(r, str):
return
if isinstance(r, list):
if counter>=len(r): return
r = r[counter]
if r[:3]=="'''":
if comment:
r = '\t/* start ' + blockname + ' multiline ('+repr(counter)+') */\n' + r[3:]
else:
r = r[3:]
if r[-3:]=="'''":
if comment:
r = r[:-3] + '\n\t/* end multiline ('+repr(counter)+')*/'
else:
r = r[:-3]
else:
errmess("%s multiline block should end with `'''`: %s\n" \
% (blockname, repr(r)))
return r
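# Example (a sketch): for a .pyf block such as
#     callstatement '''(*f2py_func)(&a);'''
# getmultilineblock(rout, 'callstatement') strips the triple quotes and, with
# comment=1, brackets the body in /* start ... */ and /* end ... */ markers.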
def getcallstatement(rout):
return getmultilineblock(rout, 'callstatement')
def getcallprotoargument(rout,cb_map={}):
r = getmultilineblock(rout, 'callprotoargument', comment=0)
if r: return r
if hascallstatement(rout):
outmess('warning: callstatement is defined without callprotoargument\n')
return
from .capi_maps import getctype
arg_types, arg_types2 = [], []
if l_and(isstringfunction, l_not(isfunction_wrap))(rout):
arg_types.extend(['char*', 'size_t'])
for n in rout['args']:
var = rout['vars'][n]
if isintent_callback(var):
continue
if n in cb_map:
ctype = cb_map[n]+'_typedef'
else:
ctype = getctype(var)
if l_and(isintent_c, l_or(isscalar, iscomplex))(var):
pass
elif isstring(var):
pass
#ctype = 'void*'
else:
ctype = ctype+'*'
if isstring(var) or isarrayofstrings(var):
arg_types2.append('size_t')
arg_types.append(ctype)
proto_args = ','.join(arg_types+arg_types2)
if not proto_args:
proto_args = 'void'
#print proto_args
return proto_args
def getusercode(rout):
return getmultilineblock(rout, 'usercode')
def getusercode1(rout):
return getmultilineblock(rout, 'usercode', counter=1)
def getpymethoddef(rout):
return getmultilineblock(rout, 'pymethoddef')
def getargs(rout):
sortargs, args=[], []
if 'args' in rout:
args=rout['args']
if 'sortvars' in rout:
for a in rout['sortvars']:
if a in args: sortargs.append(a)
for a in args:
if a not in sortargs:
sortargs.append(a)
else: sortargs=rout['args']
return args, sortargs
def getargs2(rout):
sortargs, args=[], rout.get('args', [])
auxvars = [a for a in rout['vars'].keys() if isintent_aux(rout['vars'][a])\
and a not in args]
args = auxvars + args
if 'sortvars' in rout:
for a in rout['sortvars']:
if a in args: sortargs.append(a)
for a in args:
if a not in sortargs:
sortargs.append(a)
else: sortargs=auxvars + rout['args']
return args, sortargs
def getrestdoc(rout):
if 'f2pymultilines' not in rout:
return None
k = None
if rout['block']=='python module':
k = rout['block'], rout['name']
return rout['f2pymultilines'].get(k, None)
def gentitle(name):
l=(80-len(name)-6)//2
return '/*%s %s %s*/'%(l*'*', name, l*'*')
def flatlist(l):
if isinstance(l, list):
return reduce(lambda x,y,f=flatlist:x+f(y), l, [])
return [l]
def stripcomma(s):
if s and s[-1]==',': return s[:-1]
return s
def replace(str,d,defaultsep=''):
if isinstance(d, list):
return [replace(str, _m, defaultsep) for _m in d]
if isinstance(str, list):
return [replace(_m, d, defaultsep) for _m in str]
for k in 2*list(d.keys()):
if k=='separatorsfor':
continue
if 'separatorsfor' in d and k in d['separatorsfor']:
sep=d['separatorsfor'][k]
else:
sep=defaultsep
if isinstance(d[k], list):
str=str.replace('#%s#'%(k), sep.join(flatlist(d[k])))
else:
str=str.replace('#%s#'%(k), d[k])
return str
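# Example: replace('#name#_capi', {'name': 'foo'}) -> 'foo_capi'; list values
# are joined with the separator from d['separatorsfor'][key] (default '').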
def dictappend(rd, ar):
if isinstance(ar, list):
for a in ar:
rd=dictappend(rd, a)
return rd
for k in ar.keys():
if k[0]=='_':
continue
if k in rd:
if isinstance(rd[k], str):
rd[k]=[rd[k]]
if isinstance(rd[k], list):
if isinstance(ar[k], list):
rd[k]=rd[k]+ar[k]
else:
rd[k].append(ar[k])
elif isinstance(rd[k], dict):
if isinstance(ar[k], dict):
if k=='separatorsfor':
for k1 in ar[k].keys():
if k1 not in rd[k]:
rd[k][k1]=ar[k][k1]
else:
rd[k]=dictappend(rd[k], ar[k])
else:
rd[k]=ar[k]
return rd
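# applyrules: recursively apply a rule dict (or list of rule dicts) to data
# dict `d`. String values are template-substituted via replace(); dict values
# keyed by predicate functions contribute only when the predicate passes for
# `var`; a '_check' predicate can veto the whole rule.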
def applyrules(rules,d,var={}):
ret={}
if isinstance(rules, list):
for r in rules:
rr=applyrules(r, d, var)
ret=dictappend(ret, rr)
if '_break' in rr:
break
return ret
if '_check' in rules and (not rules['_check'](var)):
return ret
if 'need' in rules:
res = applyrules({'needs':rules['need']}, d, var)
if 'needs' in res:
cfuncs.append_needs(res['needs'])
for k in rules.keys():
if k=='separatorsfor':
ret[k]=rules[k]; continue
if isinstance(rules[k], str):
ret[k]=replace(rules[k], d)
elif isinstance(rules[k], list):
ret[k]=[]
for i in rules[k]:
ar=applyrules({k:i}, d, var)
if k in ar:
ret[k].append(ar[k])
elif k[0]=='_':
continue
elif isinstance(rules[k], dict):
ret[k]=[]
for k1 in rules[k].keys():
if isinstance(k1, types.FunctionType) and k1(var):
if isinstance(rules[k][k1], list):
for i in rules[k][k1]:
if isinstance(i, dict):
res=applyrules({'supertext':i}, d, var)
if 'supertext' in res:
i=res['supertext']
else: i=''
ret[k].append(replace(i, d))
else:
i=rules[k][k1]
if isinstance(i, dict):
res=applyrules({'supertext':i}, d)
if 'supertext' in res:
i=res['supertext']
else: i=''
ret[k].append(replace(i, d))
else:
errmess('applyrules: ignoring rule %s.\n'%repr(rules[k]))
if isinstance(ret[k], list):
if len(ret[k])==1:
ret[k]=ret[k][0]
if ret[k]==[]:
del ret[k]
return ret
| mit |
dfdx2/django | tests/many_to_one/tests.py | 12 | 30596 | import datetime
from copy import deepcopy
from django.core.exceptions import FieldError, MultipleObjectsReturned
from django.db import models, transaction
from django.db.utils import IntegrityError
from django.test import TestCase
from django.utils.translation import gettext_lazy
from .models import (
Article, Category, Child, City, District, First, Parent, Record, Relation,
Reporter, School, Student, Third, ToFieldChild,
)
class ManyToOneTests(TestCase):
def setUp(self):
# Create a few Reporters.
self.r = Reporter(first_name='John', last_name='Smith', email='[email protected]')
self.r.save()
self.r2 = Reporter(first_name='Paul', last_name='Jones', email='[email protected]')
self.r2.save()
# Create an Article.
self.a = Article(headline="This is a test", pub_date=datetime.date(2005, 7, 27), reporter=self.r)
self.a.save()
def test_get(self):
# Article objects have access to their related Reporter objects.
r = self.a.reporter
self.assertEqual(r.id, self.r.id)
        self.assertEqual((r.first_name, r.last_name), ('John', 'Smith'))
def test_create(self):
# You can also instantiate an Article by passing the Reporter's ID
# instead of a Reporter object.
a3 = Article(headline="Third article", pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
a3.save()
self.assertEqual(a3.reporter.id, self.r.id)
# Similarly, the reporter ID can be a string.
a4 = Article(headline="Fourth article", pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
a4.save()
self.assertEqual(repr(a4.reporter), "<Reporter: John Smith>")
def test_add(self):
# Create an Article via the Reporter object.
new_article = self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
self.assertEqual(repr(new_article), "<Article: John's second story>")
self.assertEqual(new_article.reporter.id, self.r.id)
# Create a new article, and add it to the article set.
new_article2 = Article(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
msg = "<Article: Paul's story> instance isn't saved. Use bulk=False or save the object first."
with self.assertRaisesMessage(ValueError, msg):
self.r.article_set.add(new_article2)
self.r.article_set.add(new_article2, bulk=False)
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(
self.r.article_set.all(),
["<Article: John's second story>", "<Article: Paul's story>", "<Article: This is a test>"]
)
# Add the same article to a different article set - check that it moves.
self.r2.article_set.add(new_article2)
self.assertEqual(new_article2.reporter.id, self.r2.id)
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
# Adding an object of the wrong type raises TypeError.
with transaction.atomic():
with self.assertRaisesMessage(TypeError, "'Article' instance expected, got <Reporter:"):
self.r.article_set.add(self.r2)
self.assertQuerysetEqual(
self.r.article_set.all(),
["<Article: John's second story>", "<Article: This is a test>"]
)
def test_set(self):
new_article = self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
new_article2 = self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
# Assign the article to the reporter.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
# Set the article back again.
self.r2.article_set.set([new_article, new_article2])
self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
self.assertQuerysetEqual(
self.r2.article_set.all(),
["<Article: John's second story>", "<Article: Paul's story>"]
)
# Funny case - because the ForeignKey cannot be null,
# existing members of the set must remain.
self.r.article_set.set([new_article])
self.assertQuerysetEqual(
self.r.article_set.all(),
["<Article: John's second story>", "<Article: This is a test>"]
)
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
def test_reverse_assignment_deprecation(self):
msg = (
"Direct assignment to the reverse side of a related set is "
"prohibited. Use article_set.set() instead."
)
with self.assertRaisesMessage(TypeError, msg):
self.r2.article_set = []
def test_assign(self):
new_article = self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
new_article2 = self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
# Assign the article to the reporter directly using the descriptor.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
# Set the article back again using set() method.
self.r2.article_set.set([new_article, new_article2])
self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
self.assertQuerysetEqual(
self.r2.article_set.all(),
["<Article: John's second story>", "<Article: Paul's story>"]
)
# Because the ForeignKey cannot be null, existing members of the set
# must remain.
self.r.article_set.set([new_article])
self.assertQuerysetEqual(
self.r.article_set.all(),
["<Article: John's second story>", "<Article: This is a test>"]
)
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
# Reporter cannot be null - there should not be a clear or remove method
self.assertFalse(hasattr(self.r2.article_set, 'remove'))
self.assertFalse(hasattr(self.r2.article_set, 'clear'))
def test_selects(self):
self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
# Reporter objects have access to their related Article objects.
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='This'), ["<Article: This is a test>"])
self.assertEqual(self.r.article_set.count(), 2)
self.assertEqual(self.r2.article_set.count(), 1)
# Get articles by id
self.assertQuerysetEqual(Article.objects.filter(id__exact=self.a.id), ["<Article: This is a test>"])
self.assertQuerysetEqual(Article.objects.filter(pk=self.a.id), ["<Article: This is a test>"])
# Query on an article property
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='This'), ["<Article: This is a test>"])
# The API automatically follows relationships as far as you need.
# Use double underscores to separate relationships.
# This works as many levels deep as you want. There's no limit.
# Find all Articles for any Reporter whose first name is "John".
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John'),
["<Article: John's second story>", "<Article: This is a test>"]
)
# Implied __exact also works
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name='John'),
["<Article: John's second story>", "<Article: This is a test>"]
)
# Query twice over the related field.
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John', reporter__last_name__exact='Smith'),
["<Article: John's second story>", "<Article: This is a test>"]
)
# The underlying query only makes one join when a related table is referenced twice.
queryset = Article.objects.filter(reporter__first_name__exact='John', reporter__last_name__exact='Smith')
self.assertNumQueries(1, list, queryset)
self.assertEqual(queryset.query.get_compiler(queryset.db).as_sql()[0].count('INNER JOIN'), 1)
# The automatically joined table has a predictable name.
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John').extra(
where=["many_to_one_reporter.last_name='Smith'"]),
["<Article: John's second story>", "<Article: This is a test>"]
)
# ... and should work fine with the string that comes out of forms.Form.cleaned_data
self.assertQuerysetEqual(
(Article.objects
.filter(reporter__first_name__exact='John')
.extra(where=["many_to_one_reporter.last_name='%s'" % 'Smith'])),
["<Article: John's second story>", "<Article: This is a test>"]
)
# Find all Articles for a Reporter.
# Use direct ID check, pk check, and object comparison
self.assertQuerysetEqual(
Article.objects.filter(reporter__id__exact=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__pk=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter=self.r),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__in=[self.r.id, self.r2.id]).distinct(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__in=[self.r, self.r2]).distinct(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
# You can also use a queryset instead of a literal list of instances.
# The queryset must be reduced to a list of values using values(),
# then converted into a query
self.assertQuerysetEqual(
Article.objects.filter(
reporter__in=Reporter.objects.filter(first_name='John').values('pk').query
).distinct(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
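    # Editor's aside (illustrative SQL sketch, not part of the original
    # suite): the values('pk').query pattern above pushes the inner lookup
    # into a subquery instead of a second round trip, roughly:
    #
    #     SELECT ... FROM many_to_one_article
    #     WHERE reporter_id IN (SELECT id FROM many_to_one_reporter
    #                           WHERE first_name = 'John')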
def test_reverse_selects(self):
a3 = Article.objects.create(
headline="Third article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=self.r.id,
)
Article.objects.create(
headline="Fourth article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=self.r.id,
)
john_smith = ["<Reporter: John Smith>"]
# Reporters can be queried
self.assertQuerysetEqual(Reporter.objects.filter(id__exact=self.r.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(pk=self.r.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(first_name__startswith='John'), john_smith)
# Reporters can query in opposite direction of ForeignKey definition
self.assertQuerysetEqual(Reporter.objects.filter(article__id__exact=self.a.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article__pk=self.a.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article=self.a.id), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article=self.a), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article__in=[self.a.id, a3.id]).distinct(), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article__in=[self.a.id, a3]).distinct(), john_smith)
self.assertQuerysetEqual(Reporter.objects.filter(article__in=[self.a, a3]).distinct(), john_smith)
self.assertQuerysetEqual(
Reporter.objects.filter(article__headline__startswith='T'),
["<Reporter: John Smith>", "<Reporter: John Smith>"],
ordered=False
)
self.assertQuerysetEqual(Reporter.objects.filter(article__headline__startswith='T').distinct(), john_smith)
# Counting in the opposite direction works in conjunction with distinct()
self.assertEqual(Reporter.objects.filter(article__headline__startswith='T').count(), 2)
self.assertEqual(Reporter.objects.filter(article__headline__startswith='T').distinct().count(), 1)
# Queries can go round in circles.
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__first_name__startswith='John'),
[
"<Reporter: John Smith>",
"<Reporter: John Smith>",
"<Reporter: John Smith>",
],
ordered=False
)
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__first_name__startswith='John').distinct(),
john_smith
)
self.assertQuerysetEqual(Reporter.objects.filter(article__reporter__exact=self.r).distinct(), john_smith)
# Implied __exact also works.
self.assertQuerysetEqual(Reporter.objects.filter(article__reporter=self.r).distinct(), john_smith)
# It's possible to use values() calls across many-to-one relations.
# (Note, too, that we clear the ordering here so as not to drag the
# 'headline' field into the columns being used to determine uniqueness)
d = {'reporter__first_name': 'John', 'reporter__last_name': 'Smith'}
qs = Article.objects.filter(
reporter=self.r,
).distinct().order_by().values('reporter__first_name', 'reporter__last_name')
self.assertEqual([d], list(qs))
def test_select_related(self):
# Article.objects.select_related().dates() works properly when there
# are multiple Articles with the same date but different foreign-key
# objects (Reporters).
r1 = Reporter.objects.create(first_name='Mike', last_name='Royko', email='[email protected]')
r2 = Reporter.objects.create(first_name='John', last_name='Kass', email='[email protected]')
Article.objects.create(headline='First', pub_date=datetime.date(1980, 4, 23), reporter=r1)
Article.objects.create(headline='Second', pub_date=datetime.date(1980, 4, 23), reporter=r2)
self.assertEqual(
list(Article.objects.select_related().dates('pub_date', 'day')),
[datetime.date(1980, 4, 23), datetime.date(2005, 7, 27)]
)
self.assertEqual(
list(Article.objects.select_related().dates('pub_date', 'month')),
[datetime.date(1980, 4, 1), datetime.date(2005, 7, 1)]
)
self.assertEqual(
list(Article.objects.select_related().dates('pub_date', 'year')),
[datetime.date(1980, 1, 1), datetime.date(2005, 1, 1)]
)
def test_delete(self):
self.r.article_set.create(headline="John's second story", pub_date=datetime.date(2005, 7, 29))
self.r2.article_set.create(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
Article.objects.create(headline="Third article", pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
Article.objects.create(
headline="Fourth article",
pub_date=datetime.date(2005, 7, 27),
reporter_id=str(self.r.id),
)
# If you delete a reporter, his articles will be deleted.
self.assertQuerysetEqual(
Article.objects.all(),
[
"<Article: Fourth article>",
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: Third article>",
"<Article: This is a test>",
]
)
self.assertQuerysetEqual(
Reporter.objects.order_by('first_name'),
["<Reporter: John Smith>", "<Reporter: Paul Jones>"]
)
self.r2.delete()
self.assertQuerysetEqual(
Article.objects.all(),
[
"<Article: Fourth article>",
"<Article: John's second story>",
"<Article: Third article>",
"<Article: This is a test>",
]
)
self.assertQuerysetEqual(Reporter.objects.order_by('first_name'), ["<Reporter: John Smith>"])
# You can delete using a JOIN in the query.
Reporter.objects.filter(article__headline__startswith='This').delete()
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertQuerysetEqual(Article.objects.all(), [])
def test_explicit_fk(self):
# Create a new Article with get_or_create using an explicit value
# for a ForeignKey.
a2, created = Article.objects.get_or_create(
headline="John's second test",
pub_date=datetime.date(2011, 5, 7),
reporter_id=self.r.id,
)
self.assertTrue(created)
self.assertEqual(a2.reporter.id, self.r.id)
# You can specify filters containing the explicit FK value.
self.assertQuerysetEqual(
Article.objects.filter(reporter_id__exact=self.r.id),
["<Article: John's second test>", "<Article: This is a test>"]
)
# Create an Article by Paul for the same date.
a3 = Article.objects.create(
headline="Paul's commentary",
pub_date=datetime.date(2011, 5, 7),
reporter_id=self.r2.id,
)
self.assertEqual(a3.reporter.id, self.r2.id)
# Get should respect explicit foreign keys as well.
with self.assertRaises(MultipleObjectsReturned):
Article.objects.get(reporter_id=self.r.id)
self.assertEqual(
repr(a3),
repr(Article.objects.get(reporter_id=self.r2.id, pub_date=datetime.date(2011, 5, 7)))
)
def test_deepcopy_and_circular_references(self):
        # Regression for #12876 -- Model methods that include queries that
        # recurse don't cause recursion-depth problems under deepcopy.
self.r.cached_query = Article.objects.filter(reporter=self.r)
self.assertEqual(repr(deepcopy(self.r)), "<Reporter: John Smith>")
def test_manager_class_caching(self):
r1 = Reporter.objects.create(first_name='Mike')
r2 = Reporter.objects.create(first_name='John')
# Same twice
self.assertIs(r1.article_set.__class__, r1.article_set.__class__)
# Same as each other
self.assertIs(r1.article_set.__class__, r2.article_set.__class__)
def test_create_relation_with_gettext_lazy(self):
reporter = Reporter.objects.create(first_name='John', last_name='Smith', email='[email protected]')
lazy = gettext_lazy('test')
reporter.article_set.create(headline=lazy, pub_date=datetime.date(2011, 6, 10))
notlazy = str(lazy)
article = reporter.article_set.get()
self.assertEqual(article.headline, notlazy)
def test_values_list_exception(self):
expected_message = "Cannot resolve keyword 'notafield' into field. Choices are: %s"
reporter_fields = ', '.join(sorted(f.name for f in Reporter._meta.get_fields()))
with self.assertRaisesMessage(FieldError, expected_message % reporter_fields):
Article.objects.values_list('reporter__notafield')
article_fields = ', '.join(['EXTRA'] + sorted(f.name for f in Article._meta.get_fields()))
with self.assertRaisesMessage(FieldError, expected_message % article_fields):
Article.objects.extra(select={'EXTRA': 'EXTRA_SELECT'}).values_list('notafield')
def test_fk_assignment_and_related_object_cache(self):
# Tests of ForeignKey assignment and the related-object cache (see #6886).
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
# Look up the object again so that we get a "fresh" object.
c = Child.objects.get(name="Child")
p = c.parent
# Accessing the related object again returns the exactly same object.
self.assertIs(c.parent, p)
# But if we kill the cache, we get a new object.
del c._parent_cache
self.assertIsNot(c.parent, p)
# Assigning a new object results in that object getting cached immediately.
p2 = Parent.objects.create(name="Parent 2")
c.parent = p2
self.assertIs(c.parent, p2)
# Assigning None succeeds if field is null=True.
p.bestchild = None
self.assertIsNone(p.bestchild)
# bestchild should still be None after saving.
p.save()
self.assertIsNone(p.bestchild)
# bestchild should still be None after fetching the object again.
p = Parent.objects.get(name="Parent")
self.assertIsNone(p.bestchild)
        # Assigning None won't fail immediately, even though Child.parent is
        # null=False; the error only surfaces when saving.
setattr(c, "parent", None)
# You also can't assign an object of the wrong type here
with self.assertRaises(ValueError):
setattr(c, "parent", First(id=1, second=1))
# You can assign None to Child.parent during object creation.
Child(name='xyzzy', parent=None)
# But when trying to save a Child with parent=None, the database will
# raise IntegrityError.
with self.assertRaises(IntegrityError), transaction.atomic():
Child.objects.create(name='xyzzy', parent=None)
# Creation using keyword argument should cache the related object.
p = Parent.objects.get(name="Parent")
c = Child(parent=p)
self.assertIs(c.parent, p)
# Creation using keyword argument and unsaved related instance (#8070).
p = Parent()
msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
with self.assertRaisesMessage(ValueError, msg):
Child.objects.create(parent=p)
msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
with self.assertRaisesMessage(ValueError, msg):
ToFieldChild.objects.create(parent=p)
# Creation using attname keyword argument and an id will cause the
# related object to be fetched.
p = Parent.objects.get(name="Parent")
c = Child(parent_id=p.id)
self.assertIsNot(c.parent, p)
self.assertEqual(c.parent, p)
def test_fk_to_bigautofield(self):
ch = City.objects.create(name='Chicago')
District.objects.create(city=ch, name='Far South')
District.objects.create(city=ch, name='North')
ny = City.objects.create(name='New York', id=2 ** 33)
District.objects.create(city=ny, name='Brooklyn')
District.objects.create(city=ny, name='Manhattan')
def test_multiple_foreignkeys(self):
# Test of multiple ForeignKeys to the same model (bug #7125).
c1 = Category.objects.create(name='First')
c2 = Category.objects.create(name='Second')
c3 = Category.objects.create(name='Third')
r1 = Record.objects.create(category=c1)
r2 = Record.objects.create(category=c1)
r3 = Record.objects.create(category=c2)
r4 = Record.objects.create(category=c2)
r5 = Record.objects.create(category=c3)
Relation.objects.create(left=r1, right=r2)
Relation.objects.create(left=r3, right=r4)
Relation.objects.create(left=r1, right=r3)
Relation.objects.create(left=r5, right=r2)
Relation.objects.create(left=r3, right=r2)
q1 = Relation.objects.filter(left__category__name__in=['First'], right__category__name__in=['Second'])
self.assertQuerysetEqual(q1, ["<Relation: First - Second>"])
q2 = Category.objects.filter(record__left_set__right__category__name='Second').order_by('name')
self.assertQuerysetEqual(q2, ["<Category: First>", "<Category: Second>"])
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
with self.assertRaises(ValueError):
Child.objects.create(name="Grandchild", parent=c)
def test_fk_instantiation_outside_model(self):
# Regression for #12190 -- Should be able to instantiate a FK outside
# of a model, and interrogate its related field.
cat = models.ForeignKey(Category, models.CASCADE)
self.assertEqual('id', cat.remote_field.get_related_field().name)
def test_relation_unsaved(self):
# The <field>_set manager does not join on Null value fields (#17541)
Third.objects.create(name='Third 1')
Third.objects.create(name='Third 2')
th = Third(name="testing")
        # The object isn't saved and thus the relation field is null - we
        # won't even execute a query in this case.
with self.assertNumQueries(0):
self.assertEqual(th.child_set.count(), 0)
th.save()
        # Now the model is saved, so we will need to execute a query.
with self.assertNumQueries(1):
self.assertEqual(th.child_set.count(), 0)
def test_related_object(self):
public_school = School.objects.create(is_public=True)
public_student = Student.objects.create(school=public_school)
private_school = School.objects.create(is_public=False)
private_student = Student.objects.create(school=private_school)
# Only one school is available via all() due to the custom default manager.
self.assertQuerysetEqual(School.objects.all(), ["<School: School object>"])
self.assertEqual(public_student.school, public_school)
        # Make sure the base manager is used so that a student can still access
# its related school even if the default manager doesn't normally
# allow it.
self.assertEqual(private_student.school, private_school)
School._meta.base_manager_name = 'objects'
School._meta._expire_cache()
try:
private_student = Student.objects.get(pk=private_student.pk)
with self.assertRaises(School.DoesNotExist):
private_student.school
finally:
School._meta.base_manager_name = None
School._meta._expire_cache()
def test_hasattr_related_object(self):
# The exception raised on attribute access when a related object
# doesn't exist should be an instance of a subclass of `AttributeError`
# refs #21563
self.assertFalse(hasattr(Article(), 'reporter'))
def test_clear_after_prefetch(self):
c = City.objects.create(name='Musical City')
District.objects.create(name='Ladida', city=c)
city = City.objects.prefetch_related('districts').get(id=c.id)
self.assertQuerysetEqual(city.districts.all(), ['<District: Ladida>'])
city.districts.clear()
self.assertQuerysetEqual(city.districts.all(), [])
def test_remove_after_prefetch(self):
c = City.objects.create(name='Musical City')
d = District.objects.create(name='Ladida', city=c)
city = City.objects.prefetch_related('districts').get(id=c.id)
self.assertQuerysetEqual(city.districts.all(), ['<District: Ladida>'])
city.districts.remove(d)
self.assertQuerysetEqual(city.districts.all(), [])
def test_add_after_prefetch(self):
c = City.objects.create(name='Musical City')
District.objects.create(name='Ladida', city=c)
d2 = District.objects.create(name='Ladidu')
city = City.objects.prefetch_related('districts').get(id=c.id)
self.assertEqual(city.districts.count(), 1)
city.districts.add(d2)
self.assertEqual(city.districts.count(), 2)
def test_set_after_prefetch(self):
c = City.objects.create(name='Musical City')
District.objects.create(name='Ladida', city=c)
d2 = District.objects.create(name='Ladidu')
city = City.objects.prefetch_related('districts').get(id=c.id)
self.assertEqual(city.districts.count(), 1)
city.districts.set([d2])
self.assertQuerysetEqual(city.districts.all(), ['<District: Ladidu>'])
def test_add_then_remove_after_prefetch(self):
c = City.objects.create(name='Musical City')
District.objects.create(name='Ladida', city=c)
d2 = District.objects.create(name='Ladidu')
city = City.objects.prefetch_related('districts').get(id=c.id)
self.assertEqual(city.districts.count(), 1)
city.districts.add(d2)
self.assertEqual(city.districts.count(), 2)
city.districts.remove(d2)
self.assertEqual(city.districts.count(), 1)
| bsd-3-clause |
DrabWeb/iTerm2 | tests/esctest/tests/el.py | 31 | 2319 | from esc import NUL, blank
import escargs
import esccmd
import escio
from esctypes import Point, Rect
from escutil import AssertEQ, AssertScreenCharsInRectEqual, GetCursorPosition, knownBug
class ELTests(object):
def prepare(self):
"""Initializes the screen to abcdefghij on the first line with the cursor
on the 'e'."""
esccmd.CUP(Point(1, 1))
escio.Write("abcdefghij")
esccmd.CUP(Point(5, 1))
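  # Editor's sketch (illustrative aside, not part of the original suite) of
  # the prepared screen -- columns 1-10 with the cursor on column 5:
  #
  #     a b c d [e] f g h i j
  #
  # so EL 0 keeps "abcd", EL 1 blanks columns 1-5 (cursor inclusive), and
  # EL 2 clears the whole line, matching the assertions below.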
def test_EL_Default(self):
"""Should erase to right of cursor."""
self.prepare()
esccmd.EL()
AssertScreenCharsInRectEqual(Rect(1, 1, 10, 1),
[ "abcd" + 6 * NUL ])
def test_EL_0(self):
"""Should erase to right of cursor."""
self.prepare()
esccmd.EL(0)
AssertScreenCharsInRectEqual(Rect(1, 1, 10, 1),
[ "abcd" + 6 * NUL ])
def test_EL_1(self):
"""Should erase to left of cursor."""
self.prepare()
esccmd.EL(1)
AssertScreenCharsInRectEqual(Rect(1, 1, 10, 1),
[ 5 * blank() + "fghij" ])
def test_EL_2(self):
"""Should erase whole line."""
self.prepare()
esccmd.EL(2)
AssertScreenCharsInRectEqual(Rect(1, 1, 10, 1),
[ 10 * NUL ])
def test_EL_IgnoresScrollRegion(self):
"""Should erase whole line."""
self.prepare()
esccmd.DECSET(esccmd.DECLRMM)
esccmd.DECSLRM(2, 4)
esccmd.CUP(Point(5, 1))
esccmd.EL(2)
esccmd.DECRESET(esccmd.DECLRMM)
AssertScreenCharsInRectEqual(Rect(1, 1, 10, 1),
[ 10 * NUL ])
def test_EL_doesNotRespectDECProtection(self):
"""EL respects DECSCA."""
escio.Write("a")
escio.Write("b")
esccmd.DECSCA(1)
escio.Write("c")
esccmd.DECSCA(0)
esccmd.CUP(Point(1, 1))
esccmd.EL(2)
AssertScreenCharsInRectEqual(Rect(1, 1, 3, 1),
[ NUL * 3 ])
@knownBug(terminal="iTerm2",
reason="Protection not implemented.")
def test_EL_respectsISOProtection(self):
"""EL respects SPA/EPA."""
escio.Write("a")
escio.Write("b")
esccmd.SPA()
escio.Write("c")
esccmd.EPA()
esccmd.CUP(Point(1, 1))
esccmd.EL(2)
AssertScreenCharsInRectEqual(Rect(1, 1, 3, 1),
[ blank() * 2 + "c" ])
| gpl-2.0 |
dougwig/x-neutron-lbaas | neutron_lbaas/openstack/common/service.py | 2 | 15276 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generic Node base class for all workers that run on hosts."""
import errno
import logging as std_logging
import os
import random
import signal
import sys
import time
try:
# Importing just the symbol here because the io module does not
# exist in Python 2.6.
from io import UnsupportedOperation # noqa
except ImportError:
# Python 2.6
UnsupportedOperation = None
import eventlet
from eventlet import event
from oslo.config import cfg
from neutron_lbaas.openstack.common import eventlet_backdoor
from neutron_lbaas.openstack.common._i18n import _LE, _LI, _LW
from neutron_lbaas.openstack.common import log as logging
from neutron_lbaas.openstack.common import systemd
from neutron_lbaas.openstack.common import threadgroup
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def _sighup_supported():
return hasattr(signal, 'SIGHUP')
def _is_daemon():
# The process group for a foreground process will match the
# process group of the controlling terminal. If those values do
# not match, or ioctl() fails on the stdout file handle, we assume
# the process is running in the background as a daemon.
# http://www.gnu.org/software/bash/manual/bashref.html#Job-Control-Basics
try:
is_daemon = os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno())
except OSError as err:
if err.errno == errno.ENOTTY:
# Assume we are a daemon because there is no terminal.
is_daemon = True
else:
raise
except UnsupportedOperation:
# Could not get the fileno for stdout, so we must be a daemon.
is_daemon = True
return is_daemon
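# Editor's illustration (hypothetical snippet, not part of the original
# module): the foreground/background test above can be exercised directly;
# run in the foreground it prints False, run with a trailing '&' it prints
# True, because only a foreground process shares the terminal's foreground
# process group:
#
#     import os, sys
#     print(os.getpgrp() != os.tcgetpgrp(sys.stdout.fileno()))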
def _is_sighup_and_daemon(signo):
if not (_sighup_supported() and signo == signal.SIGHUP):
# Avoid checking if we are a daemon, because the signal isn't
# SIGHUP.
return False
return _is_daemon()
def _signo_to_signame(signo):
signals = {signal.SIGTERM: 'SIGTERM',
signal.SIGINT: 'SIGINT'}
if _sighup_supported():
signals[signal.SIGHUP] = 'SIGHUP'
return signals[signo]
def _set_signals_handler(handler):
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGINT, handler)
if _sighup_supported():
signal.signal(signal.SIGHUP, handler)
class Launcher(object):
"""Launch one or more services and wait for them to complete."""
def __init__(self):
"""Initialize the service launcher.
:returns: None
"""
self.services = Services()
self.backdoor_port = eventlet_backdoor.initialize_if_enabled()
def launch_service(self, service):
"""Load and start the given service.
:param service: The service you would like to start.
:returns: None
"""
service.backdoor_port = self.backdoor_port
self.services.add(service)
def stop(self):
"""Stop all services which are currently running.
:returns: None
"""
self.services.stop()
def wait(self):
"""Waits until all services have been stopped, and then returns.
:returns: None
"""
self.services.wait()
def restart(self):
"""Reload config files and restart service.
:returns: None
"""
cfg.CONF.reload_config_files()
self.services.restart()
class SignalExit(SystemExit):
def __init__(self, signo, exccode=1):
super(SignalExit, self).__init__(exccode)
self.signo = signo
class ServiceLauncher(Launcher):
def _handle_signal(self, signo, frame):
# Allow the process to be killed again and die from natural causes
_set_signals_handler(signal.SIG_DFL)
raise SignalExit(signo)
def handle_signal(self):
_set_signals_handler(self._handle_signal)
def _wait_for_exit_or_signal(self, ready_callback=None):
status = None
signo = 0
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, std_logging.DEBUG)
try:
if ready_callback:
ready_callback()
super(ServiceLauncher, self).wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_LI('Caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
finally:
self.stop()
return status, signo
def wait(self, ready_callback=None):
systemd.notify_once()
while True:
self.handle_signal()
status, signo = self._wait_for_exit_or_signal(ready_callback)
if not _is_sighup_and_daemon(signo):
return status
self.restart()
class ServiceWrapper(object):
def __init__(self, service, workers):
self.service = service
self.workers = workers
self.children = set()
self.forktimes = []
class ProcessLauncher(object):
def __init__(self, wait_interval=0.01):
"""Constructor.
:param wait_interval: The interval to sleep for between checks
of child process exit.
"""
self.children = {}
self.sigcaught = None
self.running = True
self.wait_interval = wait_interval
rfd, self.writepipe = os.pipe()
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
self.handle_signal()
def handle_signal(self):
_set_signals_handler(self._handle_signal)
def _handle_signal(self, signo, frame):
self.sigcaught = signo
self.running = False
# Allow the process to be killed again and die from natural causes
_set_signals_handler(signal.SIG_DFL)
def _pipe_watcher(self):
# This will block until the write end is closed when the parent
# dies unexpectedly
self.readpipe.read()
LOG.info(_LI('Parent process has died unexpectedly, exiting'))
sys.exit(1)
def _child_process_handle_signal(self):
# Setup child signal handlers differently
def _sigterm(*args):
signal.signal(signal.SIGTERM, signal.SIG_DFL)
raise SignalExit(signal.SIGTERM)
def _sighup(*args):
signal.signal(signal.SIGHUP, signal.SIG_DFL)
raise SignalExit(signal.SIGHUP)
signal.signal(signal.SIGTERM, _sigterm)
if _sighup_supported():
signal.signal(signal.SIGHUP, _sighup)
# Block SIGINT and let the parent send us a SIGTERM
signal.signal(signal.SIGINT, signal.SIG_IGN)
def _child_wait_for_exit_or_signal(self, launcher):
status = 0
signo = 0
# NOTE(johannes): All exceptions are caught to ensure this
        # doesn't fall back into the loop spawning children. It would
# be bad for a child to spawn more children.
try:
launcher.wait()
except SignalExit as exc:
signame = _signo_to_signame(exc.signo)
LOG.info(_LI('Child caught %s, exiting'), signame)
status = exc.code
signo = exc.signo
except SystemExit as exc:
status = exc.code
except BaseException:
LOG.exception(_LE('Unhandled exception'))
status = 2
finally:
launcher.stop()
return status, signo
def _child_process(self, service):
self._child_process_handle_signal()
# Reopen the eventlet hub to make sure we don't share an epoll
# fd with parent and/or siblings, which would be bad
eventlet.hubs.use_hub()
# Close write to ensure only parent has it open
os.close(self.writepipe)
# Create greenthread to watch for parent to close pipe
eventlet.spawn_n(self._pipe_watcher)
# Reseed random number generator
random.seed()
launcher = Launcher()
launcher.launch_service(service)
return launcher
def _start_child(self, wrap):
if len(wrap.forktimes) > wrap.workers:
# Limit ourselves to one process a second (over the period of
# number of workers * 1 second). This will allow workers to
# start up quickly but ensure we don't fork off children that
# die instantly too quickly.
if time.time() - wrap.forktimes[0] < wrap.workers:
LOG.info(_LI('Forking too fast, sleeping'))
time.sleep(1)
wrap.forktimes.pop(0)
wrap.forktimes.append(time.time())
pid = os.fork()
if pid == 0:
launcher = self._child_process(wrap.service)
while True:
self._child_process_handle_signal()
status, signo = self._child_wait_for_exit_or_signal(launcher)
if not _is_sighup_and_daemon(signo):
break
launcher.restart()
os._exit(status)
LOG.info(_LI('Started child %d'), pid)
wrap.children.add(pid)
self.children[pid] = wrap
return pid
def launch_service(self, service, workers=1):
wrap = ServiceWrapper(service, workers)
LOG.info(_LI('Starting %d workers'), wrap.workers)
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def _wait_child(self):
try:
# Don't block if no child processes have exited
pid, status = os.waitpid(0, os.WNOHANG)
if not pid:
return None
except OSError as exc:
if exc.errno not in (errno.EINTR, errno.ECHILD):
raise
return None
if os.WIFSIGNALED(status):
sig = os.WTERMSIG(status)
LOG.info(_LI('Child %(pid)d killed by signal %(sig)d'),
dict(pid=pid, sig=sig))
else:
code = os.WEXITSTATUS(status)
LOG.info(_LI('Child %(pid)s exited with status %(code)d'),
dict(pid=pid, code=code))
if pid not in self.children:
LOG.warning(_LW('pid %d not in child list'), pid)
return None
wrap = self.children.pop(pid)
wrap.children.remove(pid)
return wrap
def _respawn_children(self):
while self.running:
wrap = self._wait_child()
if not wrap:
# Yield to other threads if no children have exited
# Sleep for a short time to avoid excessive CPU usage
# (see bug #1095346)
eventlet.greenthread.sleep(self.wait_interval)
continue
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
def wait(self):
"""Loop waiting on children to die and respawning as necessary."""
systemd.notify_once()
LOG.debug('Full set of CONF:')
CONF.log_opt_values(LOG, std_logging.DEBUG)
try:
while True:
self.handle_signal()
self._respawn_children()
# No signal means that stop was called. Don't clean up here.
if not self.sigcaught:
return
signame = _signo_to_signame(self.sigcaught)
LOG.info(_LI('Caught %s, stopping children'), signame)
if not _is_sighup_and_daemon(self.sigcaught):
break
for pid in self.children:
os.kill(pid, signal.SIGHUP)
self.running = True
self.sigcaught = None
except eventlet.greenlet.GreenletExit:
LOG.info(_LI("Wait called after thread killed. Cleaning up."))
self.stop()
def stop(self):
"""Terminate child processes and wait on each."""
self.running = False
for pid in self.children:
try:
os.kill(pid, signal.SIGTERM)
except OSError as exc:
if exc.errno != errno.ESRCH:
raise
# Wait for children to die
if self.children:
LOG.info(_LI('Waiting on %d children to exit'), len(self.children))
while self.children:
self._wait_child()
class Service(object):
"""Service object for binaries running on hosts."""
def __init__(self, threads=1000):
self.tg = threadgroup.ThreadGroup(threads)
# signal that the service is done shutting itself down:
self._done = event.Event()
def reset(self):
# NOTE(Fengqian): docs for Event.reset() recommend against using it
self._done = event.Event()
def start(self):
pass
def stop(self):
self.tg.stop()
self.tg.wait()
# Signal that service cleanup is done:
if not self._done.ready():
self._done.send()
def wait(self):
self._done.wait()
class Services(object):
def __init__(self):
self.services = []
self.tg = threadgroup.ThreadGroup()
self.done = event.Event()
def add(self, service):
self.services.append(service)
self.tg.add_thread(self.run_service, service, self.done)
def stop(self):
# wait for graceful shutdown of services:
for service in self.services:
service.stop()
service.wait()
# Each service has performed cleanup, now signal that the run_service
# wrapper threads can now die:
if not self.done.ready():
self.done.send()
# reap threads:
self.tg.stop()
def wait(self):
self.tg.wait()
def restart(self):
self.stop()
self.done = event.Event()
for restart_service in self.services:
restart_service.reset()
self.tg.add_thread(self.run_service, restart_service, self.done)
@staticmethod
def run_service(service, done):
"""Service start wrapper.
:param service: service to run
:param done: event to wait on until a shutdown is triggered
:returns: None
"""
service.start()
done.wait()
def launch(service, workers=1):
if workers is None or workers == 1:
launcher = ServiceLauncher()
launcher.launch_service(service)
else:
launcher = ProcessLauncher()
launcher.launch_service(service, workers=workers)
return launcher
| apache-2.0 |
ioram7/keystone-federado-pgid2013 | build/lib.linux-x86_64-2.7/keystone/contrib/ec2/backends/kvs.py | 9 | 1820 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import kvs
class Ec2(kvs.Base):
# Public interface
def get_credential(self, credential_id):
credential_ref = self.db.get('credential-%s' % credential_id)
return credential_ref
def list_credentials(self, user_id):
credential_ids = self.db.get('credential_list', [])
rv = [self.get_credential(x) for x in credential_ids]
return [x for x in rv if x['user_id'] == user_id]
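    # Editor's usage sketch (hypothetical values, not part of the original
    # backend; assumes Ec2() constructs like any kvs.Base subclass):
    #
    #     backend = Ec2()
    #     backend.create_credential('cred1', {'user_id': 'u1'})
    #     backend.list_credentials('u1')     # -> [{'user_id': 'u1'}]
    #     backend.delete_credential('cred1')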
# CRUD
def create_credential(self, credential_id, credential):
self.db.set('credential-%s' % credential_id, credential)
credential_list = set(self.db.get('credential_list', []))
credential_list.add(credential_id)
self.db.set('credential_list', list(credential_list))
return credential
def delete_credential(self, credential_id):
# This will ensure credential-%s is here before deleting
self.db.get('credential-%s' % credential_id)
self.db.delete('credential-%s' % credential_id)
credential_list = set(self.db.get('credential_list', []))
credential_list.remove(credential_id)
self.db.set('credential_list', list(credential_list))
return None
| apache-2.0 |
TMU-VHDL-team2/sqrt | wiki_data/a_dec.py | 1 | 1269 | #!/usr/bin/env python3
def func1():
if j < 0:
if (32768 >> (-j-1)) < x1:
return y2
else:
return x1 << -j
else:
return x1 >> j
def func2():
if j < 0:
return y >> -j
else:
return y << j
x1 = int(input())
x0 = 0
a = 0
y = 0
n = 0
c = 0
print(hex(x1))
t = x1
while t > 0:
t >>= 1
n += 1
n += 16
n += n & 1
for i in range(n, -1, -2):
j = i - 16
a <<= 1
y <<= 1
if y > 65535:
        y %= 65536  # keep the low 16 bits
y2 = (1 | y)
c = True
    f1 = func1()
    if f1 < y2:
if x0 >> i < y2:
c = False
if c:
a += 1
y += 1
x1 -= func2()
        x0 -= (y << i) % 65536  # keep the low 16 bits
        if x0 < 0:
            x1 -= 1
            x0 += 65536  # keep the low 16 bits
y += 1
print('i, c, a, y, x1, x0, func1, x0>>i, y2 :',
"{0:2d}".format(i),
"{0:2d}".format(c),
"{0:6d}".format(a),
"{0:8s}".format(hex(y)),
"{0:8s}".format(hex(x1)),
"{0:8s}".format(hex(x0)),
"{0:6d}".format(f1),
"{0:6d}".format(x0>>i),
"{0:6d}".format(y2)
)
print(hex(a), ' = ', a / 256.)
| mit |
openid/python-openid | examples/djopenid/consumer/views.py | 1 | 8229 | from __future__ import unicode_literals
import six
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from openid.consumer import consumer
from openid.consumer.discover import DiscoveryFailure
from openid.extensions import ax, pape, sreg
from openid.server.trustroot import RP_RETURN_TO_URL_TYPE
from openid.yadis.constants import YADIS_HEADER_NAME
from .. import util
PAPE_POLICIES = [
'AUTH_PHISHING_RESISTANT',
'AUTH_MULTI_FACTOR',
'AUTH_MULTI_FACTOR_PHYSICAL',
]
# List of (name, uri) for use in generating the request form.
POLICY_PAIRS = [(p, getattr(pape, p))
for p in PAPE_POLICIES]
def getOpenIDStore():
"""
Return an OpenID store object fit for the currently-chosen
database backend, if any.
"""
return util.getOpenIDStore('/tmp/djopenid_c_store', 'c_')
def getConsumer(request):
"""
Get a Consumer object to perform OpenID authentication.
"""
return consumer.Consumer(request.session, getOpenIDStore())
def renderIndexPage(request, **template_args):
template_args['consumer_url'] = request.build_absolute_uri(reverse('consumer:index'))
template_args['pape_policies'] = POLICY_PAIRS
response = render(request, 'consumer/index.html', template_args)
response[YADIS_HEADER_NAME] = request.build_absolute_uri(reverse('consumer:xrds'))
return response
def startOpenID(request):
"""
Start the OpenID authentication process. Renders an
authentication form and accepts its POST.
* Renders an error message if OpenID cannot be initiated
* Requests some Simple Registration data using the OpenID
library's Simple Registration machinery
* Generates the appropriate trust root and return URL values for
this application (tweak where appropriate)
* Generates the appropriate redirect based on the OpenID protocol
version.
"""
if request.POST:
# Start OpenID authentication.
openid_url = request.POST['openid_identifier']
c = getConsumer(request)
error = None
try:
auth_request = c.begin(openid_url)
except DiscoveryFailure as e:
# Some other protocol-level failure occurred.
error = "OpenID discovery error: %s" % (six.text_type(e),)
if error:
# Render the page with an error.
return renderIndexPage(request, error=error)
# Add Simple Registration request information. Some fields
# are optional, some are required. It's possible that the
# server doesn't support sreg or won't return any of the
# fields.
sreg_request = sreg.SRegRequest(optional=['email', 'nickname'],
required=['dob'])
auth_request.addExtension(sreg_request)
# Add Attribute Exchange request information.
ax_request = ax.FetchRequest()
# XXX - uses myOpenID-compatible schema values, which are
# not those listed at axschema.org.
ax_request.add(
ax.AttrInfo('http://schema.openid.net/namePerson',
required=True))
ax_request.add(
ax.AttrInfo('http://schema.openid.net/contact/web/default',
required=False, count=ax.UNLIMITED_VALUES))
auth_request.addExtension(ax_request)
# Add PAPE request information. We'll ask for
# phishing-resistant auth and display any policies we get in
# the response.
requested_policies = []
policy_prefix = 'policy_'
for k, v in six.iteritems(request.POST):
if k.startswith(policy_prefix):
policy_attr = k[len(policy_prefix):]
if policy_attr in PAPE_POLICIES:
requested_policies.append(getattr(pape, policy_attr))
if requested_policies:
pape_request = pape.Request(requested_policies)
auth_request.addExtension(pape_request)
# Compute the trust root and return URL values to build the
# redirect information.
trust_root = request.build_absolute_uri(reverse('consumer:index'))
return_to = request.build_absolute_uri(reverse('consumer:return_to'))
# Send the browser to the server either by sending a redirect
# URL or by generating a POST form.
if auth_request.shouldSendRedirect():
url = auth_request.redirectURL(trust_root, return_to)
return HttpResponseRedirect(url)
else:
# Beware: this renders a template whose content is a form
# and some javascript to submit it upon page load. Non-JS
# users will have to click the form submit button to
# initiate OpenID authentication.
form_id = 'openid_message'
form_html = auth_request.formMarkup(trust_root, return_to,
False, {'id': form_id})
return render(request, 'consumer/request_form.html', {'html': form_html})
return renderIndexPage(request)
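# Editor's note (illustrative wire format, abridged): the SRegRequest built
# in startOpenID serializes into checkid_setup query arguments along the
# lines of
#
#     openid.ns.sreg=http://openid.net/extensions/sreg/1.1
#     openid.sreg.required=dob
#     openid.sreg.optional=email,nickname
#
# and a cooperating provider answers with openid.sreg.dob=... fields that
# SRegResponse.fromSuccessResponse() unpacks in finishOpenID below.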
def finishOpenID(request):
"""
Finish the OpenID authentication process. Invoke the OpenID
library with the response from the OpenID server and render a page
detailing the result.
"""
result = {}
# Because the object containing the query parameters is a
# MultiValueDict and the OpenID library doesn't allow that, we'll
# convert it to a normal dict.
# OpenID 2 can send arguments as either POST body or GET query
# parameters.
request_args = util.normalDict(request.GET)
if request.method == 'POST':
request_args.update(util.normalDict(request.POST))
if request_args:
c = getConsumer(request)
# Get a response object indicating the result of the OpenID
# protocol.
return_to = request.build_absolute_uri(reverse('consumer:return_to'))
response = c.complete(request_args, return_to)
# Get a Simple Registration response object if response
# information was included in the OpenID response.
sreg_response = {}
ax_items = {}
if response.status == consumer.SUCCESS:
sreg_response = sreg.SRegResponse.fromSuccessResponse(response)
ax_response = ax.FetchResponse.fromSuccessResponse(response)
if ax_response:
ax_items = {
'fullname': ax_response.get(
'http://schema.openid.net/namePerson'),
'web': ax_response.get(
'http://schema.openid.net/contact/web/default'),
}
# Get a PAPE response object if response information was
# included in the OpenID response.
pape_response = None
if response.status == consumer.SUCCESS:
pape_response = pape.Response.fromSuccessResponse(response)
if not pape_response.auth_policies:
pape_response = None
# Map different consumer status codes to template contexts.
results = {
consumer.CANCEL:
{'message': 'OpenID authentication cancelled.'},
consumer.FAILURE:
{'error': 'OpenID authentication failed.'},
consumer.SUCCESS:
{'url': response.getDisplayIdentifier(),
'sreg': sreg_response and sreg_response.items(),
'ax': ax_items.items(),
'pape': pape_response}
}
result = results[response.status]
if isinstance(response, consumer.FailureResponse):
# In a real application, this information should be
# written to a log for debugging/tracking OpenID
# authentication failures. In general, the messages are
# not user-friendly, but intended for developers.
result['failure_reason'] = response.message
return renderIndexPage(request, **result)
def rpXRDS(request):
"""
Return a relying party verification XRDS document
"""
return_to = request.build_absolute_uri(reverse('consumer:return_to'))
return util.renderXRDS(request, [RP_RETURN_TO_URL_TYPE], [return_to])
| apache-2.0 |
jgomezdans/KaFKA | kafka/inference/solvers.py | 1 | 5323 | #!/usr/bin/env python
"""Some solvers"""
# KaFKA A fast Kalman filter implementation for raster based datasets.
# Copyright (c) 2017 J Gomez-Dans. All rights reserved.
#
# This file is part of KaFKA.
#
# KaFKA is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# KaFKA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with KaFKA. If not, see <http://www.gnu.org/licenses/>.
from collections import namedtuple
import numpy as np
import scipy.sparse as sp
import matplotlib.pyplot as plt
#from utils import matrix_squeeze, spsolve2, reconstruct_array
# Set up logging
import logging
LOG = logging.getLogger(__name__+".solvers")
__author__ = "J Gomez-Dans"
__copyright__ = "Copyright 2017 J Gomez-Dans"
__version__ = "1.0 (09.03.2017)"
__license__ = "GPLv3"
__email__ = "[email protected]"
def variational_kalman( observations, mask, state_mask, uncertainty, H_matrix, n_params,
x_forecast, P_forecast, P_forecast_inv, the_metadata, approx_diagonal=True):
"""We can just use """
if len(H_matrix) == 2:
non_linear = True
H0, H_matrix_ = H_matrix
else:
H0 = 0.
non_linear = False
R_mat = sp.diags(uncertainty.diagonal()[state_mask.flatten()])
LOG.info("Creating linear problem")
y = observations[state_mask]
y = np.where(mask[state_mask], y, 0.)
y_orig = y*1.
if non_linear:
y = y + H_matrix_.dot(x_forecast) - H0
#Aa = matrix_squeeze (P_forecast_inv, mask=maska.ravel())
A = H_matrix_.T.dot(R_mat).dot(H_matrix_) + P_forecast_inv
b = H_matrix_.T.dot(R_mat).dot(y) + P_forecast_inv.dot (x_forecast)
b = b.astype(np.float32)
A = A.astype(np.float32)
# Here we can either do a spLU of A, and solve, or we can have a first go
# by assuming P_forecast_inv is diagonal, and use the inverse of A_approx as
# a preconditioner
LOG.info("Solving")
AI = sp.linalg.splu (A)
x_analysis = AI.solve (b)
# So retval is the solution vector and A is the Hessian
# (->inv(A) is posterior cov)
fwd_modelled = H_matrix_.dot(x_analysis-x_forecast) + H0
innovations = y_orig - fwd_modelled
#x_analysis = reconstruct_array ( x_analysis_prime, x_forecast,
# mask.ravel(), n_params=n_params)
return x_analysis, None, A, innovations, fwd_modelled
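# Editor's note (algebra sketch inferred from the code above): both solvers
# in this module assemble the normal-equations form of the variational
# update,
#
#     (H^T W H + P_f^{-1}) x_a = H^T W y + P_f^{-1} x_f
#
# where W is the diagonal 'uncertainty' matrix used directly as the
# observation weight (i.e. it is expected to already hold R^{-1}), so
# inv(A) plays the role of the posterior covariance and x_a of the
# analysis state.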
def sort_band_data(H_matrix, observations, uncertainty, mask,
x0, x_forecast, state_mask):
if len(H_matrix) == 2:
non_linear = True
H0, H_matrix_ = H_matrix
else:
H0 = 0.
H_matrix_ = H_matrix
non_linear = False
R = uncertainty.diagonal()[state_mask.flatten()]
y = observations[state_mask]
y = np.where(mask[state_mask], y, 0.)
y_orig = y*1.
if non_linear:
y = y + H_matrix_.dot(x0) - H0
return H_matrix_, H0, R, y, y_orig
def variational_kalman_multiband( observations_b, mask_b, state_mask, uncertainty_b, H_matrix_b, n_params,
x0, x_forecast, P_forecast, P_forecast_inv, the_metadata_b, approx_diagonal=True):
"""We can just use """
n_bands = len(observations_b)
y = []
y_orig = []
H_matrix = []
H0 = []
R_mat = []
for i in range(n_bands):
a, b, c, d, e = sort_band_data(H_matrix_b[i], observations_b[i],
uncertainty_b[i], mask_b[i], x0, x_forecast, state_mask)
H_matrix.append(a)
H0.append(b)
R_mat.append(c)
y.append(d)
y_orig.append(e)
H_matrix_ = sp.vstack(H_matrix)
H0 = np.hstack(H0)
R_mat = sp.diags(np.hstack(R_mat))
y = np.hstack(y)
y_orig = np.hstack(y_orig)
#Aa = matrix_squeeze (P_forecast_inv, mask=maska.ravel())
A = H_matrix_.T.dot(R_mat).dot(H_matrix_) + P_forecast_inv
b = H_matrix_.T.dot(R_mat).dot(y) + P_forecast_inv.dot (x_forecast)
b = b.astype(np.float32)
A = A.astype(np.float32)
# Here we can either do a spLU of A, and solve, or we can have a first go
# by assuming P_forecast_inv is diagonal, and use the inverse of A_approx as
# a preconditioner
LOG.info("Solving")
AI = sp.linalg.splu (A)
x_analysis = AI.solve (b)
# So retval is the solution vector and A is the Hessian
# (->inv(A) is posterior cov)
fwd_modelled = H_matrix_.dot(x_analysis-x_forecast) + H0
innovations = y_orig - fwd_modelled
""" For now I am going to return innovations as y_orig - H0 as
That is what is needed by the Hessian correction. Need to discuss with Jose
What the intention for innovations is and then we can find the best solution"""
innovations = y_orig - H0
#x_analysis = reconstruct_array ( x_analysis_prime, x_forecast,
# mask.ravel(), n_params=n_params)
return x_analysis, None, A, innovations, fwd_modelled
| gpl-3.0 |
dllsf/odootest | addons/project_issue/__init__.py | 433 | 1131 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project_issue
import report
import res_config
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
hepochen/hoedown_misaka | tests/run_tests.py | 4 | 2663 | # -*- coding: utf-8 -*-
import importlib
import inspect
import os
import sys
from itertools import chain
from os.path import dirname, join as jp, splitext
CWD = dirname(sys.modules[__name__].__file__)
sys.path.insert(0, jp(CWD, '..'))
from chibitest import runner, TestCase, Benchmark
help_message = """\
Options:
--include (-i) comma separated list of testcases
--exclude (-e) comma separated list of testcases
--benchmark (-b) run bechmarks
--list (-l) list all testcases
"""
def get_test_modules():
modules = []
for n in os.listdir(CWD):
if n.startswith('test_') and n.endswith('.py'):
n, _ = splitext(n)
modules.append(importlib.import_module(n))
return modules
def is_testcase(n):
return inspect.isclass(n) \
and issubclass(n, TestCase) \
and not n is TestCase \
and not n is Benchmark
def is_benchmark(n):
return inspect.isclass(n) \
and issubclass(n, Benchmark) \
and not n is Benchmark
def get_testcases(module):
return [(testcase.__name__, testcase) \
for _, testcase in inspect.getmembers(module, is_testcase)]
def run_testcases(testcases, benchmark=False, include=[], exclude=[]):
if include:
testcases = [n for n in testcases if n[0] in include]
if exclude:
testcases = [n for n in testcases if not n[0] in exclude]
if benchmark:
testcases = [n[1] for n in testcases if is_benchmark(n[1])]
else:
testcases = [n[1] for n in testcases if not is_benchmark(n[1])]
runner(testcases)
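# Editor's examples (test-case names are hypothetical):
#
#     python run_tests.py --list                   # show available cases
#     python run_tests.py -i MarkdownTest          # run a single case
#     python run_tests.py -e SlowTest --benchmark  # benchmarks, minus one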
if __name__ == '__main__':
testcases = list(chain(*map(get_testcases, get_test_modules())))
include = []
exclude = []
benchmark = False
if len(sys.argv) >= 2:
if sys.argv[1] in ('-l', '--list'):
for name, testcase in testcases:
print(name)
sys.exit(0)
elif sys.argv[1] in ('-h', '--help'):
print(help_message)
sys.exit(0)
else:
last_arg = '--include'
for arg in sys.argv[1:]:
if arg in ('-i', '--include', '-e', '--exclude'):
last_arg = arg
elif not arg.startswith('-'): # - or --
arg = [n for n in arg.split(',') if n]
if last_arg in ('-i', '--include'):
include.extend(arg)
elif last_arg in ('-e', '--exclude'):
exclude.extend(arg)
if '-b' in sys.argv[1:] or '--benchmark' in sys.argv[1:]:
benchmark = True
run_testcases(testcases, benchmark, include, exclude)
| mit |
praekelt/malaria24-django | malaria24/ona/tests/test_admin.py | 1 | 6051 | from django.contrib.auth.models import User
from django.core import urlresolvers
from django.db.models.signals import post_save
from django.test import override_settings
from mock import patch
from malaria24.ona.models import (
ReportedCase,
new_case_alert_ehps,
new_case_alert_mis, new_case_alert_jembi)
from .base import MalariaTestCase
class ReportedCaseAdminTest(MalariaTestCase):
def setUp(self):
super(ReportedCaseAdminTest, self).setUp()
post_save.disconnect(
new_case_alert_ehps, sender=ReportedCase)
post_save.disconnect(
new_case_alert_mis, sender=ReportedCase)
post_save.disconnect(
new_case_alert_jembi, sender=ReportedCase)
User.objects.create_superuser(
username='test',
password='test',
email='[email protected]'
)
self.client.login(username='test', password='test')
def tearDown(self):
super(ReportedCaseAdminTest, self).tearDown()
post_save.connect(
new_case_alert_ehps, sender=ReportedCase)
post_save.connect(
new_case_alert_mis, sender=ReportedCase)
post_save.connect(
new_case_alert_jembi, sender=ReportedCase)
@override_settings(FORWARD_TO_JEMBI=False)
@patch('malaria24.ona.tasks.compile_and_send_jembi.delay')
def test_setting_disables_send_to_jembi(self, mock_task):
case = self.mk_case(first_name="John", last_name="Day", gender="male",
msisdn="0711111111", landmark_description="None",
id_type="said", case_number="20171214-123456-42",
abroad="No", locality="None",
reported_by="+27721111111",
sa_id_number="5608071111083",
landmark="School", facility_code="123456")
case.save()
case.digest = None
data = {
'action': 'send_jembi_alert',
'_selected_action': [case.pk]
}
list_url = urlresolvers.reverse('admin:ona_reportedcase_changelist')
response = self.client.post(list_url, data, follow=True)
        mock_task.assert_not_called()
self.assertContains(response, "Sending to Jembi currently disabled.")
@patch('malaria24.ona.tasks.compile_and_send_jembi.delay')
def test_only_unsent_cases_sent_to_jembi(self, mock_task):
case1 = self.mk_case(first_name="John", last_name="Day", gender="male",
msisdn="0711111111", landmark_description="None",
id_type="said", case_number="20171214-123456-42",
abroad="No", locality="None",
reported_by="+27721111111",
sa_id_number="5608071111083",
landmark="School", facility_code="123456",
jembi_alert_sent=True)
case2 = self.mk_case(first_name="Mark", last_name="Day", gender="male",
msisdn="0711111112", landmark_description="None",
id_type="said", case_number="20171214-123456-56",
abroad="No", locality="None",
reported_by="+27721111112",
sa_id_number="5610031111083",
landmark="School", facility_code="123456")
case1.save()
case2.save()
data = {
'action': 'send_jembi_alert',
'_selected_action': [case1.pk, case2.pk]
}
list_url = urlresolvers.reverse('admin:ona_reportedcase_changelist')
response = self.client.post(list_url, data, follow=True)
mock_task.assert_called_with(case2.pk)
self.assertContains(response,
"Forwarding all unsent cases to Jembi (total 1).")
@patch('malaria24.ona.tasks.compile_and_send_jembi.delay')
def test_task_called_for_each_selected_unsent_case(self, mock_task):
case1 = self.mk_case(first_name="John", last_name="Day", gender="male",
msisdn="0711111111", landmark_description="None",
id_type="said", case_number="20171214-123456-42",
abroad="No", locality="None",
reported_by="+27721111111",
sa_id_number="5608071111083",
landmark="School", facility_code="123456")
case2 = self.mk_case(first_name="Mark", last_name="Day", gender="male",
msisdn="0711111112", landmark_description="None",
id_type="said", case_number="20171214-123456-56",
abroad="No", locality="None",
reported_by="+27721111112",
sa_id_number="5610031111083",
landmark="School", facility_code="123456")
case3 = self.mk_case(first_name="Luke", last_name="Day", gender="male",
msisdn="0711111113", landmark_description="None",
id_type="said", case_number="20171214-123456-64",
abroad="No", locality="None",
reported_by="+27721111113",
sa_id_number="8112051111083",
landmark="School", facility_code="123456")
case1.save()
case2.save()
case3.save()
data = {
'action': 'send_jembi_alert',
'_selected_action': [case1.pk, case2.pk]
}
list_url = urlresolvers.reverse('admin:ona_reportedcase_changelist')
response = self.client.post(list_url, data, follow=True)
mock_task.assert_any_call(case1.pk)
mock_task.assert_any_call(case2.pk)
self.assertContains(response,
"Forwarding all unsent cases to Jembi (total 2).")
| bsd-2-clause |
yongshengwang/hue | desktop/core/ext-py/django-extensions-1.5.0/django_extensions/management/commands/find_template.py | 35 | 1196 | from django.core.management.base import LabelCommand
from django.template import loader
from django.template import TemplateDoesNotExist
import sys
from django_extensions.management.utils import signalcommand
def get_template_path(path):
try:
template = loader.find_template(path)
if template[1]:
return template[1].name
        # work around https://code.djangoproject.com/ticket/17199 issue
for template_loader in loader.template_source_loaders:
try:
source, origin = template_loader.load_template_source(path)
return origin
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist(path)
except TemplateDoesNotExist:
return None
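# Editor's usage sketch (template name and resolved path are hypothetical):
#
#     $ python manage.py find_template admin/base.html
#     /usr/lib/python2.7/site-packages/django/contrib/admin/templates/admin/base.html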
class Command(LabelCommand):
help = "Finds the location of the given template by resolving its path"
args = "[template_path]"
label = 'template path'
@signalcommand
def handle_label(self, template_path, **options):
path = get_template_path(template_path)
if path is None:
sys.stderr.write("No template found\n")
sys.exit(1)
else:
print(path)
| apache-2.0 |
BigBrother-International/gst-cerbero | cerbero/packages/osx/buildtools.py | 3 | 3083 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <[email protected]>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
import tempfile
from cerbero.packages.osx.info_plist import ComponentPropertyPlist
from cerbero.utils import shell
class PackageBuild(object):
    ''' Wrapper for the pkgbuild application '''
CMD = 'pkgbuild'
def create_package(self, root, pkg_id, version, title, output_file,
destination='/opt/', scripts_path=None):
'''
Creates an osx flat package, where all files are properly bundled in a
directory that is set as the package root
@param root: root path
@type root: str
@param pkg_id: package indentifier
@type pkg_id: str
@param version: package version
@type version: str
@param title: package title
@type title: str
@param output_file: path of the output file
@type output_file: str
@param destination: installation path
@type destination: str
@param scripts_path: relative path for package scripts
@type scripts_path: str
'''
args = {'root': root, 'identifier': pkg_id, 'version': version,
'install-location': destination}
if scripts_path is not None:
args['scripts'] = scripts_path
#plist = tempfile.NamedTemporaryFile()
#cpl = ComponentPropertyPlist(title, os.path.basename(output_file))
#cpl.save(plist.name)
#args['component-plist'] = plist.name
shell.call(self._cmd_with_args(args, output_file))
def _cmd_with_args(self, args, output):
args_str = ''
for k, v in args.iteritems():
args_str += " --%s '%s'" % (k, v)
return '%s %s %s' % (self.CMD, args_str, output)
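    # Editor's example (hypothetical values; Python 2 dict ordering makes the
    # flag order arbitrary): for args {'root': '/tmp/r', 'version': '1.0'}
    # and output 'out.pkg', the command above expands to something like
    #
    #     pkgbuild --root '/tmp/r' --version '1.0' out.pkg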
class ProductBuild (object):
    ''' Wrapper for the productbuild application '''
CMD = 'productbuild'
def create_app_package(self, app_bundle, output):
shell.call("%s --component %s /Applications %s"
% (self.CMD, app_bundle, output))
def create_package(self, distribution, output, package_path=None):
cmd = "%s --distribution %s %s" % (self.CMD, distribution, output)
for p in package_path:
cmd += ' --package-path %s' % p
shell.call(cmd)
| lgpl-2.1 |
hramrach/osc | tests/test_addfiles.py | 15 | 3192 | import osc.core
import osc.oscerr
import os
import sys
from common import OscTestCase
FIXTURES_DIR = os.path.join(os.getcwd(), 'addfile_fixtures')
def suite():
import unittest
return unittest.makeSuite(TestAddFiles)
class TestAddFiles(OscTestCase):
def _get_fixtures_dir(self):
return FIXTURES_DIR
def testSimpleAdd(self):
"""add one file ('toadd1') to the wc"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.addfile('toadd1')
exp = 'A toadd1\n'
self.assertEqual(sys.stdout.getvalue(), exp)
self.assertFalse(os.path.exists(os.path.join('.osc', 'toadd1')))
self._check_status(p, 'toadd1', 'A')
self._check_addlist('toadd1\n')
def testSimpleMultipleAdd(self):
"""add multiple files ('toadd1', 'toadd2') to the wc"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.addfile('toadd1')
p.addfile('toadd2')
exp = 'A toadd1\nA toadd2\n'
self.assertEqual(sys.stdout.getvalue(), exp)
self.assertFalse(os.path.exists(os.path.join('.osc', 'toadd1')))
self.assertFalse(os.path.exists(os.path.join('.osc', 'toadd2')))
self._check_status(p, 'toadd1', 'A')
self._check_status(p, 'toadd2', 'A')
self._check_addlist('toadd1\ntoadd2\n')
def testAddVersionedFile(self):
"""add a versioned file"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
self.assertRaises(osc.oscerr.PackageFileConflict, p.addfile, 'merge')
self.assertFalse(os.path.exists(os.path.join('.osc', '_to_be_added')))
self._check_status(p, 'merge', ' ')
def testAddUnversionedFileTwice(self):
"""add the same file twice"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
p.addfile('toadd1')
self.assertRaises(osc.oscerr.PackageFileConflict, p.addfile, 'toadd1')
exp = 'A toadd1\n'
self.assertEqual(sys.stdout.getvalue(), exp)
self.assertFalse(os.path.exists(os.path.join('.osc', 'toadd1')))
self._check_status(p, 'toadd1', 'A')
self._check_addlist('toadd1\n')
def testReplace(self):
"""replace a deleted file ('foo')"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
open('foo', 'w').write('replaced file\n')
p.addfile('foo')
exp = 'A foo\n'
self.assertEqual(sys.stdout.getvalue(), exp)
self.assertTrue(os.path.exists(os.path.join('.osc', 'foo')))
self.assertNotEqual(open(os.path.join('.osc', 'foo'), 'r').read(), 'replaced file\n')
self.assertFalse(os.path.exists(os.path.join('.osc', '_to_be_deleted')))
self._check_status(p, 'foo', 'R')
self._check_addlist('foo\n')
def testAddNonExistentFile(self):
"""add a non existent file"""
self._change_to_pkg('simple')
p = osc.core.Package('.')
self.assertRaises(osc.oscerr.OscIOError, p.addfile, 'doesnotexist')
self.assertFalse(os.path.exists(os.path.join('.osc', '_to_be_added')))
if __name__ == '__main__':
import unittest
unittest.main()
| gpl-2.0 |
whitehorse-io/encarnia | pyenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/escprober.py | 2936 | 3187 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = [
CodingStateMachine(HZSMModel),
CodingStateMachine(ISO2022CNSMModel),
CodingStateMachine(ISO2022JPSMModel),
CodingStateMachine(ISO2022KRSMModel)
]
self.reset()
def reset(self):
CharSetProber.reset(self)
for codingSM in self._mCodingSM:
if not codingSM:
continue
codingSM.active = True
codingSM.reset()
self._mActiveSM = len(self._mCodingSM)
self._mDetectedCharset = None
def get_charset_name(self):
return self._mDetectedCharset
def get_confidence(self):
if self._mDetectedCharset:
return 0.99
else:
return 0.00
def feed(self, aBuf):
for c in aBuf:
# PY3K: aBuf is a byte array, so c is an int, not a byte
for codingSM in self._mCodingSM:
if not codingSM:
continue
if not codingSM.active:
continue
codingState = codingSM.next_state(wrap_ord(c))
if codingState == constants.eError:
codingSM.active = False
self._mActiveSM -= 1
if self._mActiveSM <= 0:
self._mState = constants.eNotMe
return self.get_state()
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
self._mDetectedCharset = codingSM.get_coding_state_machine() # nopep8
return self.get_state()
return self.get_state()
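# A short usage sketch (the sample string is hypothetical): this prober only
# fires on escape-sequence encodings such as ISO-2022-JP.
#
#   prober = EscCharSetProber()
#   prober.feed(u'\u3053\u3093\u306b\u3061\u306f'.encode('iso-2022-jp'))
#   if prober.get_state() == constants.eFoundIt:
#       print(prober.get_charset_name())  # e.g. 'ISO-2022-JP'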
| mit |
ubc/edx-platform | common/djangoapps/course_modes/tests/test_views.py | 64 | 15334 | import unittest
import decimal
import ddt
from mock import patch
from django.conf import settings
from django.core.urlresolvers import reverse
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from util.testing import UrlResetMixin
from embargo.test_utils import restrict_course
from xmodule.modulestore.tests.factories import CourseFactory
from course_modes.tests.factories import CourseModeFactory
from student.tests.factories import CourseEnrollmentFactory, UserFactory
from student.models import CourseEnrollment
from course_modes.models import CourseMode, Mode
@ddt.ddt
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class CourseModeViewTest(UrlResetMixin, ModuleStoreTestCase):
@patch.dict(settings.FEATURES, {'MODE_CREATION_FOR_TESTING': True})
def setUp(self):
super(CourseModeViewTest, self).setUp('course_modes.urls')
self.course = CourseFactory.create()
self.user = UserFactory.create(username="Bob", email="[email protected]", password="edx")
self.client.login(username=self.user.username, password="edx")
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@ddt.data(
# is_active?, enrollment_mode, redirect?
(True, 'verified', True),
(True, 'honor', False),
(True, 'audit', False),
(False, 'verified', False),
(False, 'honor', False),
(False, 'audit', False),
(False, None, False),
)
@ddt.unpack
def test_redirect_to_dashboard(self, is_active, enrollment_mode, redirect):
# Create the course modes
for mode in ('audit', 'honor', 'verified'):
CourseModeFactory(mode_slug=mode, course_id=self.course.id)
# Enroll the user in the test course
if enrollment_mode is not None:
CourseEnrollmentFactory(
is_active=is_active,
mode=enrollment_mode,
course_id=self.course.id,
user=self.user
)
# Configure whether we're upgrading or not
url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(url)
# Check whether we were correctly redirected
if redirect:
self.assertRedirects(response, reverse('dashboard'))
else:
self.assertEquals(response.status_code, 200)
def test_no_id_redirect(self):
# Create the course modes
CourseModeFactory(mode_slug=CourseMode.NO_ID_PROFESSIONAL_MODE, course_id=self.course.id, min_price=100)
# Enroll the user in the test course
CourseEnrollmentFactory(
is_active=False,
mode=CourseMode.NO_ID_PROFESSIONAL_MODE,
course_id=self.course.id,
user=self.user
)
# Configure whether we're upgrading or not
url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(url)
# Check whether we were correctly redirected
start_flow_url = reverse('verify_student_start_flow', args=[unicode(self.course.id)])
self.assertRedirects(response, start_flow_url)
def test_no_enrollment(self):
# Create the course modes
for mode in ('audit', 'honor', 'verified'):
CourseModeFactory(mode_slug=mode, course_id=self.course.id)
# User visits the track selection page directly without ever enrolling
url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
@ddt.data(
'',
'1,,2',
'1, ,2',
'1, 2, 3'
)
def test_suggested_prices(self, price_list):
# Create the course modes
for mode in ('audit', 'honor'):
CourseModeFactory(mode_slug=mode, course_id=self.course.id)
CourseModeFactory(
mode_slug='verified',
course_id=self.course.id,
suggested_prices=price_list
)
# Enroll the user in the test course to emulate
# automatic enrollment
CourseEnrollmentFactory(
is_active=True,
course_id=self.course.id,
user=self.user
)
# Verify that the prices render correctly
response = self.client.get(
reverse('course_modes_choose', args=[unicode(self.course.id)]),
follow=False,
)
self.assertEquals(response.status_code, 200)
# TODO: Fix it so that response.templates works w/ mako templates, and then assert
# that the right template rendered
@ddt.data(
(['honor', 'verified', 'credit'], True),
(['honor', 'verified'], False),
)
@ddt.unpack
def test_credit_upsell_message(self, available_modes, show_upsell):
# Create the course modes
for mode in available_modes:
CourseModeFactory(mode_slug=mode, course_id=self.course.id)
# Check whether credit upsell is shown on the page
# This should *only* be shown when a credit mode is available
url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(url)
if show_upsell:
self.assertContains(response, "Credit")
else:
self.assertNotContains(response, "Credit")
@ddt.data('professional', 'no-id-professional')
def test_professional_enrollment(self, mode):
# The only course mode is professional ed
CourseModeFactory(mode_slug=mode, course_id=self.course.id, min_price=1)
# Go to the "choose your track" page
choose_track_url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(choose_track_url)
# Since the only available track is professional ed, expect that
# we're redirected immediately to the start of the payment flow.
start_flow_url = reverse('verify_student_start_flow', args=[unicode(self.course.id)])
self.assertRedirects(response, start_flow_url)
# Now enroll in the course
CourseEnrollmentFactory(
user=self.user,
is_active=True,
mode=mode,
course_id=unicode(self.course.id),
)
# Expect that this time we're redirected to the dashboard (since we're already registered)
response = self.client.get(choose_track_url)
self.assertRedirects(response, reverse('dashboard'))
# Mapping of course modes to the POST parameters sent
# when the user chooses that mode.
POST_PARAMS_FOR_COURSE_MODE = {
'honor': {'honor_mode': True},
'verified': {'verified_mode': True, 'contribution': '1.23'},
'unsupported': {'unsupported_mode': True},
}
@ddt.data(
('honor', 'dashboard'),
('verified', 'start-flow'),
)
@ddt.unpack
def test_choose_mode_redirect(self, course_mode, expected_redirect):
# Create the course modes
for mode in ('audit', 'honor', 'verified'):
min_price = 0 if course_mode in ["honor", "audit"] else 1
CourseModeFactory(mode_slug=mode, course_id=self.course.id, min_price=min_price)
# Choose the mode (POST request)
choose_track_url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.post(choose_track_url, self.POST_PARAMS_FOR_COURSE_MODE[course_mode])
# Verify the redirect
if expected_redirect == 'dashboard':
redirect_url = reverse('dashboard')
elif expected_redirect == 'start-flow':
redirect_url = reverse(
'verify_student_start_flow',
kwargs={'course_id': unicode(self.course.id)}
)
else:
self.fail("Must provide a valid redirect URL name")
self.assertRedirects(response, redirect_url)
def test_remember_donation_for_course(self):
# Create the course modes
for mode in ('honor', 'verified'):
CourseModeFactory(mode_slug=mode, course_id=self.course.id)
# Choose the mode (POST request)
choose_track_url = reverse('course_modes_choose', args=[unicode(self.course.id)])
self.client.post(choose_track_url, self.POST_PARAMS_FOR_COURSE_MODE['verified'])
# Expect that the contribution amount is stored in the user's session
self.assertIn('donation_for_course', self.client.session)
self.assertIn(unicode(self.course.id), self.client.session['donation_for_course'])
actual_amount = self.client.session['donation_for_course'][unicode(self.course.id)]
expected_amount = decimal.Decimal(self.POST_PARAMS_FOR_COURSE_MODE['verified']['contribution'])
self.assertEqual(actual_amount, expected_amount)
def test_successful_honor_enrollment(self):
# Create the course modes
for mode in ('honor', 'verified'):
CourseModeFactory(mode_slug=mode, course_id=self.course.id)
# Enroll the user in the default mode (honor) to emulate
# automatic enrollment
params = {
'enrollment_action': 'enroll',
'course_id': unicode(self.course.id)
}
self.client.post(reverse('change_enrollment'), params)
# Explicitly select the honor mode (POST request)
choose_track_url = reverse('course_modes_choose', args=[unicode(self.course.id)])
self.client.post(choose_track_url, self.POST_PARAMS_FOR_COURSE_MODE['honor'])
# Verify that the user's enrollment remains unchanged
mode, is_active = CourseEnrollment.enrollment_mode_for_user(self.user, self.course.id)
self.assertEqual(mode, 'honor')
self.assertEqual(is_active, True)
def test_unsupported_enrollment_mode_failure(self):
# Create the supported course modes
for mode in ('honor', 'verified'):
CourseModeFactory(mode_slug=mode, course_id=self.course.id)
# Choose an unsupported mode (POST request)
choose_track_url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.post(choose_track_url, self.POST_PARAMS_FOR_COURSE_MODE['unsupported'])
self.assertEqual(400, response.status_code)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_default_mode_creation(self):
# Hit the mode creation endpoint with no querystring params, to create an honor mode
url = reverse('create_mode', args=[unicode(self.course.id)])
response = self.client.get(url)
self.assertEquals(response.status_code, 200)
expected_mode = [Mode(u'honor', u'Honor Code Certificate', 0, '', 'usd', None, None, None)]
course_mode = CourseMode.modes_for_course(self.course.id)
self.assertEquals(course_mode, expected_mode)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@ddt.data(
(u'verified', u'Verified Certificate', 10, '10,20,30', 'usd'),
(u'professional', u'Professional Education', 100, '100,200', 'usd'),
)
@ddt.unpack
def test_verified_mode_creation(self, mode_slug, mode_display_name, min_price, suggested_prices, currency):
parameters = {}
parameters['mode_slug'] = mode_slug
parameters['mode_display_name'] = mode_display_name
parameters['min_price'] = min_price
parameters['suggested_prices'] = suggested_prices
parameters['currency'] = currency
url = reverse('create_mode', args=[unicode(self.course.id)])
response = self.client.get(url, parameters)
self.assertEquals(response.status_code, 200)
expected_mode = [Mode(mode_slug, mode_display_name, min_price, suggested_prices, currency, None, None, None)]
course_mode = CourseMode.modes_for_course(self.course.id)
self.assertEquals(course_mode, expected_mode)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
def test_multiple_mode_creation(self):
# Create an honor mode
base_url = reverse('create_mode', args=[unicode(self.course.id)])
self.client.get(base_url)
# Excluding the currency parameter implicitly tests the mode creation endpoint's ability to
# use default values when parameters are partially missing.
parameters = {}
parameters['mode_slug'] = u'verified'
parameters['mode_display_name'] = u'Verified Certificate'
parameters['min_price'] = 10
parameters['suggested_prices'] = '10,20'
# Create a verified mode
url = reverse('create_mode', args=[unicode(self.course.id)])
self.client.get(url, parameters)
honor_mode = Mode(u'honor', u'Honor Code Certificate', 0, '', 'usd', None, None, None)
verified_mode = Mode(u'verified', u'Verified Certificate', 10, '10,20', 'usd', None, None, None)
expected_modes = [honor_mode, verified_mode]
course_modes = CourseMode.modes_for_course(self.course.id)
self.assertEquals(course_modes, expected_modes)
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
@patch.dict(settings.FEATURES, {"IS_EDX_DOMAIN": True})
def test_hide_nav(self):
# Create the course modes
for mode in ["honor", "verified"]:
CourseModeFactory(mode_slug=mode, course_id=self.course.id)
# Load the track selection page
url = reverse('course_modes_choose', args=[unicode(self.course.id)])
response = self.client.get(url)
# Verify that the header navigation links are hidden for the edx.org version
self.assertNotContains(response, "How it Works")
self.assertNotContains(response, "Find courses")
self.assertNotContains(response, "Schools & Partners")
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class TrackSelectionEmbargoTest(UrlResetMixin, ModuleStoreTestCase):
"""Test embargo restrictions on the track selection page. """
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def setUp(self):
super(TrackSelectionEmbargoTest, self).setUp('embargo')
# Create a course and course modes
self.course = CourseFactory.create()
CourseModeFactory(mode_slug='honor', course_id=self.course.id)
CourseModeFactory(mode_slug='verified', course_id=self.course.id, min_price=10)
# Create a user and log in
self.user = UserFactory.create(username="Bob", email="[email protected]", password="edx")
self.client.login(username=self.user.username, password="edx")
# Construct the URL for the track selection page
self.url = reverse('course_modes_choose', args=[unicode(self.course.id)])
@patch.dict(settings.FEATURES, {'EMBARGO': True})
def test_embargo_restrict(self):
with restrict_course(self.course.id) as redirect_url:
response = self.client.get(self.url)
self.assertRedirects(response, redirect_url)
def test_embargo_allow(self):
response = self.client.get(self.url)
self.assertEqual(response.status_code, 200)
| agpl-3.0 |
wolfier/incubator-airflow | airflow/sensors/base_sensor_operator.py | 5 | 2739 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from time import sleep
from airflow.exceptions import AirflowException, AirflowSensorTimeout, \
AirflowSkipException
from airflow.models import BaseOperator
from airflow.utils import timezone
from airflow.utils.decorators import apply_defaults
class BaseSensorOperator(BaseOperator):
"""
    Sensor operators are derived from this class and inherit these attributes.
    Sensor operators keep executing at a time interval and succeed when
    a criterion is met, and fail if and when they time out.
:param soft_fail: Set to true to mark the task as SKIPPED on failure
:type soft_fail: bool
:param poke_interval: Time in seconds that the job should wait in
between each tries
:type poke_interval: int
:param timeout: Time, in seconds before the task times out and fails.
:type timeout: int
"""
ui_color = '#e6f1f2'
@apply_defaults
def __init__(self,
poke_interval=60,
timeout=60 * 60 * 24 * 7,
soft_fail=False,
*args,
**kwargs):
super(BaseSensorOperator, self).__init__(*args, **kwargs)
self.poke_interval = poke_interval
self.soft_fail = soft_fail
self.timeout = timeout
def poke(self, context):
"""
Function that the sensors defined while deriving this class should
override.
"""
raise AirflowException('Override me.')
def execute(self, context):
started_at = timezone.utcnow()
while not self.poke(context):
if (timezone.utcnow() - started_at).total_seconds() > self.timeout:
if self.soft_fail:
raise AirflowSkipException('Snap. Time is OUT.')
else:
raise AirflowSensorTimeout('Snap. Time is OUT.')
sleep(self.poke_interval)
self.log.info("Success criteria met. Exiting.")
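# A minimal subclass sketch (FileSensor and its filepath argument are
# hypothetical, not part of this module): a sensor only has to override
# poke() and return a truthy value once its condition holds; the execute()
# loop above then supplies the poke_interval/timeout/soft_fail behaviour.
#
#   import os
#
#   class FileSensor(BaseSensorOperator):
#       @apply_defaults
#       def __init__(self, filepath, *args, **kwargs):
#           super(FileSensor, self).__init__(*args, **kwargs)
#           self.filepath = filepath
#
#       def poke(self, context):
#           # True ends the loop in execute(); False sleeps poke_interval.
#           return os.path.exists(self.filepath)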
| apache-2.0 |
whitehorse-io/encarnia | pyenv/lib/python2.7/site-packages/twisted/web/test/test_xml.py | 10 | 41831 | # -*- test-case-name: twisted.web.test.test_xml -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Some fairly inadequate testcases for Twisted XML support.
"""
from twisted.trial.unittest import TestCase
from twisted.web import sux
from twisted.web import microdom
from twisted.web import domhelpers
class Sux0r(sux.XMLParser):
def __init__(self):
self.tokens = []
def getTagStarts(self):
return [token for token in self.tokens if token[0] == 'start']
def gotTagStart(self, name, attrs):
self.tokens.append(("start", name, attrs))
def gotText(self, text):
self.tokens.append(("text", text))
class SUXTests(TestCase):
def testBork(self):
s = "<bork><bork><bork>"
ms = Sux0r()
ms.connectionMade()
ms.dataReceived(s)
self.assertEqual(len(ms.getTagStarts()),3)
class MicroDOMTests(TestCase):
def test_leadingTextDropping(self):
"""
        Make sure that if there's no top-level node, lenient mode won't
drop leading text that's outside of any elements.
"""
s = "Hi orders! <br>Well. <br>"
d = microdom.parseString(s, beExtremelyLenient=True)
self.assertEqual(d.firstChild().toxml(),
'<html>Hi orders! <br />Well. <br /></html>')
def test_trailingTextDropping(self):
"""
        Ensure that *trailing* text in a malformed document with no
        top-level element is not dropped.
"""
s = "<br>Hi orders!"
d = microdom.parseString(s, beExtremelyLenient=True)
self.assertEqual(d.firstChild().toxml(),
'<html><br />Hi orders!</html>')
def test_noTags(self):
"""
A string with nothing that looks like a tag at all should just
be parsed as body text.
"""
s = "Hi orders!"
d = microdom.parseString(s, beExtremelyLenient=True)
self.assertEqual(d.firstChild().toxml(),
"<html>Hi orders!</html>")
def test_surroundingCrap(self):
"""
        If a document is surrounded by non-XML text, the text should
        remain in the XML.
"""
s = "Hi<br> orders!"
d = microdom.parseString(s, beExtremelyLenient=True)
self.assertEqual(d.firstChild().toxml(),
"<html>Hi<br /> orders!</html>")
def testCaseSensitiveSoonCloser(self):
s = """
<HTML><BODY>
<P ALIGN="CENTER">
<A HREF="http://www.apache.org/"><IMG SRC="/icons/apache_pb.gif"></A>
</P>
<P>
This is an insane set of text nodes that should NOT be gathered under
the A tag above.
</P>
</BODY></HTML>
"""
d = microdom.parseString(s, beExtremelyLenient=1)
l = domhelpers.findNodesNamed(d.documentElement, 'a')
        n = domhelpers.gatherTextNodes(l[0], 1).replace('&nbsp;', ' ')
self.assertEqual(n.find('insane'), -1)
def test_lenientParenting(self):
"""
Test that C{parentNode} attributes are set to meaningful values when
we are parsing HTML that lacks a root node.
"""
# Spare the rod, ruin the child.
s = "<br/><br/>"
d = microdom.parseString(s, beExtremelyLenient=1)
self.assertIdentical(d.documentElement,
d.documentElement.firstChild().parentNode)
def test_lenientParentSingle(self):
"""
Test that the C{parentNode} attribute is set to a meaningful value
when we parse an HTML document that has a non-Element root node.
"""
s = "Hello"
d = microdom.parseString(s, beExtremelyLenient=1)
self.assertIdentical(d.documentElement,
d.documentElement.firstChild().parentNode)
def testUnEntities(self):
s = """
<HTML>
This HTML goes between Stupid <=CrAzY!=> Dumb.
</HTML>
"""
d = microdom.parseString(s, beExtremelyLenient=1)
n = domhelpers.gatherTextNodes(d)
self.assertNotEqual(n.find('>'), -1)
def testEmptyError(self):
self.assertRaises(sux.ParseError, microdom.parseString, "")
def testTameDocument(self):
s = """
<test>
<it>
<is>
<a>
test
</a>
</is>
</it>
</test>
"""
d = microdom.parseString(s)
self.assertEqual(
domhelpers.gatherTextNodes(d.documentElement).strip() ,'test')
def testAwfulTagSoup(self):
s = """
<html>
<head><title> I send you this message to have your advice!!!!</titl e
</headd>
<body bgcolor alink hlink vlink>
<h1><BLINK>SALE</blINK> TWENTY MILLION EMAILS & FUR COAT NOW
FREE WITH `ENLARGER'</h1>
YES THIS WONDERFUL AWFER IS NOW HERER!!!
<script LANGUAGE="javascript">
function give_answers() {
if (score < 70) {
alert("I hate you");
}}
</script><a href=/foo.com/lalal name=foo>lalal</a>
</body>
</HTML>
"""
d = microdom.parseString(s, beExtremelyLenient=1)
l = domhelpers.findNodesNamed(d.documentElement, 'blink')
self.assertEqual(len(l), 1)
def testScriptLeniency(self):
s = """
<script>(foo < bar) and (bar > foo)</script>
<script language="javascript">foo </scrip bar </script>
<script src="foo">
<script src="foo">baz</script>
<script /><script></script>
"""
d = microdom.parseString(s, beExtremelyLenient=1)
self.assertEqual(d.firstChild().firstChild().firstChild().data,
"(foo < bar) and (bar > foo)")
self.assertEqual(
d.firstChild().getElementsByTagName("script")[1].firstChild().data,
"foo </scrip bar ")
def testScriptLeniencyIntelligence(self):
        # if there is a comment or CDATA section in the script, the
        # autoquoting in beExtremelyLenient mode should not happen
s = """<script><!-- lalal --></script>"""
self.assertEqual(
microdom.parseString(s, beExtremelyLenient=1).firstChild().toxml(), s)
s = """<script><![CDATA[lalal]]></script>"""
self.assertEqual(
microdom.parseString(s, beExtremelyLenient=1).firstChild().toxml(), s)
s = """<script> // <![CDATA[
lalal
//]]></script>"""
self.assertEqual(
microdom.parseString(s, beExtremelyLenient=1).firstChild().toxml(), s)
def testPreserveCase(self):
s = '<eNcApSuLaTe><sUxor></sUxor><bOrk><w00T>TeXt</W00t></BoRk></EnCaPsUlAtE>'
s2 = s.lower().replace('text', 'TeXt')
# these are the only two option permutations that *can* parse the above
d = microdom.parseString(s, caseInsensitive=1, preserveCase=1)
d2 = microdom.parseString(s, caseInsensitive=1, preserveCase=0)
# caseInsensitive=0 preserveCase=0 is not valid, it's converted to
# caseInsensitive=0 preserveCase=1
d3 = microdom.parseString(s2, caseInsensitive=0, preserveCase=1)
d4 = microdom.parseString(s2, caseInsensitive=1, preserveCase=0)
d5 = microdom.parseString(s2, caseInsensitive=1, preserveCase=1)
# this is slightly contrived, toxml() doesn't need to be identical
# for the documents to be equivalent (i.e. <b></b> to <b/>),
# however this assertion tests preserving case for start and
# end tags while still matching stuff like <bOrk></BoRk>
self.assertEqual(d.documentElement.toxml(), s)
self.assertTrue(d.isEqualToDocument(d2), "%r != %r" % (d.toxml(), d2.toxml()))
self.assertTrue(d2.isEqualToDocument(d3), "%r != %r" % (d2.toxml(), d3.toxml()))
        # caseInsensitive=0 on the left, NOT preserveCase=1 on the right
## XXX THIS TEST IS TURNED OFF UNTIL SOMEONE WHO CARES ABOUT FIXING IT DOES
#self.assertFalse(d3.isEqualToDocument(d2), "%r == %r" % (d3.toxml(), d2.toxml()))
self.assertTrue(d3.isEqualToDocument(d4), "%r != %r" % (d3.toxml(), d4.toxml()))
self.assertTrue(d4.isEqualToDocument(d5), "%r != %r" % (d4.toxml(), d5.toxml()))
def testDifferentQuotes(self):
s = '<test a="a" b=\'b\' />'
d = microdom.parseString(s)
e = d.documentElement
self.assertEqual(e.getAttribute('a'), 'a')
self.assertEqual(e.getAttribute('b'), 'b')
def testLinebreaks(self):
s = '<test \na="a"\n\tb="#b" />'
d = microdom.parseString(s)
e = d.documentElement
self.assertEqual(e.getAttribute('a'), 'a')
self.assertEqual(e.getAttribute('b'), '#b')
def testMismatchedTags(self):
for s in '<test>', '<test> </tset>', '</test>':
self.assertRaises(microdom.MismatchedTags, microdom.parseString, s)
def testComment(self):
s = "<bar><!--<foo />--></bar>"
d = microdom.parseString(s)
e = d.documentElement
self.assertEqual(e.nodeName, "bar")
c = e.childNodes[0]
self.assertTrue(isinstance(c, microdom.Comment))
self.assertEqual(c.value, "<foo />")
c2 = c.cloneNode()
self.assertTrue(c is not c2)
self.assertEqual(c2.toxml(), "<!--<foo />-->")
def testText(self):
d = microdom.parseString("<bar>xxxx</bar>").documentElement
text = d.childNodes[0]
self.assertTrue(isinstance(text, microdom.Text))
self.assertEqual(text.value, "xxxx")
clone = text.cloneNode()
self.assertTrue(clone is not text)
self.assertEqual(clone.toxml(), "xxxx")
def testEntities(self):
        nodes = microdom.parseString("<b>&amp;&#12AB;</b>").documentElement.childNodes
        self.assertEqual(len(nodes), 2)
        self.assertEqual(nodes[0].data, "&amp;")
        self.assertEqual(nodes[1].data, "&#12AB;")
        self.assertEqual(nodes[0].cloneNode().toxml(), "&amp;")
for n in nodes:
self.assertTrue(isinstance(n, microdom.EntityReference))
def testCData(self):
s = '<x><![CDATA[</x>\r\n & foo]]></x>'
cdata = microdom.parseString(s).documentElement.childNodes[0]
self.assertTrue(isinstance(cdata, microdom.CDATASection))
self.assertEqual(cdata.data, "</x>\r\n & foo")
self.assertEqual(cdata.cloneNode().toxml(), "<![CDATA[</x>\r\n & foo]]>")
def testSingletons(self):
s = "<foo><b/><b /><b\n/></foo>"
s2 = "<foo><b/><b/><b/></foo>"
nodes = microdom.parseString(s).documentElement.childNodes
nodes2 = microdom.parseString(s2).documentElement.childNodes
self.assertEqual(len(nodes), 3)
for (n, n2) in zip(nodes, nodes2):
self.assertTrue(isinstance(n, microdom.Element))
self.assertEqual(n.nodeName, "b")
self.assertTrue(n.isEqualToNode(n2))
def testAttributes(self):
s = '<foo a="b" />'
node = microdom.parseString(s).documentElement
self.assertEqual(node.getAttribute("a"), "b")
self.assertEqual(node.getAttribute("c"), None)
self.assertTrue(node.hasAttribute("a"))
self.assertTrue(not node.hasAttribute("c"))
a = node.getAttributeNode("a")
self.assertEqual(a.value, "b")
node.setAttribute("foo", "bar")
self.assertEqual(node.getAttribute("foo"), "bar")
def testChildren(self):
s = "<foo><bar /><baz /><bax>foo</bax></foo>"
d = microdom.parseString(s).documentElement
self.assertEqual([n.nodeName for n in d.childNodes], ["bar", "baz", "bax"])
self.assertEqual(d.lastChild().nodeName, "bax")
self.assertEqual(d.firstChild().nodeName, "bar")
self.assertTrue(d.hasChildNodes())
self.assertTrue(not d.firstChild().hasChildNodes())
def testMutate(self):
s = "<foo />"
s1 = '<foo a="b"><bar/><foo/></foo>'
s2 = '<foo a="b">foo</foo>'
d = microdom.parseString(s).documentElement
d1 = microdom.parseString(s1).documentElement
d2 = microdom.parseString(s2).documentElement
d.appendChild(d.cloneNode())
d.setAttribute("a", "b")
child = d.childNodes[0]
self.assertEqual(child.getAttribute("a"), None)
self.assertEqual(child.nodeName, "foo")
d.insertBefore(microdom.Element("bar"), child)
self.assertEqual(d.childNodes[0].nodeName, "bar")
self.assertEqual(d.childNodes[1], child)
for n in d.childNodes:
self.assertEqual(n.parentNode, d)
self.assertTrue(d.isEqualToNode(d1))
d.removeChild(child)
self.assertEqual(len(d.childNodes), 1)
self.assertEqual(d.childNodes[0].nodeName, "bar")
t = microdom.Text("foo")
d.replaceChild(t, d.firstChild())
self.assertEqual(d.firstChild(), t)
self.assertTrue(d.isEqualToNode(d2))
def test_replaceNonChild(self):
"""
L{Node.replaceChild} raises L{ValueError} if the node given to be
replaced is not a child of the node C{replaceChild} is called on.
"""
parent = microdom.parseString('<foo />')
orphan = microdom.parseString('<bar />')
replacement = microdom.parseString('<baz />')
self.assertRaises(
ValueError, parent.replaceChild, replacement, orphan)
def testSearch(self):
s = "<foo><bar id='me' /><baz><foo /></baz></foo>"
s2 = "<fOo><bAr id='me' /><bAz><fOO /></bAz></fOo>"
d = microdom.parseString(s)
d2 = microdom.parseString(s2, caseInsensitive=0, preserveCase=1)
d3 = microdom.parseString(s2, caseInsensitive=1, preserveCase=1)
root = d.documentElement
self.assertEqual(root.firstChild(), d.getElementById('me'))
self.assertEqual(d.getElementsByTagName("foo"),
[root, root.lastChild().firstChild()])
root = d2.documentElement
self.assertEqual(root.firstChild(), d2.getElementById('me'))
self.assertEqual(d2.getElementsByTagName('fOo'), [root])
self.assertEqual(d2.getElementsByTagName('fOO'),
[root.lastChild().firstChild()])
self.assertEqual(d2.getElementsByTagName('foo'), [])
root = d3.documentElement
self.assertEqual(root.firstChild(), d3.getElementById('me'))
self.assertEqual(d3.getElementsByTagName('FOO'),
[root, root.lastChild().firstChild()])
self.assertEqual(d3.getElementsByTagName('fOo'),
[root, root.lastChild().firstChild()])
def testDoctype(self):
s = ('<?xml version="1.0"?>'
'<!DOCTYPE foo PUBLIC "baz" "http://www.example.com/example.dtd">'
'<foo></foo>')
s2 = '<foo/>'
d = microdom.parseString(s)
d2 = microdom.parseString(s2)
self.assertEqual(d.doctype,
'foo PUBLIC "baz" "http://www.example.com/example.dtd"')
self.assertEqual(d.toxml(), s)
self.assertFalse(d.isEqualToDocument(d2))
self.assertTrue(d.documentElement.isEqualToNode(d2.documentElement))
samples = [("<img/>", "<img />"),
("<foo A='b'>x</foo>", '<foo A="b">x</foo>'),
("<foo><BAR /></foo>", "<foo><BAR></BAR></foo>"),
               ("<foo>hello there &amp; yoyoy</foo>",
                "<foo>hello there &amp; yoyoy</foo>"),
]
def testOutput(self):
for s, out in self.samples:
d = microdom.parseString(s, caseInsensitive=0)
d2 = microdom.parseString(out, caseInsensitive=0)
testOut = d.documentElement.toxml()
self.assertEqual(out, testOut)
self.assertTrue(d.isEqualToDocument(d2))
def testErrors(self):
for s in ["<foo>&am</foo>", "<foo", "<f>&</f>", "<() />"]:
self.assertRaises(Exception, microdom.parseString, s)
def testCaseInsensitive(self):
s = "<foo a='b'><BAx>x</bax></FOO>"
s2 = '<foo a="b"><bax>x</bax></foo>'
s3 = "<FOO a='b'><BAx>x</BAx></FOO>"
s4 = "<foo A='b'>x</foo>"
d = microdom.parseString(s)
d2 = microdom.parseString(s2)
d3 = microdom.parseString(s3, caseInsensitive=1)
d4 = microdom.parseString(s4, caseInsensitive=1, preserveCase=1)
d5 = microdom.parseString(s4, caseInsensitive=1, preserveCase=0)
d6 = microdom.parseString(s4, caseInsensitive=0, preserveCase=0)
out = microdom.parseString(s).documentElement.toxml()
self.assertRaises(microdom.MismatchedTags, microdom.parseString,
s, caseInsensitive=0)
self.assertEqual(out, s2)
self.assertTrue(d.isEqualToDocument(d2))
self.assertTrue(d.isEqualToDocument(d3))
self.assertTrue(d4.documentElement.hasAttribute('a'))
self.assertFalse(d6.documentElement.hasAttribute('a'))
self.assertEqual(d4.documentElement.toxml(), '<foo A="b">x</foo>')
self.assertEqual(d5.documentElement.toxml(), '<foo a="b">x</foo>')
def testEatingWhitespace(self):
s = """<hello>
</hello>"""
d = microdom.parseString(s)
self.assertTrue(not d.documentElement.hasChildNodes(),
d.documentElement.childNodes)
self.assertTrue(d.isEqualToDocument(microdom.parseString('<hello></hello>')))
def testLenientAmpersand(self):
prefix = "<?xml version='1.0'?>"
# we use <pre> so space will be preserved
        for i, o in [("&", "&amp;"),
                     ("& ", "&amp; "),
                     ("&amp;", "&amp;"),
                     ("&hello monkey", "&amp;hello monkey")]:
d = microdom.parseString("%s<pre>%s</pre>"
% (prefix, i), beExtremelyLenient=1)
self.assertEqual(d.documentElement.toxml(), "<pre>%s</pre>" % o)
# non-space preserving
d = microdom.parseString("<t>hello & there</t>", beExtremelyLenient=1)
        self.assertEqual(d.documentElement.toxml(), "<t>hello &amp; there</t>")
def testInsensitiveLenient(self):
# testing issue #537
d = microdom.parseString(
"<?xml version='1.0'?><bar><xA><y>c</Xa> <foo></bar>",
beExtremelyLenient=1)
self.assertEqual(d.documentElement.firstChild().toxml(), "<xa><y>c</y></xa>")
def testLaterCloserSimple(self):
s = "<ul><li>foo<li>bar<li>baz</ul>"
d = microdom.parseString(s, beExtremelyLenient=1)
expected = "<ul><li>foo</li><li>bar</li><li>baz</li></ul>"
actual = d.documentElement.toxml()
self.assertEqual(expected, actual)
def testLaterCloserCaseInsensitive(self):
s = "<DL><p><DT>foo<DD>bar</DL>"
d = microdom.parseString(s, beExtremelyLenient=1)
expected = "<dl><p></p><dt>foo</dt><dd>bar</dd></dl>"
actual = d.documentElement.toxml()
self.assertEqual(expected, actual)
def testLaterCloserDL(self):
s = ("<dl>"
"<dt>word<dd>definition"
"<dt>word<dt>word<dd>definition<dd>definition"
"</dl>")
expected = ("<dl>"
"<dt>word</dt><dd>definition</dd>"
"<dt>word</dt><dt>word</dt><dd>definition</dd><dd>definition</dd>"
"</dl>")
d = microdom.parseString(s, beExtremelyLenient=1)
actual = d.documentElement.toxml()
self.assertEqual(expected, actual)
def testUnicodeTolerance(self):
import struct
s = '<foo><bar><baz /></bar></foo>'
j =(u'<?xml version="1.0" encoding="UCS-2" ?>\r\n<JAPANESE>\r\n'
u'<TITLE>\u5c02\u9580\u5bb6\u30ea\u30b9\u30c8 </TITLE></JAPANESE>')
j2=('\xff\xfe<\x00?\x00x\x00m\x00l\x00 \x00v\x00e\x00r\x00s\x00i\x00o'
'\x00n\x00=\x00"\x001\x00.\x000\x00"\x00 \x00e\x00n\x00c\x00o\x00d'
'\x00i\x00n\x00g\x00=\x00"\x00U\x00C\x00S\x00-\x002\x00"\x00 \x00?'
'\x00>\x00\r\x00\n\x00<\x00J\x00A\x00P\x00A\x00N\x00E\x00S\x00E'
'\x00>\x00\r\x00\n\x00<\x00T\x00I\x00T\x00L\x00E\x00>\x00\x02\\'
'\x80\x95\xb6[\xea0\xb90\xc80 \x00<\x00/\x00T\x00I\x00T\x00L\x00E'
'\x00>\x00<\x00/\x00J\x00A\x00P\x00A\x00N\x00E\x00S\x00E\x00>\x00')
def reverseBytes(s):
fmt = str(len(s) // 2) + 'H'
return struct.pack('<' + fmt, *struct.unpack('>' + fmt, s))
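        # reverseBytes swaps the byte order of each 16-bit unit, turning
        # little-endian UTF-16 data into big-endian and vice versa, e.g.
        # reverseBytes('<\x00f\x00') == '\x00<\x00f'.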
urd = microdom.parseString(reverseBytes(s.encode('UTF-16')))
ud = microdom.parseString(s.encode('UTF-16'))
sd = microdom.parseString(s)
self.assertTrue(ud.isEqualToDocument(sd))
self.assertTrue(ud.isEqualToDocument(urd))
ud = microdom.parseString(j)
urd = microdom.parseString(reverseBytes(j2))
sd = microdom.parseString(j2)
self.assertTrue(ud.isEqualToDocument(sd))
self.assertTrue(ud.isEqualToDocument(urd))
# test that raw text still gets encoded
# test that comments get encoded
j3=microdom.parseString(u'<foo/>')
hdr='<?xml version="1.0"?>'
div=microdom.lmx().text(u'\u221a', raw=1).node
de=j3.documentElement
de.appendChild(div)
de.appendChild(j3.createComment(u'\u221a'))
self.assertEqual(j3.toxml(), hdr+
u'<foo><div>\u221a</div><!--\u221a--></foo>'.encode('utf8'))
def testNamedChildren(self):
tests = {"<foo><bar /><bar unf='1' /><bar>asdfadsf</bar>"
"<bam/></foo>" : 3,
'<foo>asdf</foo>' : 0,
'<foo><bar><bar></bar></bar></foo>' : 1,
}
for t in tests.keys():
node = microdom.parseString(t).documentElement
result = domhelpers.namedChildren(node, 'bar')
self.assertEqual(len(result), tests[t])
if result:
self.assertTrue(hasattr(result[0], 'tagName'))
def testCloneNode(self):
s = '<foo a="b"><bax>x</bax></foo>'
node = microdom.parseString(s).documentElement
clone = node.cloneNode(deep=1)
self.failIfEquals(node, clone)
self.assertEqual(len(node.childNodes), len(clone.childNodes))
c1, c2 = node.firstChild(), clone.firstChild()
self.failIfEquals(c1, c2)
self.assertEqual(len(c1.childNodes), len(c2.childNodes))
self.failIfEquals(c1.firstChild(), c2.firstChild())
self.assertEqual(s, clone.toxml())
self.assertEqual(node.namespace, clone.namespace)
def testCloneDocument(self):
s = ('<?xml version="1.0"?>'
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"><foo></foo>')
node = microdom.parseString(s)
clone = node.cloneNode(deep=1)
self.failIfEquals(node, clone)
self.assertEqual(len(node.childNodes), len(clone.childNodes))
self.assertEqual(s, clone.toxml())
self.assertTrue(clone.isEqualToDocument(node))
self.assertTrue(node.isEqualToDocument(clone))
def testLMX(self):
n = microdom.Element("p")
lmx = microdom.lmx(n)
lmx.text("foo")
b = lmx.b(a="c")
b.foo()["z"] = "foo"
b.foo()
b.add("bar", c="y")
s = '<p>foo<b a="c"><foo z="foo"></foo><foo></foo><bar c="y"></bar></b></p>'
self.assertEqual(s, n.toxml())
def testDict(self):
"""
Returns a dictionary which is hashable.
"""
n = microdom.Element("p")
hash(n)
def testEscaping(self):
# issue 590
raw = "&'some \"stuff\"', <what up?>"
        cooked = "&amp;'some &quot;stuff&quot;', &lt;what up?&gt;"
esc1 = microdom.escape(raw)
self.assertEqual(esc1, cooked)
self.assertEqual(microdom.unescape(esc1), raw)
def testNamespaces(self):
s = '''
<x xmlns="base">
<y />
<y q="1" x:q="2" y:q="3" />
<y:y xml:space="1">here is some space </y:y>
<y:y />
<x:y />
</x>
'''
d = microdom.parseString(s)
# at least make sure it doesn't traceback
s2 = d.toprettyxml()
self.assertEqual(d.documentElement.namespace,
"base")
self.assertEqual(d.documentElement.getElementsByTagName("y")[0].namespace,
"base")
self.assertEqual(
d.documentElement.getElementsByTagName("y")[1].getAttributeNS('base','q'),
'1')
d2 = microdom.parseString(s2)
self.assertEqual(d2.documentElement.namespace,
"base")
self.assertEqual(d2.documentElement.getElementsByTagName("y")[0].namespace,
"base")
self.assertEqual(
d2.documentElement.getElementsByTagName("y")[1].getAttributeNS('base','q'),
'1')
def testNamespaceDelete(self):
"""
Test that C{toxml} can support xml structures that remove namespaces.
"""
s1 = ('<?xml version="1.0"?><html xmlns="http://www.w3.org/TR/REC-html40">'
'<body xmlns=""></body></html>')
s2 = microdom.parseString(s1).toxml()
self.assertEqual(s1, s2)
def testNamespaceInheritance(self):
"""
Check that unspecified namespace is a thing separate from undefined
        namespace. This test was added after discovering some weirdness in Lore.
"""
# will only work if childNodes is mutated. not sure why.
child = microdom.Element('ol')
parent = microdom.Element('div', namespace='http://www.w3.org/1999/xhtml')
parent.childNodes = [child]
self.assertEqual(parent.toxml(),
'<div xmlns="http://www.w3.org/1999/xhtml"><ol></ol></div>')
def test_prefixedTags(self):
"""
XML elements with a prefixed name as per upper level tag definition
have a start-tag of C{"<prefix:tag>"} and an end-tag of
C{"</prefix:tag>"}.
Refer to U{http://www.w3.org/TR/xml-names/#ns-using} for details.
"""
outerNamespace = "http://example.com/outer"
innerNamespace = "http://example.com/inner"
document = microdom.Document()
# Create the root in one namespace. Microdom will probably make this
# the default namespace.
root = document.createElement("root", namespace=outerNamespace)
# Give the root some prefixes to use.
root.addPrefixes({innerNamespace: "inner"})
# Append a child to the root from the namespace that prefix is bound
# to.
tag = document.createElement("tag", namespace=innerNamespace)
# Give that tag a child too. This way we test rendering of tags with
# children and without children.
child = document.createElement("child", namespace=innerNamespace)
tag.appendChild(child)
root.appendChild(tag)
document.appendChild(root)
# ok, the xml should appear like this
xmlOk = (
'<?xml version="1.0"?>'
'<root xmlns="http://example.com/outer" '
'xmlns:inner="http://example.com/inner">'
'<inner:tag><inner:child></inner:child></inner:tag>'
'</root>')
xmlOut = document.toxml()
self.assertEqual(xmlOut, xmlOk)
def test_prefixPropagation(self):
"""
Children of prefixed tags respect the default namespace at the point
where they are rendered. Specifically, they are not influenced by the
prefix of their parent as that prefix has no bearing on them.
See U{http://www.w3.org/TR/xml-names/#scoping} for details.
To further clarify the matter, the following::
<root xmlns="http://example.com/ns/test">
<mytag xmlns="http://example.com/ns/mytags">
<mysubtag xmlns="http://example.com/ns/mytags">
<element xmlns="http://example.com/ns/test"></element>
</mysubtag>
</mytag>
</root>
Should become this after all the namespace declarations have been
I{moved up}::
<root xmlns="http://example.com/ns/test"
xmlns:mytags="http://example.com/ns/mytags">
<mytags:mytag>
<mytags:mysubtag>
<element></element>
</mytags:mysubtag>
</mytags:mytag>
</root>
"""
outerNamespace = "http://example.com/outer"
innerNamespace = "http://example.com/inner"
document = microdom.Document()
# creates a root element
root = document.createElement("root", namespace=outerNamespace)
document.appendChild(root)
# Create a child with a specific namespace with a prefix bound to it.
root.addPrefixes({innerNamespace: "inner"})
mytag = document.createElement("mytag",namespace=innerNamespace)
root.appendChild(mytag)
# Create a child of that which has the outer namespace.
mysubtag = document.createElement("mysubtag", namespace=outerNamespace)
mytag.appendChild(mysubtag)
xmlOk = (
'<?xml version="1.0"?>'
'<root xmlns="http://example.com/outer" '
'xmlns:inner="http://example.com/inner">'
'<inner:mytag>'
'<mysubtag></mysubtag>'
'</inner:mytag>'
'</root>'
)
xmlOut = document.toxml()
self.assertEqual(xmlOut, xmlOk)
class BrokenHTMLTests(TestCase):
"""
Tests for when microdom encounters very bad HTML and C{beExtremelyLenient}
    is enabled. These tests are inspired by some HTML generated by a mailer,
which breaks up very long lines by splitting them with '!\n '. The expected
behaviour is loosely modelled on the way Firefox treats very bad HTML.
"""
def checkParsed(self, input, expected, beExtremelyLenient=1):
"""
Check that C{input}, when parsed, produces a DOM where the XML
of the document element is equal to C{expected}.
"""
output = microdom.parseString(input,
beExtremelyLenient=beExtremelyLenient)
self.assertEqual(output.documentElement.toxml(), expected)
def test_brokenAttributeName(self):
"""
Check that microdom does its best to handle broken attribute names.
The important thing is that it doesn't raise an exception.
"""
input = '<body><h1><div al!\n ign="center">Foo</div></h1></body>'
expected = ('<body><h1><div al="True" ign="center">'
'Foo</div></h1></body>')
self.checkParsed(input, expected)
def test_brokenAttributeValue(self):
"""
Check that microdom encompasses broken attribute values.
"""
input = '<body><h1><div align="cen!\n ter">Foo</div></h1></body>'
expected = '<body><h1><div align="cen!\n ter">Foo</div></h1></body>'
self.checkParsed(input, expected)
def test_brokenOpeningTag(self):
"""
Check that microdom does its best to handle broken opening tags.
The important thing is that it doesn't raise an exception.
"""
input = '<body><h1><sp!\n an>Hello World!</span></h1></body>'
expected = '<body><h1><sp an="True">Hello World!</sp></h1></body>'
self.checkParsed(input, expected)
def test_brokenSelfClosingTag(self):
"""
Check that microdom does its best to handle broken self-closing tags
The important thing is that it doesn't raise an exception.
"""
self.checkParsed('<body><span /!\n></body>',
'<body><span></span></body>')
self.checkParsed('<span!\n />', '<span></span>')
def test_brokenClosingTag(self):
"""
Check that microdom does its best to handle broken closing tags.
The important thing is that it doesn't raise an exception.
"""
input = '<body><h1><span>Hello World!</sp!\nan></h1></body>'
expected = '<body><h1><span>Hello World!</span></h1></body>'
self.checkParsed(input, expected)
input = '<body><h1><span>Hello World!</!\nspan></h1></body>'
self.checkParsed(input, expected)
input = '<body><h1><span>Hello World!</span!\n></h1></body>'
self.checkParsed(input, expected)
input = '<body><h1><span>Hello World!<!\n/span></h1></body>'
expected = '<body><h1><span>Hello World!<!></!></span></h1></body>'
self.checkParsed(input, expected)
class NodeTests(TestCase):
"""
Tests for L{Node}.
"""
def test_isNodeEqualTo(self):
"""
L{Node.isEqualToNode} returns C{True} if and only if passed a L{Node}
with the same children.
"""
# A node is equal to itself
node = microdom.Node(object())
self.assertTrue(node.isEqualToNode(node))
another = microdom.Node(object())
# Two nodes with no children are equal
self.assertTrue(node.isEqualToNode(another))
node.appendChild(microdom.Node(object()))
# A node with no children is not equal to a node with a child
self.assertFalse(node.isEqualToNode(another))
another.appendChild(microdom.Node(object()))
# A node with a child and no grandchildren is equal to another node
# with a child and no grandchildren.
self.assertTrue(node.isEqualToNode(another))
# A node with a child and a grandchild is not equal to another node
# with a child and no grandchildren.
node.firstChild().appendChild(microdom.Node(object()))
self.assertFalse(node.isEqualToNode(another))
# A node with a child and a grandchild is equal to another node with a
# child and a grandchild.
another.firstChild().appendChild(microdom.Node(object()))
self.assertTrue(node.isEqualToNode(another))
def test_validChildInstance(self):
"""
Children of L{Node} instances must also be L{Node} instances.
"""
node = microdom.Node()
child = microdom.Node()
# Node.appendChild() only accepts Node instances.
node.appendChild(child)
self.assertRaises(TypeError, node.appendChild, None)
# Node.insertBefore() only accepts Node instances.
self.assertRaises(TypeError, node.insertBefore, child, None)
self.assertRaises(TypeError, node.insertBefore, None, child)
self.assertRaises(TypeError, node.insertBefore, None, None)
# Node.removeChild() only accepts Node instances.
node.removeChild(child)
self.assertRaises(TypeError, node.removeChild, None)
# Node.replaceChild() only accepts Node instances.
self.assertRaises(TypeError, node.replaceChild, child, None)
self.assertRaises(TypeError, node.replaceChild, None, child)
self.assertRaises(TypeError, node.replaceChild, None, None)
class DocumentTests(TestCase):
"""
Tests for L{Document}.
"""
doctype = 'foo PUBLIC "baz" "http://www.example.com/example.dtd"'
def test_isEqualToNode(self):
"""
L{Document.isEqualToNode} returns C{True} if and only if passed a
L{Document} with the same C{doctype} and C{documentElement}.
"""
# A document is equal to itself
document = microdom.Document()
self.assertTrue(document.isEqualToNode(document))
# A document without a doctype or documentElement is equal to another
# document without a doctype or documentElement.
another = microdom.Document()
self.assertTrue(document.isEqualToNode(another))
# A document with a doctype is not equal to a document without a
# doctype.
document.doctype = self.doctype
self.assertFalse(document.isEqualToNode(another))
# Two documents with the same doctype are equal
another.doctype = self.doctype
self.assertTrue(document.isEqualToNode(another))
# A document with a documentElement is not equal to a document without
# a documentElement
document.appendChild(microdom.Node(object()))
self.assertFalse(document.isEqualToNode(another))
# Two documents with equal documentElements are equal.
another.appendChild(microdom.Node(object()))
self.assertTrue(document.isEqualToNode(another))
# Two documents with documentElements which are not equal are not
# equal.
document.documentElement.appendChild(microdom.Node(object()))
self.assertFalse(document.isEqualToNode(another))
def test_childRestriction(self):
"""
L{Document.appendChild} raises L{ValueError} if the document already
has a child.
"""
document = microdom.Document()
child = microdom.Node()
another = microdom.Node()
document.appendChild(child)
self.assertRaises(ValueError, document.appendChild, another)
class EntityReferenceTests(TestCase):
"""
Tests for L{EntityReference}.
"""
def test_isEqualToNode(self):
"""
L{EntityReference.isEqualToNode} returns C{True} if and only if passed
a L{EntityReference} with the same C{eref}.
"""
self.assertTrue(
microdom.EntityReference('quot').isEqualToNode(
microdom.EntityReference('quot')))
self.assertFalse(
microdom.EntityReference('quot').isEqualToNode(
microdom.EntityReference('apos')))
class CharacterDataTests(TestCase):
"""
Tests for L{CharacterData}.
"""
def test_isEqualToNode(self):
"""
L{CharacterData.isEqualToNode} returns C{True} if and only if passed a
L{CharacterData} with the same value.
"""
self.assertTrue(
microdom.CharacterData('foo').isEqualToNode(
microdom.CharacterData('foo')))
self.assertFalse(
microdom.CharacterData('foo').isEqualToNode(
microdom.CharacterData('bar')))
class CommentTests(TestCase):
"""
Tests for L{Comment}.
"""
def test_isEqualToNode(self):
"""
L{Comment.isEqualToNode} returns C{True} if and only if passed a
L{Comment} with the same value.
"""
self.assertTrue(
microdom.Comment('foo').isEqualToNode(
microdom.Comment('foo')))
self.assertFalse(
microdom.Comment('foo').isEqualToNode(
microdom.Comment('bar')))
class TextTests(TestCase):
"""
Tests for L{Text}.
"""
def test_isEqualToNode(self):
"""
L{Text.isEqualToNode} returns C{True} if and only if passed a L{Text}
which represents the same data.
"""
self.assertTrue(
microdom.Text('foo', raw=True).isEqualToNode(
microdom.Text('foo', raw=True)))
self.assertFalse(
microdom.Text('foo', raw=True).isEqualToNode(
microdom.Text('foo', raw=False)))
self.assertFalse(
microdom.Text('foo', raw=True).isEqualToNode(
microdom.Text('bar', raw=True)))
class CDATASectionTests(TestCase):
"""
Tests for L{CDATASection}.
"""
def test_isEqualToNode(self):
"""
L{CDATASection.isEqualToNode} returns C{True} if and only if passed a
L{CDATASection} which represents the same data.
"""
self.assertTrue(
microdom.CDATASection('foo').isEqualToNode(
microdom.CDATASection('foo')))
self.assertFalse(
microdom.CDATASection('foo').isEqualToNode(
microdom.CDATASection('bar')))
class ElementTests(TestCase):
"""
Tests for L{Element}.
"""
def test_isEqualToNode(self):
"""
L{Element.isEqualToNode} returns C{True} if and only if passed a
L{Element} with the same C{nodeName}, C{namespace}, C{childNodes}, and
C{attributes}.
"""
self.assertTrue(
microdom.Element(
'foo', {'a': 'b'}, object(), namespace='bar').isEqualToNode(
microdom.Element(
'foo', {'a': 'b'}, object(), namespace='bar')))
# Elements with different nodeName values do not compare equal.
self.assertFalse(
microdom.Element(
'foo', {'a': 'b'}, object(), namespace='bar').isEqualToNode(
microdom.Element(
'bar', {'a': 'b'}, object(), namespace='bar')))
# Elements with different namespaces do not compare equal.
self.assertFalse(
microdom.Element(
'foo', {'a': 'b'}, object(), namespace='bar').isEqualToNode(
microdom.Element(
'foo', {'a': 'b'}, object(), namespace='baz')))
# Elements with different childNodes do not compare equal.
one = microdom.Element('foo', {'a': 'b'}, object(), namespace='bar')
two = microdom.Element('foo', {'a': 'b'}, object(), namespace='bar')
two.appendChild(microdom.Node(object()))
self.assertFalse(one.isEqualToNode(two))
# Elements with different attributes do not compare equal.
self.assertFalse(
microdom.Element(
'foo', {'a': 'b'}, object(), namespace='bar').isEqualToNode(
microdom.Element(
'foo', {'a': 'c'}, object(), namespace='bar')))
| mit |
CKehl/pylearn2 | pylearn2/models/tests/test_s3c_inference.py | 44 | 14386 | from __future__ import print_function
from pylearn2.models.s3c import S3C
from pylearn2.models.s3c import E_Step_Scan
from pylearn2.models.s3c import Grad_M_Step
from pylearn2.models.s3c import E_Step
from pylearn2.utils import contains_nan
from theano import function
import numpy as np
from theano.compat.six.moves import xrange
import theano.tensor as T
from theano import config
#from pylearn2.utils import serial
def broadcast(mat, shape_0):
rval = mat
if mat.shape[0] != shape_0:
assert mat.shape[0] == 1
        rval = np.zeros((shape_0, mat.shape[1]), dtype=mat.dtype)
for i in xrange(shape_0):
rval[i,:] = mat[0,:]
return rval
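# Hand-rolled equivalent of numpy row broadcasting: e.g.
#   broadcast(np.array([[1., 2.]]), 3)
# returns a (3, 2) array with the row repeated three times
# (np.tile(mat, (3, 1)) gives the same result when mat.shape[0] == 1).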
class Test_S3C_Inference:
def setUp(self):
# Temporarily change config.floatX to float64, as s3c inference
# tests currently fail due to numerical issues for float32.
self.prev_floatX = config.floatX
config.floatX = 'float64'
def tearDown(self):
# Restore previous value of floatX
config.floatX = self.prev_floatX
    def __init__(self):
        """ gets a small batch of data and
            sets up an S3C model
"""
# We also have to change the value of config.floatX in __init__.
self.prev_floatX = config.floatX
config.floatX = 'float64'
try:
self.tol = 1e-5
#dataset = serial.load('${PYLEARN2_DATA_PATH}/stl10/stl10_patches/data.pkl')
#X = dataset.get_batch_design(1000)
#X = X[:,0:5]
X = np.random.RandomState([1,2,3]).randn(1000,5)
X -= X.mean()
X /= X.std()
m, D = X.shape
N = 5
#don't give the model an e_step or learning rate so it won't spend years compiling a learn_func
self.model = S3C(nvis = D,
nhid = N,
irange = .1,
init_bias_hid = 0.,
init_B = 3.,
min_B = 1e-8,
max_B = 1000.,
init_alpha = 1., min_alpha = 1e-8, max_alpha = 1000.,
init_mu = 1., e_step = None,
m_step = Grad_M_Step(),
min_bias_hid = -1e30, max_bias_hid = 1e30,
)
self.model.make_pseudoparams()
self.h_new_coeff_schedule = [.1, .2, .3, .4, .5, .6, .7, .8, .9, 1. ]
self.e_step = E_Step_Scan(h_new_coeff_schedule = self.h_new_coeff_schedule)
self.e_step.register_model(self.model)
self.X = X
self.N = N
self.m = m
finally:
config.floatX = self.prev_floatX
def test_match_unrolled(self):
""" tests that inference with scan matches result using unrolled loops """
unrolled_e_step = E_Step(h_new_coeff_schedule = self.h_new_coeff_schedule)
unrolled_e_step.register_model(self.model)
V = T.matrix()
scan_result = self.e_step.infer(V)
unrolled_result = unrolled_e_step.infer(V)
outputs = []
for key in scan_result:
outputs.append(scan_result[key])
outputs.append(unrolled_result[key])
f = function([V], outputs)
outputs = f(self.X)
assert len(outputs) % 2 == 0
for i in xrange(0,len(outputs),2):
assert np.allclose(outputs[i],outputs[i+1])
def test_grad_s(self):
"tests that the gradients with respect to s_i are 0 after doing a mean field update of s_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
model.test_batch_size = X.shape[0]
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
S = e_step.infer_S_hat(V = X, H_hat = H_var, S_hat = Mu1_var)
s_idx = S[:,idx]
s_i_func = function([H_var,Mu1_var,idx],s_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)
grad_Mu1 = T.grad(trunc_kl.sum(), Mu1_var)
grad_Mu1_idx = grad_Mu1[:,idx]
grad_func = function([H_var, Mu1_var, idx], grad_Mu1_idx)
for i in xrange(self.N):
Mu1[:,i] = s_i_func(H, Mu1, i)
g = grad_func(H,Mu1,i)
assert not contains_nan(g)
g_abs_max = np.abs(g).max()
if g_abs_max > self.tol:
raise Exception('after mean field step, gradient of kl divergence wrt mean field parameter should be 0, but here the max magnitude of a gradient element is '+str(g_abs_max)+' after updating s_'+str(i))
def test_value_s(self):
"tests that the value of the kl divergence decreases with each update to s_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
S = e_step.infer_S_hat( V = X, H_hat = H_var, S_hat = Mu1_var)
s_idx = S[:,idx]
s_i_func = function([H_var,Mu1_var,idx],s_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)
trunc_kl_func = function([H_var, Mu1_var], trunc_kl)
for i in xrange(self.N):
prev_kl = trunc_kl_func(H,Mu1)
Mu1[:,i] = s_i_func(H, Mu1, i)
new_kl = trunc_kl_func(H,Mu1)
increase = new_kl - prev_kl
mx = increase.max()
if mx > 1e-3:
raise Exception('after mean field step in s, kl divergence should decrease, but some elements increased by as much as '+str(mx)+' after updating s_'+str(i))
def test_grad_h(self):
"tests that the gradients with respect to h_i are 0 after doing a mean field update of h_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
new_H = e_step.infer_H_hat(V = X, H_hat = H_var, S_hat = Mu1_var)
h_idx = new_H[:,idx]
updates_func = function([H_var,Mu1_var,idx], h_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0,
var_s1_hat = Sigma1)
grad_H = T.grad(trunc_kl.sum(), H_var)
assert len(grad_H.type.broadcastable) == 2
#from theano.printing import min_informative_str
#print min_informative_str(grad_H)
#grad_H = Print('grad_H')(grad_H)
#grad_H_idx = grad_H[:,idx]
grad_func = function([H_var, Mu1_var], grad_H)
failed = False
for i in xrange(self.N):
rval = updates_func(H, Mu1, i)
H[:,i] = rval
g = grad_func(H,Mu1)[:,i]
assert not contains_nan(g)
g_abs_max = np.abs(g).max()
if g_abs_max > self.tol:
#print "new values of H"
#print H[:,i]
#print "gradient on new values of H"
#print g
failed = True
print('iteration ',i)
#print 'max value of new H: ',H[:,i].max()
#print 'H for failing g: '
failing_h = H[np.abs(g) > self.tol, i]
#print failing_h
#from matplotlib import pyplot as plt
#plt.scatter(H[:,i],g)
#plt.show()
#ignore failures extremely close to h=1
high_mask = failing_h > .001
low_mask = failing_h < .999
mask = high_mask * low_mask
print('masked failures: ',mask.shape[0],' err ',g_abs_max)
if mask.sum() > 0:
print('failing h passing the range mask')
print(failing_h[ mask.astype(bool) ])
raise Exception('after mean field step, gradient of kl divergence'
' wrt freshly updated variational parameter should be 0, '
'but here the max magnitude of a gradient element is '
+str(g_abs_max)+' after updating h_'+str(i))
#assert not failed
def test_value_h(self):
"tests that the value of the kl divergence decreases with each update to h_i "
model = self.model
e_step = self.e_step
X = self.X
assert X.shape[0] == self.m
init_H = e_step.init_H_hat(V = X)
init_Mu1 = e_step.init_S_hat(V = X)
prev_setting = config.compute_test_value
config.compute_test_value= 'off'
H, Mu1 = function([], outputs=[init_H, init_Mu1])()
config.compute_test_value = prev_setting
H = broadcast(H, self.m)
Mu1 = broadcast(Mu1, self.m)
H = np.cast[config.floatX](self.model.rng.uniform(0.,1.,H.shape))
Mu1 = np.cast[config.floatX](self.model.rng.uniform(-5.,5.,Mu1.shape))
H_var = T.matrix(name='H_var')
H_var.tag.test_value = H
Mu1_var = T.matrix(name='Mu1_var')
Mu1_var.tag.test_value = Mu1
idx = T.iscalar()
idx.tag.test_value = 0
newH = e_step.infer_H_hat(V = X, H_hat = H_var, S_hat = Mu1_var)
h_idx = newH[:,idx]
h_i_func = function([H_var,Mu1_var,idx],h_idx)
sigma0 = 1. / model.alpha
Sigma1 = e_step.infer_var_s1_hat()
mu0 = T.zeros_like(model.mu)
#by truncated KL, I mean that I am dropping terms that don't depend on H and Mu1
# (they don't affect the outcome of this test and some of them are intractable )
trunc_kl = - model.entropy_hs(H_hat = H_var, var_s0_hat = sigma0, var_s1_hat = Sigma1) + \
model.expected_energy_vhs(V = X, H_hat = H_var, S_hat = Mu1_var, var_s0_hat = sigma0, var_s1_hat = Sigma1)
trunc_kl_func = function([H_var, Mu1_var], trunc_kl)
for i in xrange(self.N):
prev_kl = trunc_kl_func(H,Mu1)
H[:,i] = h_i_func(H, Mu1, i)
#we don't update mu, the whole point of the split e step is we don't have to
new_kl = trunc_kl_func(H,Mu1)
increase = new_kl - prev_kl
print('failures after iteration ',i,': ',(increase > self.tol).sum())
mx = increase.max()
if mx > 1e-4:
print('increase amounts of failing examples:')
print(increase[increase > self.tol])
print('failing H:')
print(H[increase > self.tol,:])
print('failing Mu1:')
print(Mu1[increase > self.tol,:])
print('failing V:')
print(X[increase > self.tol,:])
raise Exception('after mean field step in h, kl divergence should decrease, but some elements increased by as much as '+str(mx)+' after updating h_'+str(i))
if __name__ == '__main__':
obj = Test_S3C_Inference()
#obj.test_grad_h()
#obj.test_grad_s()
#obj.test_value_s()
obj.test_value_h()
| bsd-3-clause |
dvklopfenstein/PrincetonAlgorithms | tests/test_Selection.py | 1 | 2392 | #!/usr/bin/env python
import sys
from AlgsSedgewickWayne.Selection import Sort
from AlgsSedgewickWayne.testcode.ArrayHistory import chk
from AlgsSedgewickWayne.testcode.ArrayHistory import ArrayHistory
from AlgsSedgewickWayne.testcode.InputArgs import cli_get_array
def test_wk2_lec(prt=sys.stdout):
"""Example from week 2 lecture, "Selection Sort (6:59)" """
# Give the array that results after the first 4 exchanges when
# selection sorting the following array:
a = map(int, "7 10 5 3 8 4 2 9 6".split() )
run(a, 'SELECTION SORT', prt=prt)
def test_wk2_ex_Selections_489125(prt=sys.stdout):
# (seed = 183182)
# Give the array that results after the first 4 exchanges when
# selection sorting the following array:
a = map(int, "13 16 40 60 19 70 71 47 12 67".split() )
run(a, 'SELECTION SORT', prt=prt)
def test_wk2_q3a(prt=sys.stdout):
# QUESTION: Any pair of items is compared no more than once during selection sort.
# ANSWER(FALSE): Consider the array { 2, 1, 0 }. Then, 2 and 1 are compared twice.
run([2, 1, 0], 'SELECTION SORT', prt=prt)
def test_wk2_q3b(prt=sys.stdout):
# QUESTION: An exchange in selection sort can decrease the number of inversions
# by two (or more).
# ANSWER(TRUE): Consider the array { 3, 2, 1 }, which has 3 inversions. The first exchange results in the array { 1, 2, 3 }, which has zero inversions.
run([3, 2, 1], 'SELECTION SORT', prt=prt)
def test_wk2_q2a(prt=sys.stdout):
desc = 'SELECTION SORT WORDS'
prt.write("\n{TEST}\n".format(TEST=desc))
exp = "BECK BUSH DEVO EVE6 HOLE JAYZ KORN MIMS VAIN RATT TOTO PINK SADE NOFX SOAD WHAM"
a = "HOLE BUSH MIMS BECK WHAM SOAD NOFX TOTO VAIN RATT DEVO PINK SADE KORN JAYZ EVE6".split()
ah = ArrayHistory()
Sort(a, array_history=ah)
ah.show(desc)
for idx, A in enumerate(ah):
if chk( A[0], exp ):
prt.write("MATCH {I}\n".format(I=idx))
def run(a, desc=None, prt=sys.stdout):
ah = ArrayHistory()
Sort(a, array_history=ah)
    if desc is None:
        desc = "SELECTION SORT"
prt.write("{DESC} RESULT {A}\n".format(DESC=desc, A=' '.join(str(e) for e in a)))
ah.prt()
ah.show(desc)
def run_all():
    """Run all tests."""
    test_wk2_lec()
    test_wk2_ex_Selections_489125()
    test_wk2_q3a()
    test_wk2_q3b()
    test_wk2_q2a()
def cli():
N = len(sys.argv)
if N == 1:
run_all()
elif N == 2:
run(cli_get_array())
if __name__ == '__main__':
cli()
| gpl-2.0 |
svm-zhang/poolseq_tk | poolseq_tk.py | 1 | 12829 | import os
import sys
import argparse
import collections
import multiprocessing as mp
import glob
import subprocess
import shlex
import re
import sz_collapse
import sz_acount
import sz_mergeAC
import sz_filter
import sz_fisher
import sz_cmh
import sz_plotting
import sz_overlap
import sz_prepVCF
import sz_view
import sz_biallelic
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    def _format_action(self, action):
        """Collapse the default two-line subcommand listing into aligned
        one-line "name    description" entries."""
        flag = 0
        parts = super(argparse.RawDescriptionHelpFormatter, self)._format_action(action)
        if action.nargs == argparse.PARSER:
            sub_cmd = "\n"
            for i, part in enumerate(parts.split("\n")):
                if i == 0:
                    continue
                else:
                    # a long subcommand name was emitted alone on the previous
                    # pass; append its help text, indented, on the same line
                    if flag == 1:
                        sub_cmd += 4*" "+ " ".join(filter(None, part.split(" "))) + "\n"
                        flag = 0
                        continue
                    if len(part.split(" ")) > 4:
                        if len(part.split(" ")[4]) > 7:
                            # name longer than the column width: defer its help
                            # text to the next iteration
                            sub_cmd += " ".join(part.split(" ")[0:5])
                            flag = 1
                        else:
                            sub_cmd += " ".join(part.split(" ")[0:5]) + (9-len(part.split(" ")[4])+4)*" " + " ".join(filter(None, part.split(" "))) + "\n"
            return sub_cmd
        else:
            return parts
def getopts():
parser = argparse.ArgumentParser(description="Toolkits for Genome-wide Association Mapping using Pooled Sequencing")
sub_parsers = parser.add_subparsers(title="Commands", metavar="", dest="command")
usage = "Viewing mpileup file (transforming 5th column in mpileup into human readable letters)"
view_parser = sub_parsers.add_parser("view", help=usage)
view_parser.add_argument("-mpileup",
metavar="FILE",
dest="ipileup",
required=True,
help="mpileup file")
view_parser.add_argument("-snp",
metavar="FILE",
dest="isnp",
required=True,
help="tab-delimited snp file with four columns: chr, pos, ref, alt")
view_parser.add_argument("-o",
metavar="FILE",
dest="out",
help="tab-delimited file with five columns: chr, pos, ref, alt, transformed reads bases ")
view_parser.set_defaults(func=sz_view.run_view)
# Collapsing two mpileup files
usage = "Collapsing two pileup files at corresponding SNPs"
collapse_parser = sub_parsers.add_parser("collapse", help=usage)
collapse_parser.add_argument("-m1",
metavar="FILE",
dest="m1",
required="True",
help="one of the two mpileup files")
collapse_parser.add_argument("-m2",
metavar="FILE",
dest="m2",
required="True",
help="one of the two mpileup files")
collapse_parser.add_argument("-snps",
metavar="FILE",
dest="snps",
required="True",
help="a list of SNP positions. e.g. chr\\tpos")
collapse_parser.add_argument("-offset1",
metavar="INT",
dest="offset1",
type=int,
default=0,
help="offset add in for the first mpileup file specified by -m1")
collapse_parser.add_argument("-offset2",
metavar="INT",
dest="offset2",
type=int,
default=0,
help="offset add in for the second mpileup file specified by -m2")
collapse_parser.add_argument("-o",
metavar="FILE",
dest="out",
default=sys.stdout,
help="output file. Default: STDOUT")
collapse_parser.set_defaults(func=sz_collapse.run_collapse)
usage = "Counting number of alleles given number of pileup files"
count_parser = sub_parsers.add_parser("count", help=usage)
count_parser.add_argument("-o",
metavar="FILE",
dest="out",
default=sys.stdout,
help="output file of allele counts at each SNP. Default: STDOUT")
count_parser.add_argument("-pos",
metavar="FILE",
dest="pos",
help="file of SNPs where counting will happen")
count_parser.add_argument("pileups",
metavar="PILEUP",
nargs='+',
help="pileup files")
count_parser.set_defaults(func=sz_acount.run_count)
usage = "Getting Biallelic sites only"
biallelic_parser = sub_parsers.add_parser("biallelic", help=usage)
biallelic_parser.add_argument("-o",
metavar="FILE",
dest="out",
required=True,
help="output file of biallelic sites")
biallelic_parser.add_argument("pileups",
metavar="PILEUP",
nargs='+',
help="pileup files")
biallelic_parser.set_defaults(func=sz_biallelic.run_biallelic)
usage = "Filter SNPs that are not satisfied specified conditions"
filter_parser = sub_parsers.add_parser("filter", help=usage)
filter_parser.add_argument("-ac",
metavar="FILE",
dest="ac_file",
required=True,
help="allele counts file")
filter_parser.add_argument("-o",
metavar="FILE",
dest="out",
default=sys.stdout,
help="output file without filtered SNPs. Default: STDOUT")
filter_parser.add_argument("-min_ref_ac",
metavar="INT",
dest="min_ref_ac",
type=int,
default=5,
help="minimum number of the ref allele (3rd column) per sample/pool")
filter_parser.add_argument("-min_alt_ac",
metavar="INT",
dest="min_alt_ac",
type=int,
default=5,
help="minimum number of the alt allele (4th column) per sample/pool")
filter_parser.add_argument("-min_cov",
metavar="INT",
dest="min_cov",
type=int,
default=10,
help="specify minimum coverage per site per sample/pool")
filter_parser.set_defaults(func=sz_filter.run_filter)
usage = "Merging allele counts from multiple replicates"
mergeAC_parser = sub_parsers.add_parser("mergeAC", help=usage)
mergeAC_parser.add_argument("-o",
metavar="FILE",
dest="out",
default=sys.stdout,
help="output file of combined counts at each SNP across replicates")
mergeAC_parser.add_argument("acs",
metavar="ac_file",
nargs='+',
help="allele counts files")
mergeAC_parser.set_defaults(func=sz_mergeAC.run_merge)
usage = "Run Fisher's Exact Test at each SNP"
fisher_parser = sub_parsers.add_parser("fisher", help=usage)
fisher_parser.add_argument("-ac",
metavar="FILE",
dest="ac_file",
help="allele counts for one pool")
fisher_parser.add_argument("-outp",
metavar="PREFIX",
dest="outp",
default="poolseq_tk.fisher",
help="output file for Fisher's Exact tests")
fisher_parser.add_argument("-t",
metavar="INT",
dest="nproc",
type=int,
default=1,
help="Specify number of processes running simultaneously")
fisher_parser.add_argument("-adj_cutoff",
metavar="FLOAT",
dest="adj_cutoff",
type=float,
default=0.05,
help="specify the cutoff below which adjusted p-values will be considered as significant")
fisher_parser.add_argument("-adj_method",
metavar="STR",
dest="adj_method",
default="fdr",
# choices=["holm", "hochberg", "hommel", "bonferroni", "BH", "BY", "fdr", "none"],
help="specify the adjustment methods. Only BH procedure supported")
fisher_parser.add_argument("-direction",
metavar="STR",
dest="oddsr_direction",
choices=["greater", "less"],
required=True,
help="specify whether odds ration greater, or less, than 1")
fisher_parser.set_defaults(func=sz_fisher.run_fisher)
usage="run Cochran-Mantel-Haenszel test with multi-testing adjustment"
cmh_parser = sub_parsers.add_parser("cmh", help=usage)
cmh_parser.add_argument("-ac",
metavar="FILE",
dest="table_file",
required=True,
help="output file with the table that CMH test run on")
cmh_parser.add_argument("-outp",
metavar="PREFIX",
dest="outp",
default="poolseq_tk.cmh",
required=True, help="output file with CMH test results")
cmh_parser.add_argument("-t",
metavar="INT",
dest="nproc",
type=int,
default=1,
help="Specify number of processes running simultaneously")
cmh_parser.add_argument("-adj_cutoff",
metavar="FLOAT",
dest="adj_cutoff",
type=float,
default=0.05,
help="specify the cutoff below which adjusted p-values will be considered as significant")
cmh_parser.add_argument("-adj_method",
metavar="STR",
dest="adj_method",
default="BH",
choices=["holm", "hochberg", "hommel", "bonferroni", "BH", "BY", "fdr", "none"],
help="specify the adjustment methods")
cmh_parser.add_argument("-direction",
metavar="STR",
dest="oddsr_direction",
choices=["greater", "less"],
required=True,
help="specify whether odds ration greater, or less, than 1")
cmh_parser.set_defaults(func=sz_cmh.run_cmh)
usage = "Making Manhattan plot and QQ plot"
plot_parser = sub_parsers.add_parser("plot", help=usage)
plot_parser.add_argument("-i",
metavar="FILE",
dest="input",
required=True,
help="input file of test results with all SNPs")
plot_parser.add_argument("-highlight",
metavar="FILE",
dest="highlight_snps",
help="file of a list of SNPs to be highlighted in the Manhattan plot")
plot_parser.add_argument("-outp",
metavar="PREFIX",
dest="outp",
help="prefix of output file")
plot_parser.add_argument("-pcutoff",
metavar="FLOAT",
dest="pcutoff",
type=float,
help="specify the p value cutoff to draw on the Mahhatan plot")
plot_parser.add_argument("-fdrlevel",
metavar="FLOAT",
dest="fdrlevel",
type=float,
default=0.05,
help="specify at which level FDR will be applied")
plot_parser.add_argument("-qqtitle",
metavar="STR",
dest="qqtitle",
help="specify the title for QQ plot")
plot_parser.add_argument("-manx",
metavar="STR",
dest="manx",
help="specify xlab for manhattan plot")
plot_parser.add_argument("-manxlim",
metavar="STR",
dest="manxlim",
default="-",
help="an interval defined by min and max, sperated by comma, e.g. 19,27. Default=\"-\"")
plot_parser.add_argument("-mantitle",
metavar="STR",
dest="mantitle",
help="specify the title for Manhattan plot")
plot_mutual_group = plot_parser.add_mutually_exclusive_group(required=True)
plot_mutual_group.add_argument("-pdf",
dest="pdf",
action="store_true",
help="output qqplot in pdf format")
plot_mutual_group.add_argument("-png",
dest="png",
action="store_true",
help="output qqplot in pdf format. Probably not working!")
plot_parser.set_defaults(func=sz_plotting.making_plot)
usage = "Preparing VCF file from tests result file for snpEff"
prepVCF_parser = sub_parsers.add_parser("vcf", help=usage)
prepVCF_parser.add_argument("-i",
metavar="FILE",
dest="infile",
required="True",
help="test result file generated from poolseq_tk.py fisher or poolseq_tk.py cmh")
prepVCF_parser.add_argument("-o",
metavar="FILE",
dest="out",
help="output in VCF format.")
prepVCF_parser.add_argument("-samples",
metavar="LIST",
dest="samples",
default="table1,table2,table3,table4",
help="a list of sample names separated by comma")
prepVCF_parser.add_argument("-filter",
metavar="EXPR",
nargs='*',
dest="filters",
default=list(),
help="a set of filters to apply. Only support INFO field ratio, e.g. ratio>1")
prepVCF_parser.add_argument("-fst",
metavar="FILE",
dest="ifst",
help="a file of Fst values")
prepVCF_parser.set_defaults(func=sz_prepVCF.run_prepVCF)
adjust_parser = sub_parsers.add_parser("adjust", help="getting significant SNPs with FDR correction")
diff_parser = sub_parsers.add_parser("diff", help="get SNPs that significant in one replicate but not in the other")
usage = "Get overlaps of significant SNPs between replicates/pools"
overlap_parser = sub_parsers.add_parser("overlap", help=usage)
overlap_parser.add_argument("-a",
metavar="FILE",
dest="file_a",
help="significant SNPs identified from pool A")
overlap_parser.add_argument("-b",
metavar="FILE",
dest="file_b",
help="significant SNPs identified from pool B")
overlap_parser.add_argument("-o",
metavar="FILE",
dest="out",
help="output file of overlapion of significant SNPs identified from both pools")
overlap_parser.set_defaults(func=sz_overlap.run_overlap)
# adjust_parser.set_defaults(func=multi_testing_correction)
# diff_parser.set_defaults(func=call_diff)
return parser.parse_args()
def main():
args = getopts()
args.func(args)
if __name__ == "__main__":
main()
| gpl-2.0 |
scottcunningham/ansible | lib/ansible/plugins/action/pause.py | 57 | 5479 | # Copyright 2012, Tim Bielawa <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import sys
import time
from termios import tcflush, TCIFLUSH
from ansible.errors import *
from ansible.plugins.action import ActionBase
class ActionModule(ActionBase):
''' pauses execution for a length or time, or until input is received '''
PAUSE_TYPES = ['seconds', 'minutes', 'prompt', '']
BYPASS_HOST_LOOP = True
def run(self, tmp=None, task_vars=dict()):
''' run the pause action module '''
duration_unit = 'minutes'
prompt = None
seconds = None
result = dict(
changed = False,
rc = 0,
stderr = '',
stdout = '',
start = None,
stop = None,
delta = None,
)
# FIXME: not sure if we can get this info directly like this anymore?
#hosts = ', '.join(self.runner.host_set)
        # If 'args' is empty, this is the default prompted pause
if self._task.args is None or len(self._task.args.keys()) == 0:
pause_type = 'prompt'
#prompt = "[%s]\nPress enter to continue:\n" % hosts
prompt = "[%s]\nPress enter to continue:\n" % self._task.get_name().strip()
        # Do the 'minutes' or 'seconds' keys exist in 'args'?
elif 'minutes' in self._task.args or 'seconds' in self._task.args:
try:
if 'minutes' in self._task.args:
pause_type = 'minutes'
# The time() command operates in seconds so we need to
# recalculate for minutes=X values.
seconds = int(self._task.args['minutes']) * 60
else:
pause_type = 'seconds'
seconds = int(self._task.args['seconds'])
duration_unit = 'seconds'
except ValueError as e:
return dict(failed=True, msg="non-integer value given for prompt duration:\n%s" % str(e))
# Is 'prompt' a key in 'args'?
elif 'prompt' in self._task.args:
pause_type = 'prompt'
#prompt = "[%s]\n%s:\n" % (hosts, self._task.args['prompt'])
prompt = "[%s]\n%s:\n" % (self._task.get_name().strip(), self._task.args['prompt'])
# I have no idea what you're trying to do. But it's so wrong.
else:
return dict(failed=True, msg="invalid pause type given. must be one of: %s" % ", ".join(self.PAUSE_TYPES))
#vv("created 'pause' ActionModule: pause_type=%s, duration_unit=%s, calculated_seconds=%s, prompt=%s" % \
# (self.pause_type, self.duration_unit, self.seconds, self.prompt))
########################################################################
# Begin the hard work!
start = time.time()
result['start'] = str(datetime.datetime.now())
# FIXME: this is all very broken right now, as prompting from the worker side
# is not really going to be supported, and actions marked as BYPASS_HOST_LOOP
# probably should not be run through the executor engine at all. Also, ctrl+c
# is now captured on the parent thread, so it can't be caught here via the
# KeyboardInterrupt exception.
try:
if not pause_type == 'prompt':
print("(^C-c = continue early, ^C-a = abort)")
#print("[%s]\nPausing for %s seconds" % (hosts, seconds))
print("[%s]\nPausing for %s seconds" % (self._task.get_name().strip(), seconds))
time.sleep(seconds)
else:
# Clear out any unflushed buffered input which would
# otherwise be consumed by raw_input() prematurely.
#tcflush(sys.stdin, TCIFLUSH)
result['user_input'] = raw_input(prompt.encode(sys.stdout.encoding))
        except KeyboardInterrupt:
            while True:
                print('\nAction? (a)bort/(c)ontinue: ')
                # NOTE: getch() is not defined or imported in this module and
                # must be provided for this branch to work
                c = getch()
                if c == 'c':
                    # continue playbook evaluation
                    break
                elif c == 'a':
                    # abort further playbook evaluation
                    raise AnsibleError('user requested abort!')
finally:
duration = time.time() - start
result['stop'] = str(datetime.datetime.now())
result['delta'] = int(duration)
if duration_unit == 'minutes':
duration = round(duration / 60.0, 2)
else:
duration = round(duration, 2)
result['stdout'] = "Paused for %s %s" % (duration, duration_unit)
return result
| gpl-3.0 |
SnakeJenny/TensorFlow | tensorflow/python/ops/init_ops.py | 23 | 19144 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations often used for initializing tensors.
All variable initializers returned by functions in this file should have the
following signature:
def _initializer(shape, dtype=dtypes.float32, partition_info=None):
Args:
shape: List of `int` representing the shape of the output `Tensor`. Some
initializers may also be able to accept a `Tensor`.
dtype: (Optional) Type of the output `Tensor`.
partition_info: (Optional) variable_scope._PartitionInfo object holding
additional information about how the variable is partitioned. May be
`None` if the variable is not partitioned.
Returns:
A `Tensor` of type `dtype` and `shape`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import random_ops
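# Minimal sketch (illustrative, not part of this module): a function-style
# initializer that follows the signature documented in the module docstring.
# Any callable with this signature can be used wherever an initializer is
# expected.
#
# def _half_initializer(shape, dtype=dtypes.float32, partition_info=None):
#   return constant_op.constant(0.5, dtype=dtype, shape=shape)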
class Initializer(object):
"""Initializer base class: all initializers inherit from this class.
"""
def __call__(self, shape, dtype=None, partition_info=None):
raise NotImplementedError
class Zeros(Initializer):
"""Initializer that generates tensors initialized to 0."""
def __init__(self, dtype=dtypes.float32):
self.dtype = dtype
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return constant_op.constant(False if dtype is dtypes.bool else 0,
dtype=dtype, shape=shape)
class Ones(Initializer):
"""Initializer that generates tensors initialized to 1."""
def __init__(self, dtype=dtypes.float32):
self.dtype = dtype
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return constant_op.constant(1, dtype=dtype, shape=shape)
class Constant(Initializer):
"""Initializer that generates tensors with constant values.
The resulting tensor is populated with values of type `dtype`, as
specified by arguments `value` following the desired `shape` of the
new tensor (see examples below).
The argument `value` can be a constant value, or a list of values of type
`dtype`. If `value` is a list, then the length of the list must be less
than or equal to the number of elements implied by the desired shape of the
tensor. In the case where the total number of elements in `value` is less
than the number of elements required by the tensor shape, the last element
in `value` will be used to fill the remaining entries. If the total number of
elements in `value` is greater than the number of elements required by the
tensor shape, the initializer will raise a `ValueError`.
Args:
value: A Python scalar, list of values, or a N-dimensional numpy array. All
elements of the initialized variable will be set to the corresponding
value in the `value` argument.
dtype: The data type.
verify_shape: Boolean that enables verification of the shape of `value`. If
`True`, the initializer will throw an error if the shape of `value` is not
compatible with the shape of the initialized tensor.
Examples:
The following example can be rewritten using a numpy.ndarray instead
of the `value` list, even reshaped, as shown in the two commented lines
below the `value` list initialization.
```python
>>> import numpy as np
>>> import tensorflow as tf
>>> value = [0, 1, 2, 3, 4, 5, 6, 7]
>>> # value = np.array(value)
>>> # value = value.reshape([2, 4])
>>> init = tf.constant_initializer(value)
>>> print('fitting shape:')
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[2, 4], initializer=init)
>>> x.initializer.run()
>>> print(x.eval())
fitting shape:
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]]
>>> print('larger shape:')
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[3, 4], initializer=init)
>>> x.initializer.run()
>>> print(x.eval())
larger shape:
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]
[ 7. 7. 7. 7.]]
>>> print('smaller shape:')
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[2, 3], initializer=init)
ValueError: Too many elements provided. Needed at most 6, but received 8
>>> print('shape verification:')
>>> init_verify = tf.constant_initializer(value, verify_shape=True)
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[3, 4], initializer=init_verify)
TypeError: Expected Tensor's shape: (3, 4), got (8,).
```
"""
def __init__(self, value=0, dtype=dtypes.float32, verify_shape=False):
self.value = value
self.dtype = dtype
self.verify_shape = verify_shape
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return constant_op.constant(self.value, dtype=dtype, shape=shape,
verify_shape=self.verify_shape)
class RandomUniform(Initializer):
"""Initializer that generates tensors with a uniform distribution.
Args:
minval: A python scalar or a scalar tensor. Lower bound of the range
of random values to generate.
maxval: A python scalar or a scalar tensor. Upper bound of the range
of random values to generate. Defaults to 1 for float types.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type.
"""
def __init__(self, minval=0, maxval=None, seed=None, dtype=dtypes.float32):
self.minval = minval
self.maxval = maxval
self.seed = seed
self.dtype = dtype
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return random_ops.random_uniform(shape, self.minval, self.maxval,
dtype, seed=self.seed)
class RandomNormal(Initializer):
"""Initializer that generates tensors with a normal distribution.
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
"""
def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32):
self.mean = mean
self.stddev = stddev
self.seed = seed
self.dtype = _assert_float_dtype(dtype)
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return random_ops.random_normal(shape, self.mean, self.stddev,
dtype, seed=self.seed)
class TruncatedNormal(Initializer):
"""Initializer that generates a truncated normal distribution.
These values are similar to values from a `random_normal_initializer`
except that values more than two standard deviations from the mean
are discarded and re-drawn. This is the recommended initializer for
neural network weights and filters.
Args:
mean: a python scalar or a scalar tensor. Mean of the random values
to generate.
stddev: a python scalar or a scalar tensor. Standard deviation of the
random values to generate.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
"""
def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32):
self.mean = mean
self.stddev = stddev
self.seed = seed
self.dtype = _assert_float_dtype(dtype)
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return random_ops.truncated_normal(shape, self.mean, self.stddev,
dtype, seed=self.seed)
class UniformUnitScaling(Initializer):
"""Initializer that generates tensors without scaling variance.
When initializing a deep network, it is in principle advantageous to keep
the scale of the input variance constant, so it does not explode or diminish
by reaching the final layer. If the input is `x` and the operation `x * W`,
and we want to initialize `W` uniformly at random, we need to pick `W` from
[-sqrt(3) / sqrt(dim), sqrt(3) / sqrt(dim)]
to keep the scale intact, where `dim = W.shape[0]` (the size of the input).
A similar calculation for convolutional networks gives an analogous result
with `dim` equal to the product of the first 3 dimensions. When
nonlinearities are present, we need to multiply this by a constant `factor`.
See [Sussillo et al., 2014](https://arxiv.org/abs/1412.6558)
([pdf](http://arxiv.org/pdf/1412.6558.pdf)) for deeper motivation, experiments
and the calculation of constants. In section 2.3 there, the constants were
numerically computed: for a linear layer it's 1.0, relu: ~1.43, tanh: ~1.15.
Args:
factor: Float. A multiplicative factor by which the values will be scaled.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
"""
def __init__(self, factor=1.0, seed=None, dtype=dtypes.float32):
self.factor = factor
self.seed = seed
self.dtype = _assert_float_dtype(dtype)
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
scale_shape = shape
if partition_info is not None:
scale_shape = partition_info.full_shape
input_size = 1.0
# Estimating input size is not possible to do perfectly, but we try.
# The estimate, obtained by multiplying all dimensions but the last one,
# is the right thing for matrix multiply and convolutions (see above).
for dim in scale_shape[:-1]:
input_size *= float(dim)
# Avoid errors when initializing zero-size tensors.
input_size = max(input_size, 1.0)
max_val = math.sqrt(3 / input_size) * self.factor
return random_ops.random_uniform(shape, -max_val, max_val,
dtype, seed=self.seed)
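# Worked example (illustrative): for a [100, 50] weight matrix with
# factor=1.0, input_size = 100, so values are drawn uniformly from
# [-sqrt(3/100), sqrt(3/100)] ~= [-0.173, 0.173].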
class VarianceScaling(Initializer):
"""Initializer capable of adapting its scale to the shape of weights tensors.
With `distribution="normal"`, samples are drawn from a truncated normal
distribution centered on zero, with `stddev = sqrt(scale / n)`
where n is:
- number of input units in the weight tensor, if mode = "fan_in"
- number of output units, if mode = "fan_out"
- average of the numbers of input and output units, if mode = "fan_avg"
With `distribution="uniform"`, samples are drawn from a uniform distribution
within [-limit, limit], with `limit = sqrt(3 * scale / n)`.
Arguments:
scale: Scaling factor (positive float).
mode: One of "fan_in", "fan_out", "fan_avg".
distribution: Random distribution to use. One of "normal", "uniform".
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
Raises:
ValueError: In case of an invalid value for the "scale", mode" or
"distribution" arguments.
"""
def __init__(self, scale=1.0,
mode="fan_in",
distribution="normal",
seed=None,
dtype=dtypes.float32):
if scale <= 0.:
raise ValueError("`scale` must be positive float.")
if mode not in {"fan_in", "fan_out", "fan_avg"}:
raise ValueError("Invalid `mode` argument:", mode)
distribution = distribution.lower()
if distribution not in {"normal", "uniform"}:
raise ValueError("Invalid `distribution` argument:", distribution)
self.scale = scale
self.mode = mode
self.distribution = distribution
self.seed = seed
self.dtype = _assert_float_dtype(dtype)
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
scale = self.scale
scale_shape = shape
if partition_info is not None:
scale_shape = partition_info.full_shape
fan_in, fan_out = _compute_fans(scale_shape)
if self.mode == "fan_in":
scale /= max(1., fan_in)
elif self.mode == "fan_out":
scale /= max(1., fan_out)
else:
scale /= max(1., (fan_in + fan_out) / 2.)
if self.distribution == "normal":
stddev = math.sqrt(scale)
return random_ops.truncated_normal(shape, 0.0, stddev,
dtype, seed=self.seed)
else:
limit = math.sqrt(3.0 * scale)
return random_ops.random_uniform(shape, -limit, limit,
dtype, seed=self.seed)
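# Worked example (illustrative): shape [64, 32] with scale=2.0, mode="fan_in",
# distribution="normal" gives stddev = sqrt(2.0 / 64) ~= 0.177, i.e. the
# common "He" initialization for ReLU layers.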
class Orthogonal(Initializer):
"""Initializer that generates an orthogonal matrix.
  If the shape of the tensor to initialize is two-dimensional, it is initialized
with an orthogonal matrix obtained from the singular value decomposition of a
matrix of uniform random numbers.
If the shape of the tensor to initialize is more than two-dimensional,
a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`
is initialized, where `n` is the length of the shape vector.
The matrix is subsequently reshaped to give a tensor of the desired shape.
Args:
gain: multiplicative factor to apply to the orthogonal matrix
dtype: The type of the output.
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
"""
def __init__(self, gain=1.0, dtype=dtypes.float32, seed=None):
self.gain = gain
self.dtype = _assert_float_dtype(dtype)
self.seed = seed
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
# Check the shape
if len(shape) < 2:
raise ValueError("The tensor to initialize must be "
"at least two-dimensional")
# Flatten the input shape with the last dimension remaining
# its original shape so it works for conv2d
num_rows = 1
for dim in shape[:-1]:
num_rows *= dim
num_cols = shape[-1]
flat_shape = (num_rows, num_cols)
# Generate a random matrix
a = random_ops.random_uniform(flat_shape, dtype=dtype, seed=self.seed)
# Compute the svd
_, u, v = linalg_ops.svd(a, full_matrices=False)
# Pick the appropriate singular value decomposition
if num_rows > num_cols:
q = u
else:
# Tensorflow departs from numpy conventions
# such that we need to transpose axes here
q = array_ops.transpose(v)
return self.gain * array_ops.reshape(q, shape)
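# Worked example (illustrative): for a conv kernel of shape (3, 3, 16, 32) the
# flattened matrix has shape (3*3*16, 32) = (144, 32); since num_rows >
# num_cols, the left singular vectors u are used, then reshaped back to
# (3, 3, 16, 32) and scaled by `gain`.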
# Aliases.
# pylint: disable=invalid-name
zeros_initializer = Zeros
ones_initializer = Ones
constant_initializer = Constant
random_uniform_initializer = RandomUniform
random_normal_initializer = RandomNormal
truncated_normal_initializer = TruncatedNormal
uniform_unit_scaling_initializer = UniformUnitScaling
variance_scaling_initializer = VarianceScaling
orthogonal_initializer = Orthogonal
# pylint: enable=invalid-name
def glorot_uniform_initializer(seed=None, dtype=dtypes.float32):
"""The Glorot uniform initializer, also called Xavier uniform initializer.
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(6 / (fan_in + fan_out))`
where `fan_in` is the number of input units in the weight tensor
and `fan_out` is the number of output units in the weight tensor.
Reference: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
Arguments:
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer.
"""
return variance_scaling_initializer(scale=1.0,
mode="fan_avg",
distribution="uniform",
seed=seed,
dtype=dtype)
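# Usage sketch (illustrative; variable name and shape are hypothetical):
#
#   w = tf.get_variable("w", shape=[256, 128],
#                       initializer=glorot_uniform_initializer())
#
# Here fan_in=256 and fan_out=128, so limit = sqrt(6 / 384) ~= 0.125.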
def glorot_normal_initializer(seed=None, dtype=dtypes.float32):
"""The Glorot normal initializer, also called Xavier normal initializer.
It draws samples from a truncated normal distribution centered on 0
with `stddev = sqrt(2 / (fan_in + fan_out))`
where `fan_in` is the number of input units in the weight tensor
and `fan_out` is the number of output units in the weight tensor.
Reference: http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
Arguments:
seed: A Python integer. Used to create random seeds. See
@{tf.set_random_seed}
for behavior.
dtype: The data type. Only floating point types are supported.
Returns:
An initializer.
"""
return variance_scaling_initializer(scale=1.0,
mode="fan_avg",
distribution="normal",
seed=seed,
dtype=dtype)
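# Worked example (illustrative): with fan_in=256 and fan_out=128 the Glorot
# normal stddev is sqrt(2 / 384) ~= 0.072.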
# Utility functions.
def _compute_fans(shape):
"""Computes the number of input and output units for a weight shape.
Arguments:
shape: Integer shape tuple or TF tensor shape.
Returns:
A tuple of scalars (fan_in, fan_out).
"""
if len(shape) < 1: # Just to avoid errors for constants.
fan_in = fan_out = 1
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
else:
# Assuming convolution kernels (2D, 3D, or more).
# kernel shape: (..., input_depth, depth)
receptive_field_size = 1.
for dim in shape[:-2]:
receptive_field_size *= dim
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return fan_in, fan_out
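# Example (illustrative): a 2D shape (in_units, out_units) maps directly to
# (fan_in, fan_out); a conv kernel shape (3, 3, 16, 32) gives
# fan_in = 3*3*16 = 144 and fan_out = 3*3*32 = 288.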
def _assert_float_dtype(dtype):
"""Validate and return floating point type based on `dtype`.
`dtype` must be a floating point type.
Args:
dtype: The data type to validate.
Returns:
Validated type.
Raises:
ValueError: if `dtype` is not a floating point type.
"""
if not dtype.is_floating:
raise ValueError("Expected floating point type, got %s." % dtype)
return dtype
| apache-2.0 |
Permutatrix/servo | tests/wpt/web-platform-tests/tools/pytest/_pytest/config.py | 166 | 45047 | """ command line options, ini-file and conftest.py processing. """
import argparse
import shlex
import traceback
import types
import warnings
import py
# DON'T import pytest here because it causes import cycle troubles
import sys, os
import _pytest._code
import _pytest.hookspec # the extension point definitions
from _pytest._pluggy import PluginManager, HookimplMarker, HookspecMarker
hookimpl = HookimplMarker("pytest")
hookspec = HookspecMarker("pytest")
# pytest startup
#
class ConftestImportFailure(Exception):
def __init__(self, path, excinfo):
Exception.__init__(self, path, excinfo)
self.path = path
self.excinfo = excinfo
def main(args=None, plugins=None):
""" return exit code, after performing an in-process test run.
:arg args: list of command line arguments.
:arg plugins: list of plugin objects to be auto-registered during
initialization.
"""
try:
try:
config = _prepareconfig(args, plugins)
except ConftestImportFailure as e:
tw = py.io.TerminalWriter(sys.stderr)
for line in traceback.format_exception(*e.excinfo):
tw.line(line.rstrip(), red=True)
tw.line("ERROR: could not load %s\n" % (e.path), red=True)
return 4
else:
try:
config.pluginmanager.check_pending()
return config.hook.pytest_cmdline_main(config=config)
finally:
config._ensure_unconfigure()
except UsageError as e:
for msg in e.args:
sys.stderr.write("ERROR: %s\n" %(msg,))
return 4
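# Usage sketch (illustrative): this function backs the public pytest.main()
# entry point, so a test session can be run in-process, e.g.
#
#   import pytest
#   exit_code = pytest.main(["-x", "path/to/tests"])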
class cmdline: # compatibility namespace
main = staticmethod(main)
class UsageError(Exception):
""" error in pytest usage or invocation"""
_preinit = []
default_plugins = (
"mark main terminal runner python pdb unittest capture skipping "
"tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript "
"junitxml resultlog doctest cacheprovider").split()
builtin_plugins = set(default_plugins)
builtin_plugins.add("pytester")
def _preloadplugins():
assert not _preinit
_preinit.append(get_config())
def get_config():
if _preinit:
return _preinit.pop(0)
# subsequent calls to main will create a fresh instance
pluginmanager = PytestPluginManager()
config = Config(pluginmanager)
for spec in default_plugins:
pluginmanager.import_plugin(spec)
return config
def get_plugin_manager():
"""
Obtain a new instance of the
:py:class:`_pytest.config.PytestPluginManager`, with default plugins
already loaded.
This function can be used by integration with other tools, like hooking
into pytest to run tests into an IDE.
"""
return get_config().pluginmanager
def _prepareconfig(args=None, plugins=None):
if args is None:
args = sys.argv[1:]
elif isinstance(args, py.path.local):
args = [str(args)]
elif not isinstance(args, (tuple, list)):
if not isinstance(args, str):
raise ValueError("not a string or argument list: %r" % (args,))
args = shlex.split(args)
config = get_config()
pluginmanager = config.pluginmanager
try:
if plugins:
for plugin in plugins:
if isinstance(plugin, py.builtin._basestring):
pluginmanager.consider_pluginarg(plugin)
else:
pluginmanager.register(plugin)
return pluginmanager.hook.pytest_cmdline_parse(
pluginmanager=pluginmanager, args=args)
except BaseException:
config._ensure_unconfigure()
raise
class PytestPluginManager(PluginManager):
"""
Overwrites :py:class:`pluggy.PluginManager` to add pytest-specific
functionality:
* loading plugins from the command line, ``PYTEST_PLUGIN`` env variable and
``pytest_plugins`` global variables found in plugins being loaded;
* ``conftest.py`` loading during start-up;
"""
def __init__(self):
super(PytestPluginManager, self).__init__("pytest", implprefix="pytest_")
self._conftest_plugins = set()
# state related to local conftest plugins
self._path2confmods = {}
self._conftestpath2mod = {}
self._confcutdir = None
self._noconftest = False
self.add_hookspecs(_pytest.hookspec)
self.register(self)
if os.environ.get('PYTEST_DEBUG'):
err = sys.stderr
encoding = getattr(err, 'encoding', 'utf8')
try:
err = py.io.dupfile(err, encoding=encoding)
except Exception:
pass
self.trace.root.setwriter(err.write)
self.enable_tracing()
def addhooks(self, module_or_class):
"""
.. deprecated:: 2.8
Use :py:meth:`pluggy.PluginManager.add_hookspecs` instead.
"""
warning = dict(code="I2",
fslocation=_pytest._code.getfslineno(sys._getframe(1)),
nodeid=None,
message="use pluginmanager.add_hookspecs instead of "
"deprecated addhooks() method.")
self._warn(warning)
return self.add_hookspecs(module_or_class)
def parse_hookimpl_opts(self, plugin, name):
# pytest hooks are always prefixed with pytest_
# so we avoid accessing possibly non-readable attributes
# (see issue #1073)
if not name.startswith("pytest_"):
return
# ignore some historic special names which can not be hooks anyway
if name == "pytest_plugins" or name.startswith("pytest_funcarg__"):
return
method = getattr(plugin, name)
opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name)
if opts is not None:
for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"):
opts.setdefault(name, hasattr(method, name))
return opts
def parse_hookspec_opts(self, module_or_class, name):
opts = super(PytestPluginManager, self).parse_hookspec_opts(
module_or_class, name)
if opts is None:
method = getattr(module_or_class, name)
if name.startswith("pytest_"):
opts = {"firstresult": hasattr(method, "firstresult"),
"historic": hasattr(method, "historic")}
return opts
def _verify_hook(self, hook, hookmethod):
super(PytestPluginManager, self)._verify_hook(hook, hookmethod)
if "__multicall__" in hookmethod.argnames:
fslineno = _pytest._code.getfslineno(hookmethod.function)
warning = dict(code="I1",
fslocation=fslineno,
nodeid=None,
message="%r hook uses deprecated __multicall__ "
"argument" % (hook.name))
self._warn(warning)
def register(self, plugin, name=None):
ret = super(PytestPluginManager, self).register(plugin, name)
if ret:
self.hook.pytest_plugin_registered.call_historic(
kwargs=dict(plugin=plugin, manager=self))
return ret
def getplugin(self, name):
# support deprecated naming because plugins (xdist e.g.) use it
return self.get_plugin(name)
def hasplugin(self, name):
"""Return True if the plugin with the given name is registered."""
return bool(self.get_plugin(name))
def pytest_configure(self, config):
# XXX now that the pluginmanager exposes hookimpl(tryfirst...)
# we should remove tryfirst/trylast as markers
config.addinivalue_line("markers",
"tryfirst: mark a hook implementation function such that the "
"plugin machinery will try to call it first/as early as possible.")
config.addinivalue_line("markers",
"trylast: mark a hook implementation function such that the "
"plugin machinery will try to call it last/as late as possible.")
def _warn(self, message):
kwargs = message if isinstance(message, dict) else {
'code': 'I1',
'message': message,
'fslocation': None,
'nodeid': None,
}
self.hook.pytest_logwarning.call_historic(kwargs=kwargs)
#
# internal API for local conftest plugin handling
#
def _set_initial_conftests(self, namespace):
""" load initial conftest files given a preparsed "namespace".
As conftest files may add their own command line options
which have arguments ('--my-opt somepath') we might get some
false positives. All builtin and 3rd party plugins will have
been loaded, however, so common options will not confuse our logic
here.
"""
current = py.path.local()
self._confcutdir = current.join(namespace.confcutdir, abs=True) \
if namespace.confcutdir else None
self._noconftest = namespace.noconftest
testpaths = namespace.file_or_dir
foundanchor = False
for path in testpaths:
path = str(path)
# remove node-id syntax
i = path.find("::")
if i != -1:
path = path[:i]
anchor = current.join(path, abs=1)
if exists(anchor): # we found some file object
self._try_load_conftest(anchor)
foundanchor = True
if not foundanchor:
self._try_load_conftest(current)
def _try_load_conftest(self, anchor):
self._getconftestmodules(anchor)
# let's also consider test* subdirs
if anchor.check(dir=1):
for x in anchor.listdir("test*"):
if x.check(dir=1):
self._getconftestmodules(x)
def _getconftestmodules(self, path):
if self._noconftest:
return []
try:
return self._path2confmods[path]
except KeyError:
if path.isfile():
clist = self._getconftestmodules(path.dirpath())
else:
# XXX these days we may rather want to use config.rootdir
# and allow users to opt into looking into the rootdir parent
# directories instead of requiring to specify confcutdir
clist = []
for parent in path.parts():
if self._confcutdir and self._confcutdir.relto(parent):
continue
conftestpath = parent.join("conftest.py")
if conftestpath.isfile():
mod = self._importconftest(conftestpath)
clist.append(mod)
self._path2confmods[path] = clist
return clist
def _rget_with_confmod(self, name, path):
modules = self._getconftestmodules(path)
for mod in reversed(modules):
try:
return mod, getattr(mod, name)
except AttributeError:
continue
raise KeyError(name)
def _importconftest(self, conftestpath):
try:
return self._conftestpath2mod[conftestpath]
except KeyError:
pkgpath = conftestpath.pypkgpath()
if pkgpath is None:
_ensure_removed_sysmodule(conftestpath.purebasename)
try:
mod = conftestpath.pyimport()
except Exception:
raise ConftestImportFailure(conftestpath, sys.exc_info())
self._conftest_plugins.add(mod)
self._conftestpath2mod[conftestpath] = mod
dirpath = conftestpath.dirpath()
if dirpath in self._path2confmods:
for path, mods in self._path2confmods.items():
if path and path.relto(dirpath) or path == dirpath:
assert mod not in mods
mods.append(mod)
self.trace("loaded conftestmodule %r" %(mod))
self.consider_conftest(mod)
return mod
#
# API for bootstrapping plugin loading
#
#
def consider_preparse(self, args):
for opt1,opt2 in zip(args, args[1:]):
if opt1 == "-p":
self.consider_pluginarg(opt2)
def consider_pluginarg(self, arg):
if arg.startswith("no:"):
name = arg[3:]
self.set_blocked(name)
if not name.startswith("pytest_"):
self.set_blocked("pytest_" + name)
else:
self.import_plugin(arg)
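    # Example (illustrative): "-p no:cacheprovider" on the command line blocks
    # the cacheprovider plugin, while "-p myplugin" imports and registers it.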
def consider_conftest(self, conftestmodule):
if self.register(conftestmodule, name=conftestmodule.__file__):
self.consider_module(conftestmodule)
def consider_env(self):
self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS"))
def consider_module(self, mod):
self._import_plugin_specs(getattr(mod, "pytest_plugins", None))
def _import_plugin_specs(self, spec):
if spec:
if isinstance(spec, str):
spec = spec.split(",")
for import_spec in spec:
self.import_plugin(import_spec)
def import_plugin(self, modname):
# most often modname refers to builtin modules, e.g. "pytester",
# "terminal" or "capture". Those plugins are registered under their
# basename for historic purposes but must be imported with the
# _pytest prefix.
assert isinstance(modname, str)
if self.get_plugin(modname) is not None:
return
if modname in builtin_plugins:
importspec = "_pytest." + modname
else:
importspec = modname
try:
__import__(importspec)
except ImportError as e:
new_exc = ImportError('Error importing plugin "%s": %s' % (modname, e))
# copy over name and path attributes
for attr in ('name', 'path'):
if hasattr(e, attr):
setattr(new_exc, attr, getattr(e, attr))
raise new_exc
except Exception as e:
import pytest
if not hasattr(pytest, 'skip') or not isinstance(e, pytest.skip.Exception):
raise
self._warn("skipped plugin %r: %s" %((modname, e.msg)))
else:
mod = sys.modules[importspec]
self.register(mod, modname)
self.consider_module(mod)
class Parser:
""" Parser for command line arguments and ini-file values.
:ivar extra_info: dict of generic param -> value to display in case
there's an error processing the command line arguments.
"""
def __init__(self, usage=None, processopt=None):
self._anonymous = OptionGroup("custom options", parser=self)
self._groups = []
self._processopt = processopt
self._usage = usage
self._inidict = {}
self._ininames = []
self.extra_info = {}
def processoption(self, option):
if self._processopt:
if option.dest:
self._processopt(option)
def getgroup(self, name, description="", after=None):
""" get (or create) a named option Group.
:name: name of the option group.
:description: long description for --help output.
:after: name of other group, used for ordering --help output.
The returned group object has an ``addoption`` method with the same
signature as :py:func:`parser.addoption
<_pytest.config.Parser.addoption>` but will be shown in the
        respective group in the output of ``pytest --help``.
"""
for group in self._groups:
if group.name == name:
return group
group = OptionGroup(name, description, parser=self)
i = 0
for i, grp in enumerate(self._groups):
if grp.name == after:
break
self._groups.insert(i+1, group)
return group
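    # Usage sketch (illustrative, typically from a pytest_addoption hook;
    # names are hypothetical):
    #
    #   group = parser.getgroup("myplugin", "my plugin options")
    #   group.addoption("--my-flag", action="store_true",
    #                   help="enable my plugin behaviour")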
def addoption(self, *opts, **attrs):
""" register a command line option.
:opts: option names, can be short or long options.
:attrs: same attributes which the ``add_option()`` function of the
`argparse library
<http://docs.python.org/2/library/argparse.html>`_
accepts.
After command line parsing options are available on the pytest config
object via ``config.option.NAME`` where ``NAME`` is usually set
by passing a ``dest`` attribute, for example
``addoption("--long", dest="NAME", ...)``.
"""
self._anonymous.addoption(*opts, **attrs)
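    # Usage sketch (illustrative): an option registered as
    #   parser.addoption("--long", dest="NAME", action="store", default="x")
    # is later available as config.option.NAME.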
def parse(self, args, namespace=None):
from _pytest._argcomplete import try_argcomplete
self.optparser = self._getparser()
try_argcomplete(self.optparser)
return self.optparser.parse_args([str(x) for x in args], namespace=namespace)
def _getparser(self):
from _pytest._argcomplete import filescompleter
optparser = MyOptionParser(self, self.extra_info)
groups = self._groups + [self._anonymous]
for group in groups:
if group.options:
desc = group.description or group.name
arggroup = optparser.add_argument_group(desc)
for option in group.options:
n = option.names()
a = option.attrs()
arggroup.add_argument(*n, **a)
# bash like autocompletion for dirs (appending '/')
optparser.add_argument(FILE_OR_DIR, nargs='*').completer=filescompleter
return optparser
def parse_setoption(self, args, option, namespace=None):
parsedoption = self.parse(args, namespace=namespace)
for name, value in parsedoption.__dict__.items():
setattr(option, name, value)
return getattr(parsedoption, FILE_OR_DIR)
def parse_known_args(self, args, namespace=None):
"""parses and returns a namespace object with known arguments at this
point.
"""
return self.parse_known_and_unknown_args(args, namespace=namespace)[0]
def parse_known_and_unknown_args(self, args, namespace=None):
"""parses and returns a namespace object with known arguments, and
the remaining arguments unknown at this point.
"""
optparser = self._getparser()
args = [str(x) for x in args]
return optparser.parse_known_args(args, namespace=namespace)
def addini(self, name, help, type=None, default=None):
""" register an ini-file option.
:name: name of the ini-variable
:type: type of the variable, can be ``pathlist``, ``args``, ``linelist``
or ``bool``.
:default: default value if no ini-file option exists but is queried.
The value of ini-variables can be retrieved via a call to
:py:func:`config.getini(name) <_pytest.config.Config.getini>`.
"""
assert type in (None, "pathlist", "args", "linelist", "bool")
self._inidict[name] = (help, type, default)
self._ininames.append(name)
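    # Illustrative sketch (hypothetical ini option, not part of this
    # module):
    #
    #     parser.addini("timeout", "per-test timeout in seconds",
    #                   default="60")
    #     # later retrieved with config.getini("timeout")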
class ArgumentError(Exception):
"""
Raised if an Argument instance is created with invalid or
inconsistent arguments.
"""
def __init__(self, msg, option):
self.msg = msg
self.option_id = str(option)
def __str__(self):
if self.option_id:
return "option %s: %s" % (self.option_id, self.msg)
else:
return self.msg
class Argument:
"""class that mimics the necessary behaviour of optparse.Option """
_typ_map = {
'int': int,
'string': str,
}
# enable after some grace period for plugin writers
TYPE_WARN = False
def __init__(self, *names, **attrs):
"""store parms in private vars for use in add_argument"""
self._attrs = attrs
self._short_opts = []
self._long_opts = []
self.dest = attrs.get('dest')
if self.TYPE_WARN:
try:
help = attrs['help']
if '%default' in help:
warnings.warn(
'pytest now uses argparse. "%default" should be'
' changed to "%(default)s" ',
FutureWarning,
stacklevel=3)
except KeyError:
pass
try:
typ = attrs['type']
except KeyError:
pass
else:
# this might raise a keyerror as well, don't want to catch that
if isinstance(typ, py.builtin._basestring):
if typ == 'choice':
if self.TYPE_WARN:
warnings.warn(
'type argument to addoption() is a string %r.'
' For parsearg this is optional and when supplied '
' should be a type.'
' (options: %s)' % (typ, names),
FutureWarning,
stacklevel=3)
# argparse expects a type here take it from
# the type of the first element
attrs['type'] = type(attrs['choices'][0])
else:
if self.TYPE_WARN:
warnings.warn(
'type argument to addoption() is a string %r.'
' For parsearg this should be a type.'
' (options: %s)' % (typ, names),
FutureWarning,
stacklevel=3)
attrs['type'] = Argument._typ_map[typ]
# used in test_parseopt -> test_parse_defaultgetter
self.type = attrs['type']
else:
self.type = typ
try:
# attribute existence is tested in Config._processopt
self.default = attrs['default']
except KeyError:
pass
self._set_opt_strings(names)
if not self.dest:
if self._long_opts:
self.dest = self._long_opts[0][2:].replace('-', '_')
else:
try:
self.dest = self._short_opts[0][1:]
except IndexError:
raise ArgumentError(
'need a long or short option', self)
def names(self):
return self._short_opts + self._long_opts
def attrs(self):
# update any attributes set by processopt
attrs = 'default dest help'.split()
if self.dest:
attrs.append(self.dest)
for attr in attrs:
try:
self._attrs[attr] = getattr(self, attr)
except AttributeError:
pass
if self._attrs.get('help'):
a = self._attrs['help']
a = a.replace('%default', '%(default)s')
#a = a.replace('%prog', '%(prog)s')
self._attrs['help'] = a
return self._attrs
def _set_opt_strings(self, opts):
"""directly from optparse
might not be necessary as this is passed to argparse later on"""
for opt in opts:
if len(opt) < 2:
raise ArgumentError(
"invalid option string %r: "
"must be at least two characters long" % opt, self)
elif len(opt) == 2:
if not (opt[0] == "-" and opt[1] != "-"):
raise ArgumentError(
"invalid short option string %r: "
"must be of the form -x, (x any non-dash char)" % opt,
self)
self._short_opts.append(opt)
else:
if not (opt[0:2] == "--" and opt[2] != "-"):
raise ArgumentError(
"invalid long option string %r: "
"must start with --, followed by non-dash" % opt,
self)
self._long_opts.append(opt)
def __repr__(self):
retval = 'Argument('
if self._short_opts:
retval += '_short_opts: ' + repr(self._short_opts) + ', '
if self._long_opts:
retval += '_long_opts: ' + repr(self._long_opts) + ', '
retval += 'dest: ' + repr(self.dest) + ', '
if hasattr(self, 'type'):
retval += 'type: ' + repr(self.type) + ', '
if hasattr(self, 'default'):
retval += 'default: ' + repr(self.default) + ', '
if retval[-2:] == ', ': # always long enough to test ("Argument(" )
retval = retval[:-2]
retval += ')'
return retval
class OptionGroup:
def __init__(self, name, description="", parser=None):
self.name = name
self.description = description
self.options = []
self.parser = parser
def addoption(self, *optnames, **attrs):
""" add an option to this group.
if a shortened version of a long option is specified it will
be suppressed in the help. addoption('--twowords', '--two-words')
results in help showing '--two-words' only, but --twowords gets
accepted **and** the automatic destination is in args.twowords
"""
option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=False)
def _addoption(self, *optnames, **attrs):
option = Argument(*optnames, **attrs)
self._addoption_instance(option, shortupper=True)
def _addoption_instance(self, option, shortupper=False):
if not shortupper:
for opt in option._short_opts:
if opt[0] == '-' and opt[1].islower():
raise ValueError("lowercase shortoptions reserved")
if self.parser:
self.parser.processoption(option)
self.options.append(option)
class MyOptionParser(argparse.ArgumentParser):
def __init__(self, parser, extra_info=None):
if not extra_info:
extra_info = {}
self._parser = parser
argparse.ArgumentParser.__init__(self, usage=parser._usage,
add_help=False, formatter_class=DropShorterLongHelpFormatter)
        # extra_info is a dict of (param -> value) to display in case of a
        # usage error, to give the user more contextual information
self.extra_info = extra_info
def parse_args(self, args=None, namespace=None):
"""allow splitting of positional arguments"""
args, argv = self.parse_known_args(args, namespace)
if argv:
for arg in argv:
if arg and arg[0] == '-':
lines = ['unrecognized arguments: %s' % (' '.join(argv))]
for k, v in sorted(self.extra_info.items()):
lines.append(' %s: %s' % (k, v))
self.error('\n'.join(lines))
getattr(args, FILE_OR_DIR).extend(argv)
return args
class DropShorterLongHelpFormatter(argparse.HelpFormatter):
"""shorten help for long options that differ only in extra hyphens
- collapse **long** options that are the same except for extra hyphens
    - special action attribute map_long_option allows suppressing additional
long options
- shortcut if there are only two options and one of them is a short one
- cache result on action object as this is called at least 2 times
"""
def _format_action_invocation(self, action):
orgstr = argparse.HelpFormatter._format_action_invocation(self, action)
if orgstr and orgstr[0] != '-': # only optional arguments
return orgstr
res = getattr(action, '_formatted_action_invocation', None)
if res:
return res
options = orgstr.split(', ')
if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2):
# a shortcut for '-h, --help' or '--abc', '-a'
action._formatted_action_invocation = orgstr
return orgstr
return_list = []
option_map = getattr(action, 'map_long_option', {})
if option_map is None:
option_map = {}
short_long = {}
for option in options:
if len(option) == 2 or option[2] == ' ':
continue
if not option.startswith('--'):
raise ArgumentError('long optional argument without "--": [%s]'
% (option), self)
xxoption = option[2:]
if xxoption.split()[0] not in option_map:
shortened = xxoption.replace('-', '')
if shortened not in short_long or \
len(short_long[shortened]) < len(xxoption):
short_long[shortened] = xxoption
# now short_long has been filled out to the longest with dashes
# **and** we keep the right option ordering from add_argument
for option in options: #
if len(option) == 2 or option[2] == ' ':
return_list.append(option)
if option[2:] == short_long.get(option.replace('-', '')):
return_list.append(option.replace(' ', '='))
action._formatted_action_invocation = ', '.join(return_list)
return action._formatted_action_invocation
def _ensure_removed_sysmodule(modname):
try:
del sys.modules[modname]
except KeyError:
pass
class CmdOptions(object):
""" holds cmdline options as attributes."""
def __init__(self, values=()):
self.__dict__.update(values)
def __repr__(self):
return "<CmdOptions %r>" %(self.__dict__,)
def copy(self):
return CmdOptions(self.__dict__)
class Notset:
def __repr__(self):
return "<NOTSET>"
notset = Notset()
FILE_OR_DIR = 'file_or_dir'
class Config(object):
""" access to configuration values, pluginmanager and plugin hooks. """
def __init__(self, pluginmanager):
#: access to command line option as attributes.
#: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead
self.option = CmdOptions()
_a = FILE_OR_DIR
self._parser = Parser(
usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a),
processopt=self._processopt,
)
#: a pluginmanager instance
self.pluginmanager = pluginmanager
self.trace = self.pluginmanager.trace.root.get("config")
self.hook = self.pluginmanager.hook
self._inicache = {}
self._opt2dest = {}
self._cleanup = []
self._warn = self.pluginmanager._warn
self.pluginmanager.register(self, "pytestconfig")
self._configured = False
def do_setns(dic):
import pytest
setns(pytest, dic)
self.hook.pytest_namespace.call_historic(do_setns, {})
self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser))
def add_cleanup(self, func):
""" Add a function to be called when the config object gets out of
        use (usually coinciding with pytest_unconfigure)."""
self._cleanup.append(func)
def _do_configure(self):
assert not self._configured
self._configured = True
self.hook.pytest_configure.call_historic(kwargs=dict(config=self))
def _ensure_unconfigure(self):
if self._configured:
self._configured = False
self.hook.pytest_unconfigure(config=self)
self.hook.pytest_configure._call_history = []
while self._cleanup:
fin = self._cleanup.pop()
fin()
def warn(self, code, message, fslocation=None):
""" generate a warning for this test session. """
self.hook.pytest_logwarning.call_historic(kwargs=dict(
code=code, message=message,
fslocation=fslocation, nodeid=None))
def get_terminal_writer(self):
return self.pluginmanager.get_plugin("terminalreporter")._tw
def pytest_cmdline_parse(self, pluginmanager, args):
# REF1 assert self == pluginmanager.config, (self, pluginmanager.config)
self.parse(args)
return self
def notify_exception(self, excinfo, option=None):
if option and option.fulltrace:
style = "long"
else:
style = "native"
excrepr = excinfo.getrepr(funcargs=True,
showlocals=getattr(option, 'showlocals', False),
style=style,
)
res = self.hook.pytest_internalerror(excrepr=excrepr,
excinfo=excinfo)
if not py.builtin.any(res):
for line in str(excrepr).split("\n"):
sys.stderr.write("INTERNALERROR> %s\n" %line)
sys.stderr.flush()
def cwd_relative_nodeid(self, nodeid):
# nodeid's are relative to the rootpath, compute relative to cwd
if self.invocation_dir != self.rootdir:
fullpath = self.rootdir.join(nodeid)
nodeid = self.invocation_dir.bestrelpath(fullpath)
return nodeid
@classmethod
def fromdictargs(cls, option_dict, args):
""" constructor useable for subprocesses. """
config = get_config()
config.option.__dict__.update(option_dict)
config.parse(args, addopts=False)
for x in config.option.plugins:
config.pluginmanager.consider_pluginarg(x)
return config
def _processopt(self, opt):
for name in opt._short_opts + opt._long_opts:
self._opt2dest[name] = opt.dest
if hasattr(opt, 'default') and opt.dest:
if not hasattr(self.option, opt.dest):
setattr(self.option, opt.dest, opt.default)
@hookimpl(trylast=True)
def pytest_load_initial_conftests(self, early_config):
self.pluginmanager._set_initial_conftests(early_config.known_args_namespace)
def _initini(self, args):
ns, unknown_args = self._parser.parse_known_and_unknown_args(args, namespace=self.option.copy())
r = determine_setup(ns.inifilename, ns.file_or_dir + unknown_args)
self.rootdir, self.inifile, self.inicfg = r
self._parser.extra_info['rootdir'] = self.rootdir
self._parser.extra_info['inifile'] = self.inifile
self.invocation_dir = py.path.local()
self._parser.addini('addopts', 'extra command line options', 'args')
self._parser.addini('minversion', 'minimally required pytest version')
def _preparse(self, args, addopts=True):
self._initini(args)
if addopts:
args[:] = shlex.split(os.environ.get('PYTEST_ADDOPTS', '')) + args
args[:] = self.getini("addopts") + args
self._checkversion()
self.pluginmanager.consider_preparse(args)
try:
self.pluginmanager.load_setuptools_entrypoints("pytest11")
except ImportError as e:
self.warn("I2", "could not load setuptools entry import: %s" % (e,))
self.pluginmanager.consider_env()
self.known_args_namespace = ns = self._parser.parse_known_args(args, namespace=self.option.copy())
if self.known_args_namespace.confcutdir is None and self.inifile:
confcutdir = py.path.local(self.inifile).dirname
self.known_args_namespace.confcutdir = confcutdir
try:
self.hook.pytest_load_initial_conftests(early_config=self,
args=args, parser=self._parser)
except ConftestImportFailure:
e = sys.exc_info()[1]
if ns.help or ns.version:
                # we don't want to prevent --help/--version from working,
                # so just let it pass and print a warning at the end
self._warn("could not load initial conftests (%s)\n" % e.path)
else:
raise
def _checkversion(self):
import pytest
minver = self.inicfg.get('minversion', None)
if minver:
ver = minver.split(".")
myver = pytest.__version__.split(".")
if myver < ver:
raise pytest.UsageError(
"%s:%d: requires pytest-%s, actual pytest-%s'" %(
self.inicfg.config.path, self.inicfg.lineof('minversion'),
minver, pytest.__version__))
def parse(self, args, addopts=True):
# parse given cmdline arguments into this config object.
assert not hasattr(self, 'args'), (
"can only parse cmdline args at most once per Config object")
self._origargs = args
self.hook.pytest_addhooks.call_historic(
kwargs=dict(pluginmanager=self.pluginmanager))
self._preparse(args, addopts=addopts)
# XXX deprecated hook:
self.hook.pytest_cmdline_preparse(config=self, args=args)
args = self._parser.parse_setoption(args, self.option, namespace=self.option)
if not args:
cwd = os.getcwd()
if cwd == self.rootdir:
args = self.getini('testpaths')
if not args:
args = [cwd]
self.args = args
def addinivalue_line(self, name, line):
""" add a line to an ini-file option. The option must have been
declared but might not yet be set in which case the line becomes the
the first line in its value. """
x = self.getini(name)
assert isinstance(x, list)
x.append(line) # modifies the cached list inline
def getini(self, name):
""" return configuration value from an :ref:`ini file <inifiles>`. If the
specified name hasn't been registered through a prior
:py:func:`parser.addini <pytest.config.Parser.addini>`
call (usually from a plugin), a ValueError is raised. """
try:
return self._inicache[name]
except KeyError:
self._inicache[name] = val = self._getini(name)
return val
def _getini(self, name):
try:
description, type, default = self._parser._inidict[name]
except KeyError:
raise ValueError("unknown configuration value: %r" %(name,))
try:
value = self.inicfg[name]
except KeyError:
if default is not None:
return default
if type is None:
return ''
return []
if type == "pathlist":
dp = py.path.local(self.inicfg.config.path).dirpath()
l = []
for relpath in shlex.split(value):
l.append(dp.join(relpath, abs=True))
return l
elif type == "args":
return shlex.split(value)
elif type == "linelist":
return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
elif type == "bool":
return bool(_strtobool(value.strip()))
else:
assert type is None
return value
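    # Illustrative conversions (ini values hypothetical):
    #   type="args":     "a b c"     -> ['a', 'b', 'c']
    #   type="linelist": "x\n  y\n"  -> ['x', 'y']
    #   type="bool":     "On"        -> True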
def _getconftest_pathlist(self, name, path):
try:
mod, relroots = self.pluginmanager._rget_with_confmod(name, path)
except KeyError:
return None
modpath = py.path.local(mod.__file__).dirpath()
l = []
for relroot in relroots:
if not isinstance(relroot, py.path.local):
relroot = relroot.replace("/", py.path.local.sep)
relroot = modpath.join(relroot, abs=True)
l.append(relroot)
return l
def getoption(self, name, default=notset, skip=False):
""" return command line option value.
:arg name: name of the option. You may also specify
the literal ``--OPT`` option instead of the "dest" option name.
:arg default: default value if no option of that name exists.
        :arg skip: if True, raise pytest.skip if the option does not exist
            or has a None value.
"""
name = self._opt2dest.get(name, name)
try:
val = getattr(self.option, name)
if val is None and skip:
raise AttributeError(name)
return val
except AttributeError:
if default is not notset:
return default
if skip:
import pytest
pytest.skip("no %r option found" %(name,))
raise ValueError("no option named %r" % (name,))
def getvalue(self, name, path=None):
""" (deprecated, use getoption()) """
return self.getoption(name)
def getvalueorskip(self, name, path=None):
""" (deprecated, use getoption(skip=True)) """
return self.getoption(name, skip=True)
def exists(path, ignore=EnvironmentError):
try:
return path.check()
except ignore:
return False
def getcfg(args, inibasenames):
args = [x for x in args if not str(x).startswith("-")]
if not args:
args = [py.path.local()]
for arg in args:
arg = py.path.local(arg)
for base in arg.parts(reverse=True):
for inibasename in inibasenames:
p = base.join(inibasename)
if exists(p):
iniconfig = py.iniconfig.IniConfig(p)
if 'pytest' in iniconfig.sections:
return base, p, iniconfig['pytest']
elif inibasename == "pytest.ini":
# allowed to be empty
return base, p, {}
return None, None, None
def get_common_ancestor(args):
# args are what we get after early command line parsing (usually
# strings, but can be py.path.local objects as well)
common_ancestor = None
for arg in args:
if str(arg)[0] == "-":
continue
p = py.path.local(arg)
if common_ancestor is None:
common_ancestor = p
else:
if p.relto(common_ancestor) or p == common_ancestor:
continue
elif common_ancestor.relto(p):
common_ancestor = p
else:
shared = p.common(common_ancestor)
if shared is not None:
common_ancestor = shared
if common_ancestor is None:
common_ancestor = py.path.local()
elif not common_ancestor.isdir():
common_ancestor = common_ancestor.dirpath()
return common_ancestor
def determine_setup(inifile, args):
if inifile:
iniconfig = py.iniconfig.IniConfig(inifile)
try:
inicfg = iniconfig["pytest"]
except KeyError:
inicfg = None
rootdir = get_common_ancestor(args)
else:
ancestor = get_common_ancestor(args)
rootdir, inifile, inicfg = getcfg(
[ancestor], ["pytest.ini", "tox.ini", "setup.cfg"])
if rootdir is None:
for rootdir in ancestor.parts(reverse=True):
if rootdir.join("setup.py").exists():
break
else:
rootdir = ancestor
return rootdir, inifile, inicfg or {}
def setns(obj, dic):
import pytest
for name, value in dic.items():
if isinstance(value, dict):
mod = getattr(obj, name, None)
if mod is None:
modname = "pytest.%s" % name
mod = types.ModuleType(modname)
sys.modules[modname] = mod
mod.__all__ = []
setattr(obj, name, mod)
obj.__all__.append(name)
setns(mod, value)
else:
setattr(obj, name, value)
obj.__all__.append(name)
#if obj != pytest:
# pytest.__all__.append(name)
setattr(pytest, name, value)
def create_terminal_writer(config, *args, **kwargs):
"""Create a TerminalWriter instance configured according to the options
in the config object. Every code which requires a TerminalWriter object
and has access to a config object should use this function.
"""
tw = py.io.TerminalWriter(*args, **kwargs)
if config.option.color == 'yes':
tw.hasmarkup = True
if config.option.color == 'no':
tw.hasmarkup = False
return tw
def _strtobool(val):
"""Convert a string representation of truth to true (1) or false (0).
True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
'val' is anything else.
.. note:: copied from distutils.util
"""
val = val.lower()
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return 1
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
return 0
else:
raise ValueError("invalid truth value %r" % (val,))
| mpl-2.0 |
josephnoir/RIOT | cpu/esp32/gen_esp32part.py | 15 | 17009 | #!/usr/bin/env python
#
# ESP32 partition table generation tool
#
# Converts partition tables to/from CSV and binary formats.
#
# See http://esp-idf.readthedocs.io/en/latest/api-guides/partition-tables.html
# for explanation of partition table structure and uses.
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division
import argparse
import os
import re
import struct
import sys
import hashlib
import binascii
MAX_PARTITION_LENGTH = 0xC00 # 3K for partition data (96 entries) leaves 1K in a 4K sector for signature
SHA256_PARTITION_BEGIN = b"\xEB\xEB" + b"\xFF" * 14 # The first 2 bytes are like magic numbers for SHA256 sum
__version__ = '1.0'
quiet = False
sha256sum = True
def status(msg):
""" Print status message to stderr """
if not quiet:
critical(msg)
def critical(msg):
""" Print critical message to stderr """
if not quiet:
sys.stderr.write(msg)
sys.stderr.write('\n')
class PartitionTable(list):
def __init__(self):
super(PartitionTable, self).__init__(self)
@classmethod
def from_csv(cls, csv_contents):
res = PartitionTable()
lines = csv_contents.splitlines()
def expand_vars(f):
f = os.path.expandvars(f)
m = re.match(r'(?<!\\)\$([A-Za-z_][A-Za-z0-9_]*)', f)
if m:
raise InputError("unknown variable '%s'" % m.group(1))
return f
line_num = range(len(lines))
for line_no in line_num:
line = expand_vars(lines[line_no]).strip()
if line.startswith("#") or len(line) == 0:
continue
try:
res.append(PartitionDefinition.from_csv(line))
except InputError as e:
raise InputError("Error at line %d: %s" % (line_no+1, e))
except Exception:
critical("Unexpected error parsing line %d: %s" % (line_no+1, line))
raise
# fix up missing offsets & negative sizes
last_end = 0x5000 # first offset after partition table
for e in res:
if e.offset is None:
pad_to = 0x10000 if e.type == PartitionDefinition.APP_TYPE else 4
if last_end % pad_to != 0:
last_end += pad_to - (last_end % pad_to)
e.offset = last_end
if e.size < 0:
e.size = -e.size - e.offset
last_end = e.offset + e.size
return res
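    # For example, an app entry with no explicit offset following a
    # partition that ends at 0x9000 is padded up to the next 0x10000
    # boundary and placed at offset 0x10000.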
def __getitem__(self, item):
""" Allow partition table access via name as well as by
numeric index. """
if isinstance(item, str):
for x in self:
if x.name == item:
return x
raise ValueError("No partition entry named '%s'" % item)
else:
return super(PartitionTable, self).__getitem__(item)
def verify(self):
# verify each partition individually
for p in self:
p.verify()
# check for overlaps
last = None
for p in sorted(self, key=lambda x: x.offset):
if p.offset < 0x5000:
raise InputError("Partition offset 0x%x is below 0x5000" % p.offset)
if last is not None and p.offset < last.offset + last.size:
raise InputError("Partition at 0x%x overlaps 0x%x-0x%x" % (p.offset,
last.offset,
last.offset+last.size-1))
last = p
def flash_size(self):
""" Return the size that partitions will occupy in flash
(ie the offset the last partition ends at)
"""
try:
last = sorted(self, reverse=True)[0]
except IndexError:
return 0 # empty table!
return last.offset + last.size
@classmethod
def from_binary(cls, b):
sha256 = hashlib.sha256()
result = cls()
for o in range(0, len(b), 32):
data = b[o:o+32]
if len(data) != 32:
raise InputError("Partition table length must be a multiple of 32 bytes")
if data == b'\xFF'*32:
return result # got end marker
if sha256sum and data[:2] == SHA256_PARTITION_BEGIN[:2]: # check only the magic number part
if data[16:] == sha256.digest():
continue # the next iteration will check for the end marker
else:
raise InputError("SHA256 checksums don't match! "
"(computed: 0x%s, parsed: 0x%s)" % (sha256.hexdigest(),
binascii.hexlify(data[16:])))
else:
sha256.update(data)
result.append(PartitionDefinition.from_binary(data))
raise InputError("Partition table is missing an end-of-table marker")
def to_binary(self):
result = b"".join(e.to_binary() for e in self)
        # to satisfy Codacy, was: if sha256sum:
        # to satisfy Codacy, was: result += SHA256_PARTITION_BEGIN + hashlib.sha256(result).digest()
if sha256sum:
result += SHA256_PARTITION_BEGIN + hashlib.sha256(result).digest()
if len(result) >= MAX_PARTITION_LENGTH:
raise InputError("Binary partition table length (%d) longer than max" % len(result))
result += b"\xFF" * (MAX_PARTITION_LENGTH - len(result)) # pad the sector, for signing
return result
def to_csv(self, simple_formatting=False):
rows = ["# Espressif ESP32 Partition Table",
"# Name, Type, SubType, Offset, Size, Flags"]
rows += [x.to_csv(simple_formatting) for x in self]
return "\n".join(rows) + "\n"
class PartitionDefinition(object):
APP_TYPE = 0x00
DATA_TYPE = 0x01
TYPES = {
"app": APP_TYPE,
"data": DATA_TYPE,
}
# Keep this map in sync with esp_partition_subtype_t enum in esp_partition.h
SUBTYPES = {
APP_TYPE: {
"factory": 0x00,
"test": 0x20,
},
DATA_TYPE: {
"ota": 0x00,
"phy": 0x01,
"nvs": 0x02,
"coredump": 0x03,
"esphttpd": 0x80,
"fat": 0x81,
"spiffs": 0x82,
},
}
MAGIC_BYTES = b"\xAA\x50"
ALIGNMENT = {
APP_TYPE: 0x10000,
DATA_TYPE: 0x04,
}
# dictionary maps flag name (as used in CSV flags list, property name)
# to bit set in flags words in binary format
FLAGS = {
"encrypted": 0
}
# add subtypes for the 16 OTA slot values ("ota_XXX, etc.")
for ota_slot in range(16):
SUBTYPES[TYPES["app"]]["ota_%d" % ota_slot] = 0x10 + ota_slot
def __init__(self):
self.name = ""
self.type = None
self.subtype = None
self.offset = None
self.size = None
self.encrypted = False
@classmethod
def from_csv(cls, line):
""" Parse a line from the CSV """
line_w_defaults = line + ",,,," # lazy way to support default fields
fields = [f.strip() for f in line_w_defaults.split(",")]
res = PartitionDefinition()
res.name = fields[0]
res.type = res.parse_type(fields[1])
res.subtype = res.parse_subtype(fields[2])
res.offset = res.parse_address(fields[3])
res.size = res.parse_address(fields[4])
if res.size is None:
raise InputError("Size field can't be empty")
flags = fields[5].split(":")
for flag in flags:
if flag in cls.FLAGS:
setattr(res, flag, True)
elif len(flag) > 0:
raise InputError("CSV flag column contains unknown flag '%s'" % (flag))
return res
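    # Example of a CSV line this accepts (values are illustrative):
    #
    #   # Name, Type, SubType, Offset, Size, Flags
    #   nvs,    data, nvs,     0x9000, 0x6000,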
def __eq__(self, other):
return self.name == other.name and self.type == other.type \
and self.subtype == other.subtype and self.offset == other.offset \
and self.size == other.size
def __repr__(self):
def maybe_hex(x):
return "0x%x" % x if x is not None else "None"
return "PartitionDefinition('%s', 0x%x, 0x%x, %s, %s)" % (self.name, self.type,
self.subtype or 0,
maybe_hex(self.offset),
maybe_hex(self.size))
def __str__(self):
return "Part '%s' %d/%d @ 0x%x size 0x%x" % (self.name, self.type,
self.subtype, self.offset or -1,
self.size or -1)
def __cmp__(self, other):
return self.offset - other.offset
def parse_type(self, strval):
if strval == "":
raise InputError("Field 'type' can't be left empty.")
return parse_int(strval, self.TYPES)
def parse_subtype(self, strval):
if strval == "":
return 0 # default
return parse_int(strval, self.SUBTYPES.get(self.type, {}))
@classmethod
def parse_address(cls, strval):
if strval == "":
return None # PartitionTable will fill in default
return parse_int(strval, {})
def verify(self):
if self.type is None:
raise ValidationError(self, "Type field is not set")
if self.subtype is None:
raise ValidationError(self, "Subtype field is not set")
if self.offset is None:
raise ValidationError(self, "Offset field is not set")
align = self.ALIGNMENT.get(self.type, 4)
if self.offset % align:
raise ValidationError(self, "Offset 0x%x is not aligned to 0x%x" % (self.offset, align))
if self.size is None:
raise ValidationError(self, "Size field is not set")
STRUCT_FORMAT = "<2sBBLL16sL"
@classmethod
def from_binary(cls, b):
if len(b) != 32:
raise InputError("Partition definition length must be exactly 32 bytes. Got %d bytes." % len(b))
res = cls()
(magic, res.type, res.subtype, res.offset,
res.size, res.name, flags) = struct.unpack(cls.STRUCT_FORMAT, b)
if b"\x00" in res.name: # strip null byte padding from name string
res.name = res.name[:res.name.index(b"\x00")]
res.name = res.name.decode()
if magic != cls.MAGIC_BYTES:
raise InputError("Invalid magic bytes (%r) for partition definition" % magic)
for flag, bit in cls.FLAGS.items():
if flags & (1 << bit):
setattr(res, flag, True)
flags &= ~(1 << bit)
if flags != 0:
critical("WARNING: Partition definition had unknown flag(s) 0x%08x. Newer binary format?" % flags)
return res
def get_flags_list(self):
return [flag for flag in self.FLAGS.keys() if getattr(self, flag)]
def to_binary(self):
flags = sum((1 << self.FLAGS[flag]) for flag in self.get_flags_list())
return struct.pack(self.STRUCT_FORMAT,
self.MAGIC_BYTES,
self.type, self.subtype,
self.offset, self.size,
self.name.encode(),
flags)
def to_csv(self, simple_formatting=False):
def addr_format(a, include_sizes):
if not simple_formatting and include_sizes:
for (val, suffix) in [(0x100000, "M"), (0x400, "K")]:
if a % val == 0:
return "%d%s" % (a // val, suffix)
return "0x%x" % a
def lookup_keyword(t, keywords):
for k, v in keywords.items():
if simple_formatting is False and t == v:
return k
return "%d" % t
def generate_text_flags():
""" colon-delimited list of flags """
return ":".join(self.get_flags_list())
return ",".join([self.name,
lookup_keyword(self.type, self.TYPES),
lookup_keyword(self.subtype, self.SUBTYPES.get(self.type, {})),
addr_format(self.offset, False),
addr_format(self.size, True),
generate_text_flags()])
def parse_int(v, keywords):
"""Generic parser for integer fields - int(x,0) with provision for
k/m/K/M suffixes and 'keyword' value lookup.
"""
try:
for letter, multiplier in [("k", 1024), ("m", 1024*1024)]:
if v.lower().endswith(letter):
return parse_int(v[:-1], keywords) * multiplier
return int(v, 0)
except ValueError:
if len(keywords) == 0:
raise InputError("Invalid field value %s" % v)
try:
return keywords[v.lower()]
except KeyError:
raise InputError("Value '%s' is not valid. Known keywords: %s" % (v, ", ".join(keywords)))
def main():
global quiet
global sha256sum
parser = argparse.ArgumentParser(description='ESP32 partition table utility')
parser.add_argument('--flash-size',
help='Optional flash size limit, checks partition table fits in flash',
nargs='?', choices=['1MB', '2MB', '4MB', '8MB', '16MB'])
parser.add_argument('--disable-sha256sum', help='Disable sha256 checksum for the partition table',
default=False, action='store_true')
parser.add_argument('--verify', '-v', help='Verify partition table fields',
default=True, action='store_false')
parser.add_argument('--quiet', '-q', help="Don't print status messages to stderr",
action='store_true')
parser.add_argument('input',
help='Path to CSV or binary file to parse. Will use stdin if omitted.',
type=argparse.FileType('rb'), default=sys.stdin)
parser.add_argument('output', help='Path to output converted binary or CSV file. Will use '
'stdout if omitted, unless the --display argument is also passed (in '
'which case only the summary is printed.)',
nargs='?',
default='-')
args = parser.parse_args()
quiet = args.quiet
sha256sum = not args.disable_sha256sum
input_arg = args.input.read()
input_is_binary = input_arg[0:2] == PartitionDefinition.MAGIC_BYTES
if input_is_binary:
status("Parsing binary partition input...")
table = PartitionTable.from_binary(input_arg)
else:
input_arg = input_arg.decode()
status("Parsing CSV input...")
table = PartitionTable.from_csv(input_arg)
if args.verify:
status("Verifying table...")
table.verify()
if args.flash_size:
size_mb = int(args.flash_size.replace("MB", ""))
size = size_mb * 1024 * 1024 # flash memory uses honest megabytes!
table_size = table.flash_size()
if size < table_size:
raise InputError("Partitions defined in '%s' occupy %.1fMB of flash (%d bytes) which "
"does not fit in configured flash size %dMB. Change the flash size "
"in menuconfig under the 'Serial Flasher Config' menu." %
(args.input.name, table_size / 1024.0 / 1024.0, table_size, size_mb))
if input_is_binary:
output = table.to_csv()
with sys.stdout if args.output == '-' else open(args.output, 'w') as f:
f.write(output)
else:
output = table.to_binary()
with sys.stdout.buffer if args.output == '-' else open(args.output, 'wb') as f:
f.write(output)
class InputError(RuntimeError):
def __init__(self, e):
super(InputError, self).__init__(e)
class ValidationError(InputError):
def __init__(self, partition, message):
super(ValidationError, self).__init__(
"Partition %s invalid: %s" % (partition.name, message))
if __name__ == '__main__':
try:
main()
except InputError as e:
print(e, file=sys.stderr)
sys.exit(2)
| lgpl-2.1 |
mfalcon/edujango | edujango/static/admin/js/compress.py | 784 | 1896 | #!/usr/bin/env python
import os
import optparse
import subprocess
import sys
here = os.path.dirname(__file__)
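# Typical invocations (paths are illustrative):
#   python compress.py                               # default admin scripts
#   python compress.py -c ~/bin/compiler.jar actions.js inlines.js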
def main():
usage = "usage: %prog [file1..fileN]"
description = """With no file paths given this script will automatically
compress all jQuery-based files of the admin app. Requires the Google Closure
Compiler library and Java version 6 or later."""
parser = optparse.OptionParser(usage, description=description)
parser.add_option("-c", dest="compiler", default="~/bin/compiler.jar",
help="path to Closure Compiler jar file")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose")
parser.add_option("-q", "--quiet",
action="store_false", dest="verbose")
(options, args) = parser.parse_args()
compiler = os.path.expanduser(options.compiler)
if not os.path.exists(compiler):
sys.exit("Google Closure compiler jar file %s not found. Please use the -c option to specify the path." % compiler)
if not args:
if options.verbose:
sys.stdout.write("No filenames given; defaulting to admin scripts\n")
args = [os.path.join(here, f) for f in [
"actions.js", "collapse.js", "inlines.js", "prepopulate.js"]]
for arg in args:
if not arg.endswith(".js"):
arg = arg + ".js"
to_compress = os.path.expanduser(arg)
if os.path.exists(to_compress):
to_compress_min = "%s.min.js" % "".join(arg.rsplit(".js"))
cmd = "java -jar %s --js %s --js_output_file %s" % (compiler, to_compress, to_compress_min)
if options.verbose:
sys.stdout.write("Running: %s\n" % cmd)
subprocess.call(cmd.split())
else:
sys.stdout.write("File %s not found. Sure it exists?\n" % to_compress)
if __name__ == '__main__':
main()
| apache-2.0 |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/numpy/distutils/from_template.py | 20 | 7826 | #!/usr/bin/python
"""
process_file(filename)
takes templated file .xxx.src and produces .xxx file where .xxx
is .pyf .f90 or .f using the following template rules:
'<..>' denotes a template.
All function and subroutine blocks in a source file with names that
contain '<..>' will be replicated according to the rules in '<..>'.
The number of comma-separated words in '<..>' will determine the number of
replicates.
'<..>' may have two different forms, named and short. For example,
named:
<p=d,s,z,c> where anywhere inside a block '<p>' will be replaced with
'd', 's', 'z', and 'c' for each replicate of the block.
<_c> is already defined: <_c=s,d,c,z>
<_t> is already defined: <_t=real,double precision,complex,double complex>
short:
<s,d,c,z>, a short form of the named, useful when no <p> appears inside
a block.
In general, '<..>' contains a comma separated list of arbitrary
expressions. If these expressions must contain a comma|leftarrow|rightarrow,
then escape the comma|leftarrow|rightarrow with a backslash.
If an expression matches '\\<index>' then it will be replaced
by <index>-th expression.
Note that all '<..>' forms in a block must have the same number of
comma-separated entries.
Predefined named template rules:
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ftypereal=real,double precision,\\0,\\1>
<ctype=float,double,complex_float,complex_double>
<ctypereal=float,double,\\0,\\1>
"""
from __future__ import division, absolute_import, print_function
__all__ = ['process_str', 'process_file']
import os
import sys
import re
routine_start_re = re.compile(r'(\n|\A)(( (\$|\*))|)\s*(subroutine|function)\b', re.I)
routine_end_re = re.compile(r'\n\s*end\s*(subroutine|function)\b.*(\n|\Z)', re.I)
function_start_re = re.compile(r'\n (\$|\*)\s*function\b', re.I)
def parse_structure(astr):
""" Return a list of tuples for each function or subroutine each
tuple is the start and end of a subroutine or function to be
expanded.
"""
spanlist = []
ind = 0
while True:
m = routine_start_re.search(astr, ind)
if m is None:
break
start = m.start()
if function_start_re.match(astr, start, m.end()):
while True:
i = astr.rfind('\n', ind, start)
if i==-1:
break
start = i
if astr[i:i+7]!='\n $':
break
start += 1
m = routine_end_re.search(astr, m.end())
ind = end = m and m.end()-1 or len(astr)
spanlist.append((start, end))
return spanlist
template_re = re.compile(r"<\s*(\w[\w\d]*)\s*>")
named_re = re.compile(r"<\s*(\w[\w\d]*)\s*=\s*(.*?)\s*>")
list_re = re.compile(r"<\s*((.*?))\s*>")
def find_repl_patterns(astr):
reps = named_re.findall(astr)
names = {}
for rep in reps:
name = rep[0].strip() or unique_key(names)
repl = rep[1].replace(r'\,', '@comma@')
thelist = conv(repl)
names[name] = thelist
return names
item_re = re.compile(r"\A\\(?P<index>\d+)\Z")
def conv(astr):
b = astr.split(',')
l = [x.strip() for x in b]
for i in range(len(l)):
m = item_re.match(l[i])
if m:
j = int(m.group('index'))
l[i] = l[j]
return ','.join(l)
def unique_key(adict):
""" Obtain a unique key given a dictionary."""
allkeys = list(adict.keys())
done = False
n = 1
while not done:
newkey = '__l%s' % (n)
if newkey in allkeys:
n += 1
else:
done = True
return newkey
template_name_re = re.compile(r'\A\s*(\w[\w\d]*)\s*\Z')
def expand_sub(substr, names):
substr = substr.replace(r'\>', '@rightarrow@')
substr = substr.replace(r'\<', '@leftarrow@')
lnames = find_repl_patterns(substr)
substr = named_re.sub(r"<\1>", substr) # get rid of definition templates
def listrepl(mobj):
thelist = conv(mobj.group(1).replace(r'\,', '@comma@'))
if template_name_re.match(thelist):
return "<%s>" % (thelist)
name = None
for key in lnames.keys(): # see if list is already in dictionary
if lnames[key] == thelist:
name = key
if name is None: # this list is not in the dictionary yet
name = unique_key(lnames)
lnames[name] = thelist
return "<%s>" % name
substr = list_re.sub(listrepl, substr) # convert all lists to named templates
# newnames are constructed as needed
numsubs = None
base_rule = None
rules = {}
for r in template_re.findall(substr):
if r not in rules:
thelist = lnames.get(r, names.get(r, None))
if thelist is None:
raise ValueError('No replicates found for <%s>' % (r))
if r not in names and not thelist.startswith('_'):
names[r] = thelist
rule = [i.replace('@comma@', ',') for i in thelist.split(',')]
num = len(rule)
if numsubs is None:
numsubs = num
rules[r] = rule
base_rule = r
elif num == numsubs:
rules[r] = rule
else:
print("Mismatch in number of replacements (base <%s=%s>)"
" for <%s=%s>. Ignoring." %
(base_rule, ','.join(rules[base_rule]), r, thelist))
if not rules:
return substr
def namerepl(mobj):
name = mobj.group(1)
return rules.get(name, (k+1)*[name])[k]
newstr = ''
for k in range(numsubs):
newstr += template_re.sub(namerepl, substr) + '\n\n'
newstr = newstr.replace('@rightarrow@', '>')
newstr = newstr.replace('@leftarrow@', '<')
return newstr
def process_str(allstr):
newstr = allstr
writestr = '' #_head # using _head will break free-format files
struct = parse_structure(newstr)
oldend = 0
names = {}
names.update(_special_names)
for sub in struct:
writestr += newstr[oldend:sub[0]]
names.update(find_repl_patterns(newstr[oldend:sub[0]]))
writestr += expand_sub(newstr[sub[0]:sub[1]], names)
oldend = sub[1]
writestr += newstr[oldend:]
return writestr
include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+[.]src)['\"]", re.I)
def resolve_includes(source):
d = os.path.dirname(source)
fid = open(source)
lines = []
for line in fid:
m = include_src_re.match(line)
if m:
fn = m.group('name')
if not os.path.isabs(fn):
fn = os.path.join(d, fn)
if os.path.isfile(fn):
print('Including file', fn)
lines.extend(resolve_includes(fn))
else:
lines.append(line)
else:
lines.append(line)
fid.close()
return lines
def process_file(source):
lines = resolve_includes(source)
return process_str(''.join(lines))
_special_names = find_repl_patterns('''
<_c=s,d,c,z>
<_t=real,double precision,complex,double complex>
<prefix=s,d,c,z>
<ftype=real,double precision,complex,double complex>
<ctype=float,double,complex_float,complex_double>
<ftypereal=real,double precision,\\0,\\1>
<ctypereal=float,double,\\0,\\1>
''')
if __name__ == "__main__":
try:
file = sys.argv[1]
except IndexError:
fid = sys.stdin
outfile = sys.stdout
else:
fid = open(file, 'r')
(base, ext) = os.path.splitext(file)
newname = base
outfile = open(newname, 'w')
allstr = fid.read()
writestr = process_str(allstr)
outfile.write(writestr)
| apache-2.0 |
Audacity-Team/Audacity | lib-src/lv2/lv2/plugins/eg02-midigate.lv2/waflib/Tools/suncxx.py | 134 | 1459 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os
from waflib import Utils
from waflib.Tools import ccroot,ar
from waflib.Configure import conf
@conf
def find_sxx(conf):
v=conf.env
cc=None
if v['CXX']:cc=v['CXX']
elif'CXX'in conf.environ:cc=conf.environ['CXX']
if not cc:cc=conf.find_program('CC',var='CXX')
if not cc:cc=conf.find_program('c++',var='CXX')
if not cc:conf.fatal('Could not find a Sun C++ compiler')
cc=conf.cmd_to_list(cc)
try:
conf.cmd_and_log(cc+['-flags'])
except Exception:
conf.fatal('%r is not a Sun compiler'%cc)
v['CXX']=cc
v['CXX_NAME']='sun'
@conf
def sxx_common_flags(conf):
v=conf.env
v['CXX_SRC_F']=[]
v['CXX_TGT_F']=['-c','-o']
if not v['LINK_CXX']:v['LINK_CXX']=v['CXX']
v['CXXLNK_SRC_F']=[]
v['CXXLNK_TGT_F']=['-o']
v['CPPPATH_ST']='-I%s'
v['DEFINES_ST']='-D%s'
v['LIB_ST']='-l%s'
v['LIBPATH_ST']='-L%s'
v['STLIB_ST']='-l%s'
v['STLIBPATH_ST']='-L%s'
v['SONAME_ST']='-Wl,-h,%s'
v['SHLIB_MARKER']='-Bdynamic'
v['STLIB_MARKER']='-Bstatic'
v['cxxprogram_PATTERN']='%s'
v['CXXFLAGS_cxxshlib']=['-Kpic','-DPIC']
v['LINKFLAGS_cxxshlib']=['-G']
v['cxxshlib_PATTERN']='lib%s.so'
v['LINKFLAGS_cxxstlib']=['-Bstatic']
v['cxxstlib_PATTERN']='lib%s.a'
def configure(conf):
conf.find_sxx()
conf.find_ar()
conf.sxx_common_flags()
conf.cxx_load_tools()
conf.cxx_add_flags()
conf.link_add_flags()
| mit |
malena/bedrock | scripts/check_calendars.py | 18 | 1596 | #!/usr/bin/env python
import os
from icalendar import Calendar
def get_ics(filename):
return filename.endswith('ics')
def check_if_correct_parse(ics_file):
fh = open(ics_file, 'rb')
try:
        # some calendars, such as Austrian ones, have multiple
# vCalendar entries - we probably don't want them to fail
# parse. So we set multiple=True below
cal_entries = Calendar.from_ical(fh.read(), multiple=True)
if cal_entries is None:
raise ValueError
finally:
fh.close()
def run(*args):
calendars_dir = os.path.join('media','caldata')
ics_files = map(lambda x: os.path.join(calendars_dir, x),
filter(get_ics, os.listdir(calendars_dir)))
format_str = "Failed to parse the icalendar file: {}. {}"
check_failed = False
for f in ics_files:
try:
check_if_correct_parse(f)
except ValueError as ve:
check_failed = True
print format_str.format(f, ve.message)
if check_failed:
# Returning a positive error code, since we have nothing to do
# with these errors. They simply have to be reported back to
# caldata maintainers. Also, we have to return something
# other than zero - for travis to fail build over invalid files.
# Please see: http://docs.travis-ci.com/user/build-lifecycle/
# """
# When any of the steps in the script stage fails with a non-zero
# exit code, the build will be marked as failed.
# """
exit(1)
# vim: ts=4 sw=4 et ai
| mpl-2.0 |
LLNL/spack | lib/spack/spack/cmd/modules/lmod.py | 5 | 1702 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import functools
import os
import llnl.util.filesystem
import spack.cmd.common.arguments
import spack.cmd.modules
def add_command(parser, command_dict):
lmod_parser = parser.add_parser(
'lmod', help='manipulate hierarchical module files'
)
sp = spack.cmd.modules.setup_parser(lmod_parser)
# Set default module file for a package
setdefault_parser = sp.add_parser(
'setdefault', help='set the default module file for a package'
)
spack.cmd.common.arguments.add_common_arguments(
setdefault_parser, ['constraint']
)
callbacks = dict(spack.cmd.modules.callbacks.items())
callbacks['setdefault'] = setdefault
command_dict['lmod'] = functools.partial(
spack.cmd.modules.modules_cmd, module_type='lmod', callbacks=callbacks
)
def setdefault(module_type, specs, args):
"""Set the default module file, when multiple are present"""
# For details on the underlying mechanism see:
#
# https://lmod.readthedocs.io/en/latest/060_locating.html#marking-a-version-as-default
#
spack.cmd.modules.one_spec_or_raise(specs)
writer = spack.modules.module_types['lmod'](specs[0])
module_folder = os.path.dirname(writer.layout.filename)
module_basename = os.path.basename(writer.layout.filename)
with llnl.util.filesystem.working_dir(module_folder):
if os.path.exists('default') and os.path.islink('default'):
os.remove('default')
os.symlink(module_basename, 'default')
| lgpl-2.1 |
ming0627/foursquared.eclair | util/gen_parser.py | 262 | 4392 | #!/usr/bin/python
import datetime
import sys
import textwrap
import common
from xml.dom import pulldom
PARSER = """\
/**
* Copyright 2009 Joe LaPenna
*/
package com.joelapenna.foursquare.parsers;
import com.joelapenna.foursquare.Foursquare;
import com.joelapenna.foursquare.error.FoursquareError;
import com.joelapenna.foursquare.error.FoursquareParseException;
import com.joelapenna.foursquare.types.%(type_name)s;
import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import java.io.IOException;
import java.util.logging.Level;
import java.util.logging.Logger;
/**
* Auto-generated: %(timestamp)s
*
* @author Joe LaPenna ([email protected])
* @param <T>
*/
public class %(type_name)sParser extends AbstractParser<%(type_name)s> {
private static final Logger LOG = Logger.getLogger(%(type_name)sParser.class.getCanonicalName());
private static final boolean DEBUG = Foursquare.PARSER_DEBUG;
@Override
public %(type_name)s parseInner(XmlPullParser parser) throws XmlPullParserException, IOException,
FoursquareError, FoursquareParseException {
parser.require(XmlPullParser.START_TAG, null, null);
%(type_name)s %(top_node_name)s = new %(type_name)s();
while (parser.nextTag() == XmlPullParser.START_TAG) {
String name = parser.getName();
%(stanzas)s
} else {
// Consume something we don't understand.
if (DEBUG) LOG.log(Level.FINE, "Found tag that we don't recognize: " + name);
skipSubTree(parser);
}
}
return %(top_node_name)s;
}
}"""
BOOLEAN_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(Boolean.valueOf(parser.nextText()));
"""
GROUP_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new GroupParser(new %(sub_parser_camel_case)s()).parse(parser));
"""
COMPLEX_STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(new %(parser_name)s().parse(parser));
"""
STANZA = """\
} else if ("%(name)s".equals(name)) {
%(top_node_name)s.set%(camel_name)s(parser.nextText());
"""
def main():
type_name, top_node_name, attributes = common.WalkNodesForAttributes(
sys.argv[1])
GenerateClass(type_name, top_node_name, attributes)
def GenerateClass(type_name, top_node_name, attributes):
"""generate it.
type_name: the type of object the parser returns
top_node_name: the name of the object the parser returns.
    per common.WalkNodesForAttributes
"""
stanzas = []
for name in sorted(attributes):
typ, children = attributes[name]
replacements = Replacements(top_node_name, name, typ, children)
if typ == common.BOOLEAN:
stanzas.append(BOOLEAN_STANZA % replacements)
elif typ == common.GROUP:
stanzas.append(GROUP_STANZA % replacements)
elif typ in common.COMPLEX:
stanzas.append(COMPLEX_STANZA % replacements)
else:
stanzas.append(STANZA % replacements)
if stanzas:
# pop off the extranious } else for the first conditional stanza.
stanzas[0] = stanzas[0].replace('} else ', '', 1)
replacements = Replacements(top_node_name, name, typ, [None])
replacements['stanzas'] = '\n'.join(stanzas).strip()
print PARSER % replacements
def Replacements(top_node_name, name, typ, children):
    # CamelCaseClassName
type_name = ''.join([word.capitalize() for word in top_node_name.split('_')])
# CamelCaseClassName
camel_name = ''.join([word.capitalize() for word in name.split('_')])
    # lowercased name with only the first letter capitalized, e.g. 'Venuename'
attribute_name = camel_name.lower().capitalize()
# mFieldName
field_name = 'm' + camel_name
if children[0]:
sub_parser_camel_case = children[0] + 'Parser'
else:
sub_parser_camel_case = (camel_name[:-1] + 'Parser')
return {
'type_name': type_name,
'name': name,
'top_node_name': top_node_name,
'camel_name': camel_name,
'parser_name': typ + 'Parser',
'attribute_name': attribute_name,
'field_name': field_name,
'typ': typ,
'timestamp': datetime.datetime.now(),
'sub_parser_camel_case': sub_parser_camel_case,
'sub_type': children[0]
}
if __name__ == '__main__':
main()
| apache-2.0 |
hknyldz/pisitools | pisilinux/pisilinux/cli/listnewest.py | 1 | 2965 | # -*- coding:utf-8 -*-
#
# Copyright (C) 2009, TUBITAK/UEKAE
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# Please read the COPYING file.
#
import optparse
import gettext
__trans = gettext.translation('pisilinux', fallback=True)
_ = __trans.gettext
import pisilinux.cli.command as command
import pisilinux.context as ctx
import pisilinux.api
import pisilinux.db
class ListNewest(command.Command, metaclass=command.autocommand):
__doc__ = _("""List newest packages in the repositories
    Usage: list-newest [ <repo1> <repo2> ... <repon> ]
    Gives a list of newly published pisilinux packages in the specified
    repositories. If no repository is specified, new packages from all
    repositories are listed.
""")
def __init__(self, args):
super(ListNewest, self).__init__(args)
self.componentdb = pisilinux.db.componentdb.ComponentDB()
self.packagedb = pisilinux.db.packagedb.PackageDB()
name = ("list-newest", "ln")
def options(self):
group = optparse.OptionGroup(self.parser, _("list-newest options"))
group.add_option("-s", "--since", action="store",
default=None, help=_("List new packages added to repository after this given date formatted as yyyy-mm-dd"))
group.add_option("-l", "--last", action="store",
default=None, help=_("List new packages added to repository after last nth previous repository update"))
self.parser.add_option_group(group)
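    # Illustrative invocations (repository names are hypothetical):
    #
    #   list-newest --since 2009-06-01 contrib
    #   list-newest --last 2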
def run(self):
self.init(database = True, write = False)
if self.args:
for arg in self.args:
self.print_packages(arg)
else:
# print for all repos
for repo in pisilinux.api.list_repos():
self.print_packages(repo)
def print_packages(self, repo):
if ctx.config.get_option('since'):
since = ctx.config.get_option('since')
elif ctx.config.get_option('last'):
since = pisilinux.db.historydb.HistoryDB().get_last_repo_update(int(ctx.config.get_option('last')))
else:
since = None
l = pisilinux.api.list_newest(repo, since)
if not l:
return
if since:
ctx.ui.info(_("Packages added to %s since %s:\n") % (repo, since))
else:
ctx.ui.info(_("Packages added to %s:") % (repo))
# maxlen is defined dynamically from the longest package name (#9021)
maxlen = max([len(_p) for _p in l])
l.sort()
for p in l:
package = self.packagedb.get_package(p, repo)
lenp = len(p)
p = p + ' ' * max(0, maxlen - lenp)
ctx.ui.info('%s - %s ' % (p, str(package.summary)))
print()
| gpl-3.0 |
dictoon/blenderseed | logger.py | 2 | 1883 | #
# This source file is part of appleseed.
# Visit http://appleseedhq.net/ for additional information and resources.
#
# This software is released under the MIT license.
#
# Copyright (c) 2014-2018 The appleseedhq Organization
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import logging
import bpy
__logger = None
__mapping = {'debug': logging.DEBUG,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
def get_logger():
global __logger
if not __logger:
__logger = logging.getLogger(__name__)
__logger.addHandler(logging.StreamHandler())
log_level = bpy.context.preferences.addons['blenderseed'].preferences.log_level
__logger.setLevel(__mapping[log_level])
return __logger
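# Minimal usage sketch (assumes the add-on preferences are available):
#
#   log = get_logger()
#   log.debug("exporting scene")
#   set_logger_level('warning')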
def set_logger_level(level):
__logger.setLevel(__mapping[level])
| mit |
RasPlex/plex-home-theatre | plex/scripts/merge_translations.py | 5 | 2008 | #!/usr/bin/env python
import sys, os, shutil
lang_map = {
"af-ZA": "Afrikaans",
"cs-CZ": "Czech",
"da": "Danish",
"de": "German",
"en": "English (US)",
"es": "Spanish",
"es-419" : "Spanish (Argentina)",
"fi": "Finnish",
"fr": "French",
"grk": "Greek",
"he": "Hebrew",
"hr-HR": "Croatian",
"is-IS": "Icelandic",
"it": "Italian",
"ko": "Korean",
"lt": "Latvian",
"nl": "Dutch",
"no": "Norwegian",
"pl-PL": "Polish",
"pt-BR": "Portuguese (Brazil)",
"pt-PT": "Portuguese",
"ru": "Russian",
"sr": "Serbian",
"sv": "Swedish",
"zh-CN": "Chinese (Simple)"
}
if __name__ == "__main__":
if len(sys.argv) != 3:
print "Need two arguments"
sys.exit(1)
d = sys.argv[1]
dest = sys.argv[2]
if not os.path.isdir(d):
print "%s is not a dir!" % d
sys.exit(1)
if not os.path.isdir(dest):
print "%s is not a xbmc lang dir" % dest
sys.exit(1)
langdir = os.path.join(dest, "language")
skinlangdir = os.path.join(dest, "addons", "skin.plex", "language")
if not os.path.isdir(langdir) or not os.path.isdir(skinlangdir):
print "Can't find %s and %s" % (langdir, skinlangdir)
sys.exit(1)
for l in os.listdir(d):
if not l in lang_map:
print "Can't find mapping for %s" % l
continue
xlang = lang_map[l]
xlang += "_plex"
xlangfile = os.path.join(langdir, xlang, "strings.po")
xskinlangfile = os.path.join(skinlangdir, xlang, "strings.po")
ld = os.path.join(d, l)
pofile = os.path.join(ld, "strings_%s.po" % l)
spofile = os.path.join(ld, "string_skin_%s.po" % l)
if os.path.exists(pofile):
if not os.path.isdir(os.path.join(langdir, xlang)):
print "Can't find dir %s" % os.path.join(langdir, xlang)
else:
print "%s->%s" % (pofile, xlangfile)
shutil.copyfile(pofile, xlangfile)
if os.path.exists(spofile):
if not os.path.isdir(os.path.join(skinlangdir, xlang)):
print "Can't find dir %s" % os.path.join(skinlangdir, xlang)
else:
print "%s->%s" % (spofile, xskinlangfile)
shutil.copyfile(spofile, xskinlangfile)
| gpl-2.0 |
jamestwebber/scipy | scipy/integrate/_ode.py | 2 | 48014 | # Authors: Pearu Peterson, Pauli Virtanen, John Travers
"""
First-order ODE integrators.
User-friendly interface to various numerical integrators for solving a
system of first order ODEs with prescribed initial conditions::
d y(t)[i]
--------- = f(t,y(t))[i],
d t
y(t=0)[i] = y0[i],
where::
i = 0, ..., len(y0) - 1
class ode
---------
A generic interface class to numeric integrators. It has the following
methods::
integrator = ode(f, jac=None)
integrator = integrator.set_integrator(name, **params)
integrator = integrator.set_initial_value(y0, t0=0.0)
integrator = integrator.set_f_params(*args)
integrator = integrator.set_jac_params(*args)
y1 = integrator.integrate(t1, step=False, relax=False)
flag = integrator.successful()
class complex_ode
-----------------
This class has the same generic interface as ode, except it can handle complex
f, y and Jacobians by transparently translating them into the equivalent
real-valued system. It supports the real-valued solvers (i.e., not zvode) and is
an alternative to ode with the zvode solver, sometimes performing better.
"""
from __future__ import division, print_function, absolute_import
# XXX: Integrators must have:
# ===========================
# cvode - C version of vode and vodpk with many improvements.
# Get it from http://www.netlib.org/ode/cvode.tar.gz.
# To wrap cvode to Python, one must write the extension module by
# hand. Its interface is too much 'advanced C' that using f2py
# would be too complicated (or impossible).
#
# How to define a new integrator:
# ===============================
#
# class myodeint(IntegratorBase):
#
# runner = <odeint function> or None
#
# def __init__(self,...): # required
# <initialize>
#
# def reset(self,n,has_jac): # optional
# # n - the size of the problem (number of equations)
# # has_jac - whether user has supplied its own routine for Jacobian
# <allocate memory,initialize further>
#
# def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required
# # this method is called to integrate from t=t0 to t=t1
# # with initial condition y0. f and jac are user-supplied functions
# # that define the problem. f_params,jac_params are additional
# # arguments
# # to these functions.
# <calculate y1>
# if <calculation was unsuccessful>:
# self.success = 0
# return t1,y1
#
# # In addition, one can define step() and run_relax() methods (they
# # take the same arguments as run()) if the integrator can support
# # these features (see IntegratorBase doc strings).
#
# if myodeint.runner:
# IntegratorBase.integrator_classes.append(myodeint)
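#
# A hedged, minimal sketch of that interface (not part of SciPy): a
# fixed-step forward Euler "integrator", kept as comments so nothing here
# is registered; the step size dt is an illustrative choice.
#
# class euler(IntegratorBase):
# runner = True # any non-None value marks the integrator as available
# def __init__(self, dt=1e-3):
# self.dt = dt
# self.success = 1
# def run(self, f, jac, y0, t0, t1, f_params, jac_params):
# y, t = asarray(y0, float), t0
# while t < t1:
# h = min(self.dt, t1 - t)
# y = y + h * asarray(f(t, y, *f_params))
# t += h
# return y, t
# IntegratorBase.integrator_classes.append(euler)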
__all__ = ['ode', 'complex_ode']
__version__ = "$Id$"
__docformat__ = "restructuredtext en"
import re
import warnings
from numpy import asarray, array, zeros, int32, isscalar, real, imag, vstack
from . import vode as _vode
from . import _dop
from . import lsoda as _lsoda
# ------------------------------------------------------------------------------
# User interface
# ------------------------------------------------------------------------------
class ode(object):
"""
A generic interface class to numeric integrators.
Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``.
*Note*: The first two arguments of ``f(t, y, ...)`` are in the
opposite order of the arguments in the system definition function used
by `scipy.integrate.odeint`.
Parameters
----------
f : callable ``f(t, y, *f_args)``
Right-hand side of the differential equation. t is a scalar,
``y.shape == (n,)``.
``f_args`` is set by calling ``set_f_params(*args)``.
`f` should return a scalar, array or list (not a tuple).
jac : callable ``jac(t, y, *jac_args)``, optional
Jacobian of the right-hand side, ``jac[i,j] = d f[i] / d y[j]``.
``jac_args`` is set by calling ``set_jac_params(*args)``.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
See also
--------
odeint : an integrator with a simpler interface based on lsoda from ODEPACK
quad : for finding the area under a curve
Notes
-----
Available integrators are listed below. They can be selected using
the `set_integrator` method.
"vode"
Real-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
implicit Adams method (for non-stiff problems) and a method based on
backward differentiation formulas (BDF) (for stiff problems).
Source: http://www.netlib.org/ode/vode.f
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "vode" integrator at the same time.
This integrator accepts the following parameters in `set_integrator`
method of the `ode` class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- lband : None or int
- uband : None or int
Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
Setting these requires your jac routine to return the jacobian
in packed format, jac_packed[i-j+uband, j] = jac[i,j]. The
dimension of the matrix must be (lband+uband+1, len(y)).
- method: 'adams' or 'bdf'
Which solver to use, Adams (non-stiff) or BDF (stiff)
- with_jacobian : bool
This option is only considered when the user has not supplied a
Jacobian function and has not indicated (by setting either band)
that the Jacobian is banded. In this case, `with_jacobian` specifies
whether the iteration method of the ODE solver's correction step is
chord iteration with an internally generated full Jacobian or
functional iteration with no Jacobian.
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- min_step : float
- max_step : float
Limits for the step sizes used by the integrator.
- order : int
Maximum order used by the integrator,
order <= 12 for Adams, <= 5 for BDF.
"zvode"
Complex-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
implicit Adams method (for non-stiff problems) and a method based on
backward differentiation formulas (BDF) (for stiff problems).
Source: http://www.netlib.org/ode/zvode.f
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "zvode" integrator at the same time.
This integrator accepts the same parameters in `set_integrator`
as the "vode" solver.
.. note::
When using ZVODE for a stiff system, it should only be used for
the case in which the function f is analytic, that is, when each f(i)
is an analytic function of each y(j). Analyticity means that the
partial derivative df(i)/dy(j) is a unique complex number, and this
fact is critical in the way ZVODE solves the dense or banded linear
systems that arise in the stiff case. For a complex stiff ODE system
in which f is not analytic, ZVODE is likely to have convergence
failures, and for this problem one should instead use DVODE on the
equivalent real system (in the real and imaginary parts of y).
"lsoda"
Real-valued Variable-coefficient Ordinary Differential Equation
solver, with fixed-leading-coefficient implementation. It provides
automatic method switching between implicit Adams method (for non-stiff
problems) and a method based on backward differentiation formulas (BDF)
(for stiff problems).
Source: http://www.netlib.org/odepack
.. warning::
This integrator is not re-entrant. You cannot have two `ode`
instances using the "lsoda" integrator at the same time.
This integrator accepts the following parameters in `set_integrator`
method of the `ode` class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- lband : None or int
- uband : None or int
Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
Setting these requires your jac routine to return the jacobian
in packed format, jac_packed[i-j+uband, j] = jac[i,j].
- with_jacobian : bool
*Not used.*
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- min_step : float
- max_step : float
Limits for the step sizes used by the integrator.
- max_order_ns : int
Maximum order used in the nonstiff case (default 12).
- max_order_s : int
Maximum order used in the stiff case (default 5).
- max_hnil : int
Maximum number of messages reporting too small step size (t + h = t)
(default 0)
- ixpr : int
Whether to generate extra printing at method switches (default False).
"dopri5"
This is an explicit Runge-Kutta method of order (4)5 due to Dormand &
Prince (with stepsize control and dense output).
Authors:
E. Hairer and G. Wanner
Universite de Geneve, Dept. de Mathematiques
CH-1211 Geneve 24, Switzerland
e-mail: [email protected], [email protected]
This code is described in [HNW93]_.
This integrator accepts the following parameters in set_integrator()
method of the ode class:
- atol : float or sequence
absolute tolerance for solution
- rtol : float or sequence
relative tolerance for solution
- nsteps : int
Maximum number of (internally defined) steps allowed during one
call to the solver.
- first_step : float
- max_step : float
- safety : float
Safety factor on new step selection (default 0.9)
- ifactor : float
- dfactor : float
Maximum factor to increase/decrease step size by in one step
- beta : float
Beta parameter for stabilised step size control.
- verbosity : int
Switch for printing messages (< 0 for no messages).
"dop853"
This is an explicit Runge-Kutta method of order 8(5,3) due to Dormand
& Prince (with stepsize control and dense output).
Options and references the same as "dopri5".
Examples
--------
A problem to integrate and the corresponding jacobian:
>>> from scipy.integrate import ode
>>>
>>> y0, t0 = [1.0j, 2.0], 0
>>>
>>> def f(t, y, arg1):
... return [1j*arg1*y[0] + y[1], -arg1*y[1]**2]
>>> def jac(t, y, arg1):
... return [[1j*arg1, 1], [0, -arg1*2*y[1]]]
The integration:
>>> r = ode(f, jac).set_integrator('zvode', method='bdf')
>>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0)
>>> t1 = 10
>>> dt = 1
>>> while r.successful() and r.t < t1:
... print(r.t+dt, r.integrate(r.t+dt))
1 [-0.71038232+0.23749653j 0.40000271+0.j ]
2.0 [0.19098503-0.52359246j 0.22222356+0.j ]
3.0 [0.47153208+0.52701229j 0.15384681+0.j ]
4.0 [-0.61905937+0.30726255j 0.11764744+0.j ]
5.0 [0.02340997-0.61418799j 0.09523835+0.j ]
6.0 [0.58643071+0.339819j 0.08000018+0.j ]
7.0 [-0.52070105+0.44525141j 0.06896565+0.j ]
8.0 [-0.15986733-0.61234476j 0.06060616+0.j ]
9.0 [0.64850462+0.15048982j 0.05405414+0.j ]
10.0 [-0.38404699+0.56382299j 0.04878055+0.j ]
References
----------
.. [HNW93] E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary
Differential Equations I. Nonstiff Problems. 2nd edition.
Springer Series in Computational Mathematics,
Springer-Verlag (1993)
"""
def __init__(self, f, jac=None):
self.stiff = 0
self.f = f
self.jac = jac
self.f_params = ()
self.jac_params = ()
self._y = []
@property
def y(self):
return self._y
def set_initial_value(self, y, t=0.0):
"""Set initial conditions y(t) = y."""
if isscalar(y):
y = [y]
n_prev = len(self._y)
if not n_prev:
self.set_integrator('') # find first available integrator
self._y = asarray(y, self._integrator.scalar)
self.t = t
self._integrator.reset(len(self._y), self.jac is not None)
return self
def set_integrator(self, name, **integrator_params):
"""
Set integrator by name.
Parameters
----------
name : str
Name of the integrator.
integrator_params
Additional parameters for the integrator.
"""
integrator = find_integrator(name)
if integrator is None:
# FIXME: this really should raise an exception. Will that break
# any code?
warnings.warn('No integrator matches %r, or it is not '
'available.' % name)
else:
self._integrator = integrator(**integrator_params)
if not len(self._y):
self.t = 0.0
self._y = array([0.0], self._integrator.scalar)
self._integrator.reset(len(self._y), self.jac is not None)
return self
def integrate(self, t, step=False, relax=False):
"""Find y=y(t), set y as an initial condition, and return y.
Parameters
----------
t : float
The endpoint of the integration step.
step : bool
If True, and if the integrator supports the step method,
then perform a single integration step and return.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
relax : bool
If True and if the integrator supports the run_relax method,
then integrate until t_1 >= t and return. ``relax`` is not
referenced if ``step=True``.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
Returns
-------
y : float
The integrated value at t
"""
if step and self._integrator.supports_step:
mth = self._integrator.step
elif relax and self._integrator.supports_run_relax:
mth = self._integrator.run_relax
else:
mth = self._integrator.run
try:
self._y, self.t = mth(self.f, self.jac or (lambda: None),
self._y, self.t, t,
self.f_params, self.jac_params)
except SystemError:
# f2py issue with tuple returns, see ticket 1187.
raise ValueError('Function to integrate must not return a tuple.')
return self._y
def successful(self):
"""Check if integration was successful."""
try:
self._integrator
except AttributeError:
self.set_integrator('')
return self._integrator.success == 1
def get_return_code(self):
"""Extracts the return code for the integration to enable better control
if the integration fails.
In general, a return code > 0 implies success, while a return code < 0
implies failure.
Notes
-----
This section describes possible return codes and their meaning, for available
integrators that can be selected by `set_integrator` method.
"vode"
=========== =======
Return Code Message
=========== =======
2 Integration successful.
-1 Excess work done on this call. (Perhaps wrong MF.)
-2 Excess accuracy requested. (Tolerances too small.)
-3 Illegal input detected. (See printed message.)
-4 Repeated error test failures. (Check all input.)
-5 Repeated convergence failures. (Perhaps bad Jacobian
supplied or wrong choice of MF or tolerances.)
-6 Error weight became zero during problem. (Solution
component i vanished, and ATOL or ATOL(i) = 0.)
=========== =======
"zvode"
=========== =======
Return Code Message
=========== =======
2 Integration successful.
-1 Excess work done on this call. (Perhaps wrong MF.)
-2 Excess accuracy requested. (Tolerances too small.)
-3 Illegal input detected. (See printed message.)
-4 Repeated error test failures. (Check all input.)
-5 Repeated convergence failures. (Perhaps bad Jacobian
supplied or wrong choice of MF or tolerances.)
-6 Error weight became zero during problem. (Solution
component i vanished, and ATOL or ATOL(i) = 0.)
=========== =======
"dopri5"
=========== =======
Return Code Message
=========== =======
1 Integration successful.
2 Integration successful (interrupted by solout).
-1 Input is not consistent.
-2 Larger nsteps is needed.
-3 Step size becomes too small.
-4 Problem is probably stiff (interrupted).
=========== =======
"dop853"
=========== =======
Return Code Message
=========== =======
1 Integration successful.
2 Integration successful (interrupted by solout).
-1 Input is not consistent.
-2 Larger nsteps is needed.
-3 Step size becomes too small.
-4 Problem is probably stiff (interrupted).
=========== =======
"lsoda"
=========== =======
Return Code Message
=========== =======
2 Integration successful.
-1 Excess work done on this call (perhaps wrong Dfun type).
-2 Excess accuracy requested (tolerances too small).
-3 Illegal input detected (internal error).
-4 Repeated error test failures (internal error).
-5 Repeated convergence failures (perhaps bad Jacobian or tolerances).
-6 Error weight became zero during problem.
-7 Internal workspace insufficient to finish (internal error).
=========== =======
"""
try:
self._integrator
except AttributeError:
self.set_integrator('')
return self._integrator.istate
def set_f_params(self, *args):
"""Set extra parameters for user-supplied function f."""
self.f_params = args
return self
def set_jac_params(self, *args):
"""Set extra parameters for user-supplied function jac."""
self.jac_params = args
return self
def set_solout(self, solout):
"""
Set callable to be called at every successful integration step.
Parameters
----------
solout : callable
``solout(t, y)`` is called at each internal integrator step,
t is a scalar providing the current independent position
y is the current solution ``y.shape == (n,)``
solout should return -1 to stop integration
otherwise it should return None or 0
"""
if self._integrator.supports_solout:
self._integrator.set_solout(solout)
if self._y is not None:
self._integrator.reset(len(self._y), self.jac is not None)
else:
raise ValueError("selected integrator does not support solout,"
" choose another one")
def _transform_banded_jac(bjac):
"""
Convert a real matrix of the form (for example)
[0 0 A B] [0 0 0 B]
[0 0 C D] [0 0 A D]
[E F G H] to [0 F C H]
[I J K L] [E J G L]
[I 0 K 0]
That is, every other column is shifted up one.
"""
# Shift every other column.
newjac = zeros((bjac.shape[0] + 1, bjac.shape[1]))
newjac[1:, ::2] = bjac[:, ::2]
newjac[:-1, 1::2] = bjac[:, 1::2]
return newjac
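# A quick numeric check of the shift above (illustrative only):
# _transform_banded_jac(array([[1., 2.], [3., 4.]])) returns
# array([[0., 2.],
# [1., 4.],
# [3., 0.]])
# i.e. even-indexed columns land one row lower than odd-indexed ones.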
class complex_ode(ode):
"""
A wrapper of ode for complex systems.
This functions similarly as `ode`, but re-maps a complex-valued
equation system to a real-valued one before using the integrators.
Parameters
----------
f : callable ``f(t, y, *f_args)``
Rhs of the equation. t is a scalar, ``y.shape == (n,)``.
``f_args`` is set by calling ``set_f_params(*args)``.
jac : callable ``jac(t, y, *jac_args)``
Jacobian of the rhs, ``jac[i,j] = d f[i] / d y[j]``.
``jac_args`` is set by calling ``set_f_params(*args)``.
Attributes
----------
t : float
Current time.
y : ndarray
Current variable values.
Examples
--------
For usage examples, see `ode`.
"""
def __init__(self, f, jac=None):
self.cf = f
self.cjac = jac
if jac is None:
ode.__init__(self, self._wrap, None)
else:
ode.__init__(self, self._wrap, self._wrap_jac)
def _wrap(self, t, y, *f_args):
f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args))
# self.tmp is a real-valued array containing the interleaved
# real and imaginary parts of f.
self.tmp[::2] = real(f)
self.tmp[1::2] = imag(f)
return self.tmp
def _wrap_jac(self, t, y, *jac_args):
# jac is the complex Jacobian computed by the user-defined function.
jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args))
# jac_tmp is the real version of the complex Jacobian. Each complex
# entry in jac, say 2+3j, becomes a 2x2 block of the form
# [2 -3]
# [3 2]
jac_tmp = zeros((2 * jac.shape[0], 2 * jac.shape[1]))
jac_tmp[1::2, 1::2] = jac_tmp[::2, ::2] = real(jac)
jac_tmp[1::2, ::2] = imag(jac)
jac_tmp[::2, 1::2] = -jac_tmp[1::2, ::2]
ml = getattr(self._integrator, 'ml', None)
mu = getattr(self._integrator, 'mu', None)
if ml is not None or mu is not None:
# Jacobian is banded. The user's Jacobian function has computed
# the complex Jacobian in packed format. The corresponding
# real-valued version has every other column shifted up.
jac_tmp = _transform_banded_jac(jac_tmp)
return jac_tmp
@property
def y(self):
return self._y[::2] + 1j * self._y[1::2]
def set_integrator(self, name, **integrator_params):
"""
Set integrator by name.
Parameters
----------
name : str
Name of the integrator
integrator_params
Additional parameters for the integrator.
"""
if name == 'zvode':
raise ValueError("zvode must be used with ode, not complex_ode")
lband = integrator_params.get('lband')
uband = integrator_params.get('uband')
if lband is not None or uband is not None:
# The Jacobian is banded. Override the user-supplied bandwidths
# (which are for the complex Jacobian) with the bandwidths of
# the corresponding real-valued Jacobian wrapper of the complex
# Jacobian.
integrator_params['lband'] = 2 * (lband or 0) + 1
integrator_params['uband'] = 2 * (uband or 0) + 1
return ode.set_integrator(self, name, **integrator_params)
def set_initial_value(self, y, t=0.0):
"""Set initial conditions y(t) = y."""
y = asarray(y)
self.tmp = zeros(y.size * 2, 'float')
self.tmp[::2] = real(y)
self.tmp[1::2] = imag(y)
return ode.set_initial_value(self, self.tmp, t)
def integrate(self, t, step=False, relax=False):
"""Find y=y(t), set y as an initial condition, and return y.
Parameters
----------
t : float
The endpoint of the integration step.
step : bool
If True, and if the integrator supports the step method,
then perform a single integration step and return.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
relax : bool
If True and if the integrator supports the run_relax method,
then integrate until t_1 >= t and return. ``relax`` is not
referenced if ``step=True``.
This parameter is provided in order to expose internals of
the implementation, and should not be changed from its default
value in most cases.
Returns
-------
y : float
The integrated value at t
"""
y = ode.integrate(self, t, step, relax)
return y[::2] + 1j * y[1::2]
def set_solout(self, solout):
"""
Set callable to be called at every successful integration step.
Parameters
----------
solout : callable
``solout(t, y)`` is called at each internal integrator step,
t is a scalar providing the current independent position
y is the current solution ``y.shape == (n,)``
solout should return -1 to stop integration
otherwise it should return None or 0
"""
if self._integrator.supports_solout:
self._integrator.set_solout(solout, complex=True)
else:
raise TypeError("selected integrator does not support solout,"
+ " choose another one")
# ------------------------------------------------------------------------------
# ODE integrators
# ------------------------------------------------------------------------------
def find_integrator(name):
for cl in IntegratorBase.integrator_classes:
if re.match(name, cl.__name__, re.I):
return cl
return None
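# Matching note: re.match anchors at the start of the class name and re.I
# makes it case-insensitive, so find_integrator('dop') returns the first
# registered class whose name starts with "dop" (dopri5), and
# find_integrator('') matches the first registered class -- which is how
# ode.set_integrator('') picks a default integrator.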
class IntegratorConcurrencyError(RuntimeError):
"""
Failure due to concurrent usage of an integrator that can be used
only for a single problem at a time.
"""
def __init__(self, name):
msg = ("Integrator `%s` can be used to solve only a single problem "
"at a time. If you want to integrate multiple problems, "
"consider using a different integrator "
"(see `ode.set_integrator`)") % name
RuntimeError.__init__(self, msg)
class IntegratorBase(object):
runner = None # runner is None => integrator is not available
success = None # success==1 if integrator was called successfully
istate = None # istate > 0 means success, istate < 0 means failure
supports_run_relax = None
supports_step = None
supports_solout = False
integrator_classes = []
scalar = float
def acquire_new_handle(self):
# Some of the integrators have internal state (ancient
# Fortran...), and so only one instance can use them at a time.
# We keep track of this, and fail when concurrent usage is tried.
self.__class__.active_global_handle += 1
self.handle = self.__class__.active_global_handle
def check_handle(self):
if self.handle is not self.__class__.active_global_handle:
raise IntegratorConcurrencyError(self.__class__.__name__)
def reset(self, n, has_jac):
"""Prepare integrator for call: allocate memory, set flags, etc.
n - number of equations.
has_jac - if user has supplied function for evaluating Jacobian.
"""
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Integrate from t=t0 to t=t1 using y0 as an initial condition.
Return a 2-tuple (y1, t1) where y1 is the result and t1 is the
time at which the integration actually stopped.
"""
raise NotImplementedError('all integrators must define '
'run(f, jac, t0, t1, y0, f_params, jac_params)')
def step(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Make one integration step and return (y1,t1)."""
raise NotImplementedError('%s does not support step() method' %
self.__class__.__name__)
def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params):
"""Integrate from t=t0 to t>=t1 and return (y1,t)."""
raise NotImplementedError('%s does not support run_relax() method' %
self.__class__.__name__)
# XXX: __str__ method for getting visual state of the integrator
def _vode_banded_jac_wrapper(jacfunc, ml, jac_params):
"""
Wrap a banded Jacobian function with a function that pads
the Jacobian with `ml` rows of zeros.
"""
def jac_wrapper(t, y):
jac = asarray(jacfunc(t, y, *jac_params))
padded_jac = vstack((jac, zeros((ml, jac.shape[1]))))
return padded_jac
return jac_wrapper
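# For example (illustrative): with ml = 1 and mu = 1, a user function
# returning a packed banded Jacobian of shape (ml + mu + 1, n) = (3, n)
# comes back from the wrapper as a (4, n) array whose extra bottom row is
# zeros, the layout the f2py-generated dvode wrapper expects.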
class vode(IntegratorBase):
runner = getattr(_vode, 'dvode', None)
messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)',
-2: 'Excess accuracy requested. (Tolerances too small.)',
-3: 'Illegal input detected. (See printed message.)',
-4: 'Repeated error test failures. (Check all input.)',
-5: 'Repeated convergence failures. (Perhaps bad'
' Jacobian supplied or wrong choice of MF or tolerances.)',
-6: 'Error weight became zero during problem. (Solution'
' component i vanished, and ATOL or ATOL(i) = 0.)'
}
supports_run_relax = 1
supports_step = 1
active_global_handle = 0
def __init__(self,
method='adams',
with_jacobian=False,
rtol=1e-6, atol=1e-12,
lband=None, uband=None,
order=12,
nsteps=500,
max_step=0.0, # corresponds to infinite
min_step=0.0,
first_step=0.0, # determined by solver
):
if re.match(method, r'adams', re.I):
self.meth = 1
elif re.match(method, r'bdf', re.I):
self.meth = 2
else:
raise ValueError('Unknown integration method %s' % method)
self.with_jacobian = with_jacobian
self.rtol = rtol
self.atol = atol
self.mu = uband
self.ml = lband
self.order = order
self.nsteps = nsteps
self.max_step = max_step
self.min_step = min_step
self.first_step = first_step
self.success = 1
self.initialized = False
def _determine_mf_and_set_bands(self, has_jac):
"""
Determine the `MF` parameter (Method Flag) for the Fortran subroutine `dvode`.
In the Fortran code, the legal values of `MF` are:
10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25,
-11, -12, -14, -15, -21, -22, -24, -25
but this Python wrapper does not use negative values.
Returns
mf = 10*self.meth + miter
self.meth is the linear multistep method:
self.meth == 1: method="adams"
self.meth == 2: method="bdf"
miter is the correction iteration method:
miter == 0: Functional iteration; no Jacobian involved.
miter == 1: Chord iteration with user-supplied full Jacobian.
miter == 2: Chord iteration with internally computed full Jacobian.
miter == 3: Chord iteration with internally computed diagonal Jacobian.
miter == 4: Chord iteration with user-supplied banded Jacobian.
miter == 5: Chord iteration with internally computed banded Jacobian.
Side effects: If either self.mu or self.ml is not None and the other is None,
then the one that is None is set to 0.
"""
jac_is_banded = self.mu is not None or self.ml is not None
if jac_is_banded:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
# has_jac is True if the user provided a Jacobian function.
if has_jac:
if jac_is_banded:
miter = 4
else:
miter = 1
else:
if jac_is_banded:
if self.ml == self.mu == 0:
miter = 3 # Chord iteration with internal diagonal Jacobian.
else:
miter = 5 # Chord iteration with internal banded Jacobian.
else:
# self.with_jacobian is set by the user in the call to ode.set_integrator.
if self.with_jacobian:
miter = 2 # Chord iteration with internal full Jacobian.
else:
miter = 0  # Functional iteration; no Jacobian involved.
mf = 10 * self.meth + miter
return mf
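# Worked example: method='bdf' (self.meth == 2) with a user-supplied full
# Jacobian (miter == 1) gives mf = 10*2 + 1 = 21, while the same method
# with no Jacobian information at all (functional iteration, miter == 0)
# gives mf = 20.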
def reset(self, n, has_jac):
mf = self._determine_mf_and_set_bands(has_jac)
if mf == 10:
lrw = 20 + 16 * n
elif mf in [11, 12]:
lrw = 22 + 16 * n + 2 * n * n
elif mf == 13:
lrw = 22 + 17 * n
elif mf in [14, 15]:
lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n
elif mf == 20:
lrw = 20 + 9 * n
elif mf in [21, 22]:
lrw = 22 + 9 * n + 2 * n * n
elif mf == 23:
lrw = 22 + 10 * n
elif mf in [24, 25]:
lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n
else:
raise ValueError('Unexpected mf=%s' % mf)
if mf % 10 in [0, 3]:
liw = 30
else:
liw = 30 + n
rwork = zeros((lrw,), float)
rwork[4] = self.first_step
rwork[5] = self.max_step
rwork[6] = self.min_step
self.rwork = rwork
iwork = zeros((liw,), int32)
if self.ml is not None:
iwork[0] = self.ml
if self.mu is not None:
iwork[1] = self.mu
iwork[4] = self.order
iwork[5] = self.nsteps
iwork[6] = 2 # mxhnil
self.iwork = iwork
self.call_args = [self.rtol, self.atol, 1, 1,
self.rwork, self.iwork, mf]
self.success = 1
self.initialized = False
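# Sizing example: mf = 22 (BDF with an internally generated full Jacobian)
# and n = 3 gives lrw = 22 + 9*3 + 2*3*3 = 67, and since 22 % 10 is not in
# [0, 3], liw = 30 + 3 = 33.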
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
if self.initialized:
self.check_handle()
else:
self.initialized = True
self.acquire_new_handle()
if self.ml is not None and self.ml > 0:
# Banded Jacobian. Wrap the user-provided function with one
# that pads the Jacobian array with the extra `self.ml` rows
# required by the f2py-generated wrapper.
jac = _vode_banded_jac_wrapper(jac, self.ml, jac_params)
args = ((f, jac, y0, t0, t1) + tuple(self.call_args) +
(f_params, jac_params))
y1, t, istate = self.runner(*args)
self.istate = istate
if istate < 0:
unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
self.messages.get(istate, unexpected_istate_msg)))
self.success = 0
else:
self.call_args[3] = 2 # upgrade istate from 1 to 2
self.istate = 2
return y1, t
def step(self, *args):
itask = self.call_args[2]
self.call_args[2] = 2
r = self.run(*args)
self.call_args[2] = itask
return r
def run_relax(self, *args):
itask = self.call_args[2]
self.call_args[2] = 3
r = self.run(*args)
self.call_args[2] = itask
return r
if vode.runner is not None:
IntegratorBase.integrator_classes.append(vode)
class zvode(vode):
runner = getattr(_vode, 'zvode', None)
supports_run_relax = 1
supports_step = 1
scalar = complex
active_global_handle = 0
def reset(self, n, has_jac):
mf = self._determine_mf_and_set_bands(has_jac)
if mf in (10,):
lzw = 15 * n
elif mf in (11, 12):
lzw = 15 * n + 2 * n ** 2
elif mf in (-11, -12):
lzw = 15 * n + n ** 2
elif mf in (13,):
lzw = 16 * n
elif mf in (14, 15):
lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n
elif mf in (-14, -15):
lzw = 16 * n + (2 * self.ml + self.mu) * n
elif mf in (20,):
lzw = 8 * n
elif mf in (21, 22):
lzw = 8 * n + 2 * n ** 2
elif mf in (-21, -22):
lzw = 8 * n + n ** 2
elif mf in (23,):
lzw = 9 * n
elif mf in (24, 25):
lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n
elif mf in (-24, -25):
lzw = 9 * n + (2 * self.ml + self.mu) * n
lrw = 20 + n
if mf % 10 in (0, 3):
liw = 30
else:
liw = 30 + n
zwork = zeros((lzw,), complex)
self.zwork = zwork
rwork = zeros((lrw,), float)
rwork[4] = self.first_step
rwork[5] = self.max_step
rwork[6] = self.min_step
self.rwork = rwork
iwork = zeros((liw,), int32)
if self.ml is not None:
iwork[0] = self.ml
if self.mu is not None:
iwork[1] = self.mu
iwork[4] = self.order
iwork[5] = self.nsteps
iwork[6] = 2 # mxhnil
self.iwork = iwork
self.call_args = [self.rtol, self.atol, 1, 1,
self.zwork, self.rwork, self.iwork, mf]
self.success = 1
self.initialized = False
if zvode.runner is not None:
IntegratorBase.integrator_classes.append(zvode)
class dopri5(IntegratorBase):
runner = getattr(_dop, 'dopri5', None)
name = 'dopri5'
supports_solout = True
messages = {1: 'computation successful',
2: 'computation successful (interrupted by solout)',
-1: 'input is not consistent',
-2: 'larger nsteps is needed',
-3: 'step size becomes too small',
-4: 'problem is probably stiff (interrupted)',
}
def __init__(self,
rtol=1e-6, atol=1e-12,
nsteps=500,
max_step=0.0,
first_step=0.0, # determined by solver
safety=0.9,
ifactor=10.0,
dfactor=0.2,
beta=0.0,
method=None,
verbosity=-1, # no messages if negative
):
self.rtol = rtol
self.atol = atol
self.nsteps = nsteps
self.max_step = max_step
self.first_step = first_step
self.safety = safety
self.ifactor = ifactor
self.dfactor = dfactor
self.beta = beta
self.verbosity = verbosity
self.success = 1
self.set_solout(None)
def set_solout(self, solout, complex=False):
self.solout = solout
self.solout_cmplx = complex
if solout is None:
self.iout = 0
else:
self.iout = 1
def reset(self, n, has_jac):
work = zeros((8 * n + 21,), float)
work[1] = self.safety
work[2] = self.dfactor
work[3] = self.ifactor
work[4] = self.beta
work[5] = self.max_step
work[6] = self.first_step
self.work = work
iwork = zeros((21,), int32)
iwork[0] = self.nsteps
iwork[2] = self.verbosity
self.iwork = iwork
self.call_args = [self.rtol, self.atol, self._solout,
self.iout, self.work, self.iwork]
self.success = 1
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
x, y, iwork, istate = self.runner(*((f, t0, y0, t1) +
tuple(self.call_args) + (f_params,)))
self.istate = istate
if istate < 0:
unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
self.messages.get(istate, unexpected_istate_msg)))
self.success = 0
return y, x
def _solout(self, nr, xold, x, y, nd, icomp, con):
if self.solout is not None:
if self.solout_cmplx:
y = y[::2] + 1j * y[1::2]
return self.solout(x, y)
else:
return 1
if dopri5.runner is not None:
IntegratorBase.integrator_classes.append(dopri5)
class dop853(dopri5):
runner = getattr(_dop, 'dop853', None)
name = 'dop853'
def __init__(self,
rtol=1e-6, atol=1e-12,
nsteps=500,
max_step=0.0,
first_step=0.0, # determined by solver
safety=0.9,
ifactor=6.0,
dfactor=0.3,
beta=0.0,
method=None,
verbosity=-1, # no messages if negative
):
super(self.__class__, self).__init__(rtol, atol, nsteps, max_step,
first_step, safety, ifactor,
dfactor, beta, method,
verbosity)
def reset(self, n, has_jac):
work = zeros((11 * n + 21,), float)
work[1] = self.safety
work[2] = self.dfactor
work[3] = self.ifactor
work[4] = self.beta
work[5] = self.max_step
work[6] = self.first_step
self.work = work
iwork = zeros((21,), int32)
iwork[0] = self.nsteps
iwork[2] = self.verbosity
self.iwork = iwork
self.call_args = [self.rtol, self.atol, self._solout,
self.iout, self.work, self.iwork]
self.success = 1
if dop853.runner is not None:
IntegratorBase.integrator_classes.append(dop853)
class lsoda(IntegratorBase):
runner = getattr(_lsoda, 'lsoda', None)
active_global_handle = 0
messages = {
2: "Integration successful.",
-1: "Excess work done on this call (perhaps wrong Dfun type).",
-2: "Excess accuracy requested (tolerances too small).",
-3: "Illegal input detected (internal error).",
-4: "Repeated error test failures (internal error).",
-5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
-6: "Error weight became zero during problem.",
-7: "Internal workspace insufficient to finish (internal error)."
}
def __init__(self,
with_jacobian=False,
rtol=1e-6, atol=1e-12,
lband=None, uband=None,
nsteps=500,
max_step=0.0, # corresponds to infinite
min_step=0.0,
first_step=0.0, # determined by solver
ixpr=0,
max_hnil=0,
max_order_ns=12,
max_order_s=5,
method=None
):
self.with_jacobian = with_jacobian
self.rtol = rtol
self.atol = atol
self.mu = uband
self.ml = lband
self.max_order_ns = max_order_ns
self.max_order_s = max_order_s
self.nsteps = nsteps
self.max_step = max_step
self.min_step = min_step
self.first_step = first_step
self.ixpr = ixpr
self.max_hnil = max_hnil
self.success = 1
self.initialized = False
def reset(self, n, has_jac):
# Calculate parameters for Fortran subroutine dvode.
if has_jac:
if self.mu is None and self.ml is None:
jt = 1
else:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
jt = 4
else:
if self.mu is None and self.ml is None:
jt = 2
else:
if self.mu is None:
self.mu = 0
if self.ml is None:
self.ml = 0
jt = 5
lrn = 20 + (self.max_order_ns + 4) * n
if jt in [1, 2]:
lrs = 22 + (self.max_order_s + 4) * n + n * n
elif jt in [4, 5]:
lrs = 22 + (self.max_order_s + 5 + 2 * self.ml + self.mu) * n
else:
raise ValueError('Unexpected jt=%s' % jt)
lrw = max(lrn, lrs)
liw = 20 + n
rwork = zeros((lrw,), float)
rwork[4] = self.first_step
rwork[5] = self.max_step
rwork[6] = self.min_step
self.rwork = rwork
iwork = zeros((liw,), int32)
if self.ml is not None:
iwork[0] = self.ml
if self.mu is not None:
iwork[1] = self.mu
iwork[4] = self.ixpr
iwork[5] = self.nsteps
iwork[6] = self.max_hnil
iwork[7] = self.max_order_ns
iwork[8] = self.max_order_s
self.iwork = iwork
self.call_args = [self.rtol, self.atol, 1, 1,
self.rwork, self.iwork, jt]
self.success = 1
self.initialized = False
def run(self, f, jac, y0, t0, t1, f_params, jac_params):
if self.initialized:
self.check_handle()
else:
self.initialized = True
self.acquire_new_handle()
args = [f, y0, t0, t1] + self.call_args[:-1] + \
[jac, self.call_args[-1], f_params, 0, jac_params]
y1, t, istate = self.runner(*args)
self.istate = istate
if istate < 0:
unexpected_istate_msg = 'Unexpected istate={:d}'.format(istate)
warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
self.messages.get(istate, unexpected_istate_msg)))
self.success = 0
else:
self.call_args[3] = 2 # upgrade istate from 1 to 2
self.istate = 2
return y1, t
def step(self, *args):
itask = self.call_args[2]
self.call_args[2] = 2
r = self.run(*args)
self.call_args[2] = itask
return r
def run_relax(self, *args):
itask = self.call_args[2]
self.call_args[2] = 3
r = self.run(*args)
self.call_args[2] = itask
return r
if lsoda.runner:
IntegratorBase.integrator_classes.append(lsoda)
| bsd-3-clause |
hubig/CSCI121-Final-Project | poetry_gen.py | 1 | 7225 | #the_poetry_generator 2017
import random #needed for random selection of words
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
import os
def main():
#"""Opens up one of the two random files."""
poem = open("Poem_Generator.txt","w") #Opens up new file "Poem_Generator.txt"
sentence = []
for i in range(5): #Create 5 sentences
sentence.append(create_sentence())
poem.write(sentence[i])
poem.write("\n")
poem.close()
def create_sentence():
"""Assemble one random sentence from the word-list files and return it."""
#Articles
articles1 = ["the","an"]
articles2 = ['the','a']
articles3 = ['The','An']
articles4 = ['The',"A"]
#Subject
animal = open("Animals.txt", "r") #Opens up the animals string
animal_list =animal.readline().split(",") #Splits the string into a list
subject = animal_list[random.randrange(0,len(animal_list))] #Subject is a random word
#Verb
verb = open("Verbs.txt","r") #Opens verbs
verb_list = verb.readline().split(",")
verbs = verb_list[random.randrange(0,len(verb_list))] #verbs is random verb
#Object
if random.randint(1, 2) == 1: #flip a coin: half the time the object comes from Objects.txt
object_file = open("Objects.txt","r") #we choose an Objects.txt entry as the object
object_list = object_file.readline().split(",")
objects = object_list[random.randrange(0,len(object_list))] #random object
object_file.close() #close here, since the file is only opened on this branch
else:
objects = animal_list[random.randrange(0,len(animal_list))] #object is an animal entry
#chooses a random adjective
adj = open("Adj.txt","r")
adj_list = adj.readline().split(",")
adjs = adj_list[random.randrange(0,len(adj_list))]
if adjs[0] in "aeiouAEIOU":
Article = articles3[random.randrange(0,len(articles3))] #if the adjective begins with a vowel, the capitalized article is 'The' or 'An'
else:
Article = articles4[random.randrange(0,len(articles4))] #otherwise it is 'The' or 'A'
# Noun Phrase + Object Phrase
nounphrase = noun_phrase(subject,adjs) #nounphrase is a concatenation of the article, the adjective, and the subject
if objects[0] in "aeiouAEIOU":
articles = articles1[random.randrange(0,len(articles1))] #if the object begins with a vowel, the article is 'the' or 'an'
else:
articles = articles2[random.randrange(0,len(articles2))] #otherwise it is 'the' or 'a'
objectphrase = obj_phrase(objects)
#adverbs
adv = open("Adverbs.txt")
adv_list = adv.readline().split(",")
advs = adv_list[random.randrange(0,len(adv_list))]
#Chooses the present-tense ending of the verb ("es" after s/h, otherwise "s") and builds the verb phrase
if verbs[len(verbs)-1] == 's' or verbs[len(verbs)-1] == 'h':
verbs = verbs +("es")
else:
verbs = verbs + 's'
verbphrase = verb_phrase(verbs,advs)
#close all the open files (Objects.txt was already closed above)
animal.close()
verb.close()
adj.close()
adv.close()
return Article+" "+repr(nounphrase) + repr(verbphrase) + " " + articles + " "+ repr(objectphrase) #return the sentence
class noun_phrase:
def __init__(self, word, adj):
self.x = word
self.y = adj
def getNoun(self):
"""Gets the noun"""
return self.x
def getAdj(self):
"""Gets the adjective"""
return self.y
def __repr__(self):
return str(self.y)+" "+str(self.x)+" "
class verb_phrase:
def __init__(self, word, adv):
self.x = word
self.y = adv
def getVerb(self):
"""Gets the verb"""
return self.x
def getAdv(self):
"""Gets the adverb"""
return self.y
def __repr__(self):
return str(self.y) + " " + str(self.x)
class obj_phrase:
def __init__(self, word):
self.x = word
def getWord(self):
"""Gets the object word"""
return self.x
def __repr__(self):
return str(self.x) + "."
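#Illustrative composition (hypothetical word choices, not read from the data
#files): with adjective "quick", subject "fox", adverb "boldly", verb "jumps"
#(after the "s" has been appended) and object "log",
# noun_phrase("fox", "quick") -> "quick fox "
# verb_phrase("jumps", "boldly") -> "boldly jumps"
# obj_phrase("log") -> "log."
#so create_sentence() assembles e.g. "The quick fox boldly jumps a log."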
class user_gui:
def __init__(self):
self.create_window() #creates window with title
self.create_widgets() #creates widgets
def open_file(self):
"""opens and returns poem text"""
f = open("Poem_Generator.txt", "r")
poems = f.read()
return poems
def create_window(self):
"""creates the window."""
self.root= tk.Tk() #creating window
self.root.title("Poem Generator")
def create_widgets(self):
"""creates all the widgets and their frames."""
s = ttk.Style() #using ttk style
s.configure('.', font=('Helvetica', 12), sticky=tk.N+tk.E+tk.S+tk.W)
"""ABOUT"""
about_frame = ttk.Frame(self.root, width = 240, height = 300)
about_frame.grid(row = 1, column = 1, sticky=tk.N+tk.E, ipadx = 10, ipady = 10)
about_frame.columnconfigure(0, weight = 1)
about_frame.rowconfigure(0, weight = 1)
about_text = """ABOUT
This is a random poem generator created by Charlie Carlson, Iain Irwin, and Nic Hubig for the CSCI121 final project."""
about_label = ttk.Label(about_frame, wraplength = 240, text = about_text)
about_label.grid(row = 0, column = 0, sticky=tk.N+tk.E, ipadx = 10, ipady = 10)
about_label.columnconfigure(0, weight = 1)
about_label.rowconfigure(0, weight = 1)
"""POETRY"""
poetry_frame = ttk.Frame(self.root, width = 240, height = 300)
poetry_frame.grid(row = 1, column = 2)
poetry_text = self.open_file()
poetry_label = ttk.Label(poetry_frame, wraplength = 240, text = poetry_text)
poetry_label.grid(row = 0, column = 0, sticky=tk.N+tk.E, ipadx = 10, ipady = 10)
poetry_label.columnconfigure(0, weight = 1)
poetry_label.rowconfigure(0, weight = 1)
"""GENERATE BUTTON"""
generate = ttk.Button(self.root, text="Generate poetry")
generate.grid(row=3, column= 1)
generate.columnconfigure(0, weight = 1)
generate.rowconfigure(0, weight = 1)
"""QUIT BUTTON"""
quit_button = ttk.Button(self.root, text="Quit")
quit_button.grid(row=3, column=2)
quit_button['command'] = self.root.destroy
program = user_gui()
program.root.mainloop()
| apache-2.0 |
zstackio/zstack-woodpecker | integrationtest/vm/mini/multiclusters/paths/multi_path270.py | 1 | 2778 | import zstackwoodpecker.test_state as ts_header
import os
TestAction = ts_header.TestAction
def path():
return dict(initial_formation="template5", checking_point=1, faild_point=100000, path_list=[
[TestAction.create_mini_vm, 'vm1', 'cluster=cluster2'],
[TestAction.destroy_vm, 'vm1'],
[TestAction.recover_vm, 'vm1'],
[TestAction.create_mini_vm, 'vm2', 'cluster=cluster1'],
[TestAction.create_vm_backup, 'vm2', 'vm2-backup1'],
[TestAction.create_mini_vm, 'vm3', 'cluster=cluster1'],
[TestAction.stop_vm, 'vm3'],
[TestAction.start_vm, 'vm3'],
[TestAction.start_vm, 'vm1'],
[TestAction.migrate_vm, 'vm1'],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.create_volume, 'volume1', 'cluster=cluster2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume1'],
[TestAction.create_volume, 'volume2', 'cluster=cluster1', 'flag=scsi'],
[TestAction.delete_volume, 'volume2'],
[TestAction.add_image, 'image1', 'root', 'http://172.20.1.28/mirror/diskimages/centos_vdbench.qcow2'],
[TestAction.create_volume, 'volume3', 'cluster=cluster1', 'flag=scsi'],
[TestAction.attach_volume, 'vm2', 'volume3'],
[TestAction.create_volume_backup, 'volume3', 'volume3-backup2'],
[TestAction.delete_volume_backup, 'volume3-backup2'],
[TestAction.delete_image, 'image1'],
[TestAction.recover_image, 'image1'],
[TestAction.delete_image, 'image1'],
[TestAction.expunge_image, 'image1'],
[TestAction.create_volume, 'volume4', 'cluster=cluster2', 'flag=scsi'],
[TestAction.attach_volume, 'vm1', 'volume4'],
[TestAction.start_vm, 'vm1'],
[TestAction.create_volume_backup, 'volume4', 'volume4-backup3'],
[TestAction.stop_vm, 'vm1'],
[TestAction.change_vm_ha, 'vm1'],
[TestAction.poweroff_only, 'cluster=cluster1'],
[TestAction.create_image_from_volume, 'vm3', 'vm3-image2'],
[TestAction.detach_volume, 'volume4'],
[TestAction.create_volume, 'volume5', 'cluster=cluster2', 'flag=thin,scsi'],
[TestAction.use_volume_backup, 'volume4-backup3'],
[TestAction.start_vm, 'vm2'],
[TestAction.delete_volume, 'volume3'],
[TestAction.expunge_volume, 'volume3'],
[TestAction.destroy_vm, 'vm3'],
[TestAction.attach_volume, 'vm1', 'volume4'],
[TestAction.create_volume_backup, 'volume4', 'volume4-backup4'],
[TestAction.migrate_vm, 'vm1'],
[TestAction.poweroff_only, 'cluster=cluster2'],
[TestAction.stop_vm, 'vm2'],
[TestAction.use_vm_backup, 'vm2-backup1'],
[TestAction.start_vm, 'vm2'],
])
'''
The final status:
Running:['vm1', 'vm2']
Stopped:[]
Enabled:['vm2-backup1', 'volume4-backup3', 'volume4-backup4', 'vm3-image2']
attached:['volume1', 'volume4']
Detached:['volume5']
Deleted:['vm3', 'volume2', 'volume3-backup2']
Expunged:['volume3', 'image1']
Ha:['vm1']
Group:
vm_backup1:['vm2-backup1']---vm2@
''' | apache-2.0 |
britcey/ansible | lib/ansible/modules/cloud/azure/azure_rm_securitygroup.py | 36 | 27324 | #!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <[email protected]>
# Chris Houseknecht, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'curated'}
DOCUMENTATION = '''
---
module: azure_rm_securitygroup
version_added: "2.1"
short_description: Manage Azure network security groups.
description:
- Create, update or delete a network security group. A security group contains Access Control List (ACL) rules
that allow or deny network traffic to subnets or individual network interfaces. A security group is created
with a set of default security rules and an empty set of security rules. Shape traffic flow by adding
rules to the empty set of security rules.
options:
default_rules:
description:
- The set of default rules automatically added to a security group at creation. In general default
rules will not be modified. Modify rules to shape the flow of traffic to or from a subnet or NIC. See
rules below for the makeup of a rule dict.
required: false
default: null
location:
description:
- Valid azure location. Defaults to location of the resource group.
default: resource_group location
required: false
name:
description:
- Name of the security group to operate on.
required: false
default: null
purge_default_rules:
description:
- Remove any existing rules not matching those defined in the default_rules parameter.
default: false
required: false
purge_rules:
description:
- Remove any existing rules not matching those defined in the rules parameters.
default: false
required: false
resource_group:
description:
- Name of the resource group the security group belongs to.
required: true
rules:
description:
- Set of rules shaping traffic flow to or from a subnet or NIC. Each rule is a dictionary.
required: false
default: null
suboptions:
name:
description:
- Unique name for the rule.
required: true
description:
description:
- Short description of the rule's purpose.
protocol:
description: Accepted traffic protocol.
choices:
- Udp
- Tcp
- "*"
default: "*"
source_port_range:
description:
- Port or range of ports from which traffic originates.
default: "*"
destination_port_range:
description:
- Port or range of ports to which traffic is headed.
default: "*"
source_address_prefix:
description:
- IP address or CIDR from which traffic originates.
default: "*"
destination_address_prefix:
description:
- IP address or CIDR to which traffic is headed.
default: "*"
access:
description:
- Whether or not to allow the traffic flow.
choices:
- Allow
- Deny
default: Allow
priority:
description:
- Order in which to apply the rule. Must be a unique integer between 100 and 4096, inclusive.
required: true
direction:
description:
- Indicates the direction of the traffic flow.
choices:
- Inbound
- Outbound
default: Inbound
state:
description:
- Assert the state of the security group. Set to 'present' to create or update a security group. Set to
'absent' to remove a security group.
default: present
required: false
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- "Chris Houseknecht (@chouseknecht)"
- "Matt Davis (@nitzmahone)"
'''
EXAMPLES = '''
# Create a security group
- azure_rm_securitygroup:
resource_group: mygroup
name: mysecgroup
purge_rules: yes
rules:
- name: DenySSH
protocol: TCP
destination_port_range: 22
access: Deny
priority: 100
direction: Inbound
- name: 'AllowSSH'
protocol: TCP
source_address_prefix: '174.109.158.0/24'
destination_port_range: 22
access: Allow
priority: 101
direction: Inbound
# Update rules on existing security group
- azure_rm_securitygroup:
resource_group: mygroup
name: mysecgroup
rules:
- name: DenySSH
protocol: TCP
destination_port_range: 22-23
access: Deny
priority: 100
direction: Inbound
- name: AllowSSHFromHome
protocol: TCP
source_address_prefix: '174.109.158.0/24'
destination_port_range: 22-23
access: Allow
priority: 102
direction: Inbound
tags:
testing: testing
delete: on-exit
# Delete security group
- azure_rm_securitygroup:
resource_group: mygroup
name: mysecgroup
state: absent
'''
RETURN = '''
state:
description: Current state of the security group.
returned: always
type: dict
sample: {
"default_rules": [
{
"access": "Allow",
"description": "Allow inbound traffic from all VMs in VNET",
"destination_address_prefix": "VirtualNetwork",
"destination_port_range": "*",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowVnetInBound",
"name": "AllowVnetInBound",
"priority": 65000,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "VirtualNetwork",
"source_port_range": "*"
},
{
"access": "Allow",
"description": "Allow inbound traffic from azure load balancer",
"destination_address_prefix": "*",
"destination_port_range": "*",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowAzureLoadBalancerInBound",
"name": "AllowAzureLoadBalancerInBound",
"priority": 65001,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "AzureLoadBalancer",
"source_port_range": "*"
},
{
"access": "Deny",
"description": "Deny all inbound traffic",
"destination_address_prefix": "*",
"destination_port_range": "*",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/DenyAllInBound",
"name": "DenyAllInBound",
"priority": 65500,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
},
{
"access": "Allow",
"description": "Allow outbound traffic from all VMs to all VMs in VNET",
"destination_address_prefix": "VirtualNetwork",
"destination_port_range": "*",
"direction": "Outbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowVnetOutBound",
"name": "AllowVnetOutBound",
"priority": 65000,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "VirtualNetwork",
"source_port_range": "*"
},
{
"access": "Allow",
"description": "Allow outbound traffic from all VMs to Internet",
"destination_address_prefix": "Internet",
"destination_port_range": "*",
"direction": "Outbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/AllowInternetOutBound",
"name": "AllowInternetOutBound",
"priority": 65001,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
},
{
"access": "Deny",
"description": "Deny all outbound traffic",
"destination_address_prefix": "*",
"destination_port_range": "*",
"direction": "Outbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/defaultSecurityRules/DenyAllOutBound",
"name": "DenyAllOutBound",
"priority": 65500,
"protocol": "*",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
}
],
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup",
"location": "westus",
"name": "mysecgroup",
"network_interfaces": [],
"rules": [
{
"access": "Deny",
"description": null,
"destination_address_prefix": "*",
"destination_port_range": "22",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/securityRules/DenySSH",
"name": "DenySSH",
"priority": 100,
"protocol": "Tcp",
"provisioning_state": "Succeeded",
"source_address_prefix": "*",
"source_port_range": "*"
},
{
"access": "Allow",
"description": null,
"destination_address_prefix": "*",
"destination_port_range": "22",
"direction": "Inbound",
"etag": 'W/"edf48d56-b315-40ca-a85d-dbcb47f2da7d"',
"id": "/subscriptions/3f7e29ba-24e0-42f6-8d9c-5149a14bda37/resourceGroups/Testing/providers/Microsoft.Network/networkSecurityGroups/mysecgroup/securityRules/AllowSSH",
"name": "AllowSSH",
"priority": 101,
"protocol": "Tcp",
"provisioning_state": "Succeeded",
"source_address_prefix": "174.109.158.0/24",
"source_port_range": "*"
}
],
"subnets": [],
"tags": {
"delete": "on-exit",
"foo": "bar",
"testing": "testing"
},
"type": "Microsoft.Network/networkSecurityGroups"
}
''' # NOQA
from ansible.module_utils.basic import *
from ansible.module_utils.azure_rm_common import *
try:
from msrestazure.azure_exceptions import CloudError
from azure.common import AzureHttpError
from azure.mgmt.network.models import NetworkSecurityGroup, SecurityRule
from azure.mgmt.network.models.network_management_client_enums import (SecurityRuleAccess,
SecurityRuleDirection,
SecurityRuleProtocol)
except ImportError:
# This is handled in azure_rm_common
pass
def validate_rule(rule, rule_type=None):
'''
Apply defaults to a rule dictionary and check that all values are valid.
:param rule: rule dict
:param rule_type: Set to 'default' if the rule is part of the default set of rules.
:return: None
'''
if not rule.get('name'):
raise Exception("Rule name value is required.")
priority = rule.get('priority', None)
if not priority:
raise Exception("Rule priority is required.")
if not isinstance(priority, (int, long)):
raise Exception("Rule priority attribute must be an integer.")
if rule_type != 'default' and (priority < 100 or priority > 4096):
raise Exception("Rule priority must be between 100 and 4096")
if not rule.get('access'):
rule['access'] = 'Allow'
access_names = [member.value for member in SecurityRuleAccess]
if rule['access'] not in access_names:
raise Exception("Rule access must be one of [{0}]".format(', '.join(access_names)))
if not rule.get('destination_address_prefix'):
rule['destination_address_prefix'] = '*'
if not rule.get('source_address_prefix'):
rule['source_address_prefix'] = '*'
if not rule.get('protocol'):
rule['protocol'] = '*'
protocol_names = [member.value for member in SecurityRuleProtocol]
if rule['protocol'] not in protocol_names:
raise Exception("Rule protocol must be one of [{0}]".format(', '.join(protocol_names)))
if not rule.get('direction'):
rule['direction'] = 'Inbound'
direction_names = [member.value for member in SecurityRuleDirection]
if rule['direction'] not in direction_names:
raise Exception("Rule direction must be one of [{0}]".format(', '.join(direction_names)))
if not rule.get('source_port_range'):
rule['source_port_range'] = '*'
if not rule.get('destination_port_range'):
rule['destination_port_range'] = '*'
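# Hedged sketch (invented values): validate_rule mutates the rule dict in
# place, filling defaults for any keys the caller omitted.
#
#   rule = {'name': 'AllowSSH', 'priority': 101}
#   validate_rule(rule)
#   rule['access']     # -> 'Allow'
#   rule['protocol']   # -> '*'
#   rule['direction']  # -> 'Inbound'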
def compare_rules(r, rule):
matched = False
changed = False
if r['name'] == rule['name']:
matched = True
if rule.get('description', None) != r['description']:
changed = True
r['description'] = rule['description']
if rule['protocol'] != r['protocol']:
changed = True
r['protocol'] = rule['protocol']
if rule['source_port_range'] != r['source_port_range']:
changed = True
r['source_port_range'] = rule['source_port_range']
if rule['destination_port_range'] != r['destination_port_range']:
changed = True
r['destination_port_range'] = rule['destination_port_range']
if rule['access'] != r['access']:
changed = True
r['access'] = rule['access']
if rule['priority'] != r['priority']:
changed = True
r['priority'] = rule['priority']
if rule['direction'] != r['direction']:
changed = True
r['direction'] = rule['direction']
return matched, changed
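# Hedged illustration (invented dicts): compare_rules updates the existing
# rule r in place and reports whether it matched by name and whether any
# attribute changed.
#
#   r = {'name': 'AllowSSH', 'priority': 101, 'access': 'Allow', ...}
#   rule = dict(r, priority=102)
#   compare_rules(r, rule)   # -> (True, True); r['priority'] is now 102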
def create_rule_instance(rule):
'''
Create an instance of SecurityRule from a dict.
:param rule: dict
:return: SecurityRule
'''
return SecurityRule(
rule['protocol'],
rule['source_address_prefix'],
rule['destination_address_prefix'],
rule['access'],
rule['direction'],
id=rule.get('id', None),
description=rule.get('description', None),
source_port_range=rule.get('source_port_range', None),
destination_port_range=rule.get('destination_port_range', None),
priority=rule.get('priority', None),
provisioning_state=rule.get('provisioning_state', None),
name=rule.get('name', None),
etag=rule.get('etag', None)
)
def create_rule_dict_from_obj(rule):
'''
Create a dict from an instance of a SecurityRule.
:param rule: SecurityRule
:return: dict
'''
return dict(
id=rule.id,
name=rule.name,
description=rule.description,
protocol=rule.protocol,
source_port_range=rule.source_port_range,
destination_port_range=rule.destination_port_range,
source_address_prefix=rule.source_address_prefix,
destination_address_prefix=rule.destination_address_prefix,
access=rule.access,
priority=rule.priority,
direction=rule.direction,
provisioning_state=rule.provisioning_state,
etag=rule.etag
)
def create_network_security_group_dict(nsg):
results = dict(
id=nsg.id,
name=nsg.name,
type=nsg.type,
location=nsg.location,
tags=nsg.tags,
)
results['rules'] = []
if nsg.security_rules:
for rule in nsg.security_rules:
results['rules'].append(create_rule_dict_from_obj(rule))
results['default_rules'] = []
if nsg.default_security_rules:
for rule in nsg.default_security_rules:
results['default_rules'].append(create_rule_dict_from_obj(rule))
results['network_interfaces'] = []
if nsg.network_interfaces:
for interface in nsg.network_interfaces:
results['network_interfaces'].append(interface.id)
results['subnets'] = []
if nsg.subnets:
for subnet in nsg.subnets:
results['subnets'].append(subnet.id)
return results
class AzureRMSecurityGroup(AzureRMModuleBase):
def __init__(self):
self.module_arg_spec = dict(
default_rules=dict(type='list'),
location=dict(type='str'),
name=dict(type='str', required=True),
purge_default_rules=dict(type='bool', default=False),
purge_rules=dict(type='bool', default=False),
resource_group=dict(required=True, type='str'),
rules=dict(type='list'),
state=dict(type='str', default='present', choices=['present', 'absent']),
)
self.default_rules = None
self.location = None
self.name = None
self.purge_default_rules = None
self.purge_rules = None
self.resource_group = None
self.rules = None
self.state = None
self.tags = None
self.results = dict(
changed=False,
state=dict()
)
super(AzureRMSecurityGroup, self).__init__(self.module_arg_spec,
supports_check_mode=True)
def exec_module(self, **kwargs):
for key in self.module_arg_spec.keys() + ['tags']:
setattr(self, key, kwargs[key])
changed = False
results = dict()
resource_group = self.get_resource_group(self.resource_group)
if not self.location:
# Set default location
self.location = resource_group.location
if self.rules:
for rule in self.rules:
try:
validate_rule(rule)
except Exception as exc:
self.fail("Error validating rule {0} - {1}".format(rule, str(exc)))
if self.default_rules:
for rule in self.default_rules:
try:
validate_rule(rule, 'default')
except Exception as exc:
self.fail("Error validating default rule {0} - {1}".format(rule, str(exc)))
try:
nsg = self.network_client.network_security_groups.get(self.resource_group, self.name)
results = create_network_security_group_dict(nsg)
self.log("Found security group:")
self.log(results, pretty_print=True)
self.check_provisioning_state(nsg, self.state)
if self.state == 'present':
pass
elif self.state == 'absent':
self.log("CHANGED: security group found but state is 'absent'")
changed = True
except CloudError:
if self.state == 'present':
self.log("CHANGED: security group not found and state is 'present'")
changed = True
if self.state == 'present' and not changed:
# update the security group
self.log("Update security group {0}".format(self.name))
if self.rules:
for rule in self.rules:
rule_matched = False
for r in results['rules']:
                        match, rule_changed = compare_rules(r, rule)
                        if rule_changed:
                            changed = True
if match:
rule_matched = True
if not rule_matched:
changed = True
results['rules'].append(rule)
if self.purge_rules:
new_rules = []
for rule in results['rules']:
for r in self.rules:
if rule['name'] == r['name']:
new_rules.append(rule)
results['rules'] = new_rules
if self.default_rules:
for rule in self.default_rules:
rule_matched = False
for r in results['default_rules']:
                        match, rule_changed = compare_rules(r, rule)
                        if rule_changed:
                            changed = True
if match:
rule_matched = True
if not rule_matched:
changed = True
results['default_rules'].append(rule)
if self.purge_default_rules:
new_default_rules = []
for rule in results['default_rules']:
for r in self.default_rules:
if rule['name'] == r['name']:
new_default_rules.append(rule)
results['default_rules'] = new_default_rules
update_tags, results['tags'] = self.update_tags(results['tags'])
if update_tags:
changed = True
self.results['changed'] = changed
self.results['state'] = results
if not self.check_mode:
self.results['state'] = self.create_or_update(results)
elif self.state == 'present' and changed:
# create the security group
self.log("Create security group {0}".format(self.name))
if not self.location:
self.fail("Parameter error: location required when creating a security group.")
results['name'] = self.name
results['location'] = self.location
results['rules'] = []
results['default_rules'] = []
results['tags'] = {}
if self.rules:
results['rules'] = self.rules
if self.default_rules:
results['default_rules'] = self.default_rules
if self.tags:
results['tags'] = self.tags
self.results['changed'] = changed
self.results['state'] = results
if not self.check_mode:
self.results['state'] = self.create_or_update(results)
elif self.state == 'absent' and changed:
self.log("Delete security group {0}".format(self.name))
self.results['changed'] = changed
self.results['state'] = dict()
if not self.check_mode:
self.delete()
                # delete() does not return anything; if no exception was
                # raised, assume the deletion succeeded.
self.results['state']['status'] = 'Deleted'
return self.results
def create_or_update(self, results):
parameters = NetworkSecurityGroup()
if results.get('rules'):
parameters.security_rules = []
for rule in results.get('rules'):
parameters.security_rules.append(create_rule_instance(rule))
if results.get('default_rules'):
parameters.default_security_rules = []
for rule in results.get('default_rules'):
parameters.default_security_rules.append(create_rule_instance(rule))
parameters.tags = results.get('tags')
parameters.location = results.get('location')
try:
poller = self.network_client.network_security_groups.create_or_update(self.resource_group,
self.name,
parameters)
result = self.get_poller_result(poller)
except AzureHttpError as exc:
self.fail("Error creating/upating security group {0} - {1}".format(self.name, str(exc)))
return create_network_security_group_dict(result)
def delete(self):
try:
poller = self.network_client.network_security_groups.delete(self.resource_group, self.name)
result = self.get_poller_result(poller)
except AzureHttpError as exc:
raise Exception("Error deleting security group {0} - {1}".format(self.name, str(exc)))
return result
def main():
AzureRMSecurityGroup()
if __name__ == '__main__':
main()
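# Hedged usage sketch (parameter names taken from module_arg_spec above;
# values invented for illustration):
#
#   - azure_rm_securitygroup:
#       resource_group: Testing
#       name: mysecgroup
#       purge_rules: yes
#       rules:
#         - name: AllowSSH
#           protocol: Tcp
#           destination_port_range: 22
#           access: Allow
#           priority: 101
#           direction: Inbound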
| gpl-3.0 |
thomasgilgenast/spqr-nonrel | django/contrib/admindocs/urls.py | 336 | 1089 | from django.conf.urls.defaults import *
from django.contrib.admindocs import views
urlpatterns = patterns('',
url('^$',
views.doc_index,
name='django-admindocs-docroot'
),
url('^bookmarklets/$',
views.bookmarklets,
name='django-admindocs-bookmarklets'
),
url('^tags/$',
views.template_tag_index,
name='django-admindocs-tags'
),
url('^filters/$',
views.template_filter_index,
name='django-admindocs-filters'
),
url('^views/$',
views.view_index,
name='django-admindocs-views-index'
),
url('^views/(?P<view>[^/]+)/$',
views.view_detail,
name='django-admindocs-views-detail'
),
url('^models/$',
views.model_index,
name='django-admindocs-models-index'
),
url('^models/(?P<app_label>[^\.]+)\.(?P<model_name>[^/]+)/$',
views.model_detail,
name='django-admindocs-models-detail'
),
url('^templates/(?P<template>.*)/$',
views.template_detail,
name='django-admindocs-templates'
),
)
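# Hedged hookup sketch (Django 1.x-era conventions used by this file): these
# URLs are typically mounted from a project urlconf via include(), e.g.
#
#   urlpatterns = patterns('',
#       (r'^admin/doc/', include('django.contrib.admindocs.urls')),
#   )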
| bsd-3-clause |
fastavro/fastavro | fastavro/_validation_py.py | 1 | 11047 | import array
import numbers
from collections.abc import Mapping, Sequence
from fastavro.const import INT_MAX_VALUE, INT_MIN_VALUE, LONG_MAX_VALUE, LONG_MIN_VALUE
from ._validate_common import ValidationError, ValidationErrorData
from .schema import extract_record_type, extract_logical_type, schema_name, parse_schema
from .logical_writers import LOGICAL_WRITERS
from ._schema_common import UnknownType
def validate_null(datum, **kwargs):
"""
Checks that the data value is None.
Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs
"""
return datum is None
def validate_boolean(datum, **kwargs):
"""
Check that the data value is bool instance
Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs
"""
return isinstance(datum, bool)
def validate_string(datum, **kwargs):
"""
Check that the data value is string
Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs
"""
return isinstance(datum, str)
def validate_bytes(datum, **kwargs):
"""
Check that the data value is python bytes type
Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs
"""
return isinstance(datum, (bytes, bytearray))
def validate_int(datum, **kwargs):
"""
    Check that the data value is a non-floating-point number
    that fits in a signed 32-bit integer.
    Int32 = -2147483648 <= datum <= 2147483647
conditional python types: int, numbers.Integral
Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs
"""
return (
isinstance(datum, (int, numbers.Integral))
and INT_MIN_VALUE <= datum <= INT_MAX_VALUE
and not isinstance(datum, bool)
)
def validate_long(datum, **kwargs):
"""
    Check that the data value is a non-floating-point number
    that fits in a signed 64-bit integer.
    Int64 = -9223372036854775808 <= datum <= 9223372036854775807
conditional python types: int, numbers.Integral
    Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs
"""
return (
isinstance(datum, (int, numbers.Integral))
and LONG_MIN_VALUE <= datum <= LONG_MAX_VALUE
and not isinstance(datum, bool)
)
def validate_float(datum, **kwargs):
"""
    Check that the data value is a single- or double-precision
    floating point number.
conditional python types
(int, float, numbers.Real)
Parameters
----------
datum: Any
Data being validated
kwargs: Any
Unused kwargs
"""
return isinstance(datum, (int, float, numbers.Real)) and not isinstance(datum, bool)
def validate_fixed(datum, schema, **kwargs):
"""
Check that the data value is fixed width bytes,
matching the schema['size'] exactly!
Parameters
----------
datum: Any
Data being validated
schema: dict
Schema
kwargs: Any
Unused kwargs
"""
return isinstance(datum, bytes) and len(datum) == schema["size"]
def validate_enum(datum, schema, **kwargs):
"""
Check that the data value matches one of the enum symbols.
i.e "blue" in ["red", green", "blue"]
Parameters
----------
datum: Any
Data being validated
schema: dict
Schema
kwargs: Any
Unused kwargs
"""
return datum in schema["symbols"]
def validate_array(datum, schema, named_schemas, parent_ns=None, raise_errors=True):
"""
Check that the data list values all match schema['items'].
Parameters
----------
datum: Any
Data being validated
schema: dict
Schema
parent_ns: str
parent namespace
raise_errors: bool
If true, raises ValidationError on invalid data
"""
return (
isinstance(datum, (Sequence, array.array))
and not isinstance(datum, str)
and all(
_validate(
datum=d,
schema=schema["items"],
named_schemas=named_schemas,
field=parent_ns,
raise_errors=raise_errors,
)
for d in datum
)
)
def validate_map(datum, schema, named_schemas, parent_ns=None, raise_errors=True):
"""
Check that the data is a Map(k,v)
matching values to schema['values'] type.
Parameters
----------
datum: Any
Data being validated
schema: dict
Schema
parent_ns: str
parent namespace
raise_errors: bool
If true, raises ValidationError on invalid data
"""
return (
isinstance(datum, Mapping)
and all(isinstance(k, str) for k in datum)
and all(
_validate(
datum=v,
schema=schema["values"],
named_schemas=named_schemas,
field=parent_ns,
raise_errors=raise_errors,
)
for v in datum.values()
)
)
def validate_record(datum, schema, named_schemas, parent_ns=None, raise_errors=True):
"""
    Check that the data is a Mapping type with all schema-defined
    fields validated as True.
Parameters
----------
datum: Any
Data being validated
schema: dict
Schema
parent_ns: str
parent namespace
raise_errors: bool
If true, raises ValidationError on invalid data
"""
_, fullname = schema_name(schema, parent_ns)
return (
isinstance(datum, Mapping)
and not ("-type" in datum and datum["-type"] != fullname)
and all(
_validate(
datum=datum.get(f["name"], f.get("default")),
schema=f["type"],
named_schemas=named_schemas,
field=f"{fullname}.{f['name']}",
raise_errors=raise_errors,
)
for f in schema["fields"]
)
)
def validate_union(datum, schema, named_schemas, parent_ns=None, raise_errors=True):
"""
    Check that the data validates against at least one of the
    candidate schemas in the union.
Parameters
----------
datum: Any
Data being validated
schema: dict
Schema
parent_ns: str
parent namespace
raise_errors: bool
If true, raises ValidationError on invalid data
"""
if isinstance(datum, tuple):
(name, datum) = datum
for candidate in schema:
if extract_record_type(candidate) == "record":
schema_name = candidate["name"]
else:
schema_name = candidate
if schema_name == name:
return _validate(
datum,
schema=candidate,
named_schemas=named_schemas,
field=parent_ns,
raise_errors=raise_errors,
)
else:
return False
errors = []
for s in schema:
try:
ret = _validate(
datum,
schema=s,
named_schemas=named_schemas,
field=parent_ns,
raise_errors=raise_errors,
)
if ret:
# We exit on the first passing type in Unions
return True
except ValidationError as e:
errors.extend(e.errors)
if raise_errors:
raise ValidationError(*errors)
return False
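# Hedged illustration (invented schema): a (name, datum) tuple pins the union
# branch instead of trying each candidate in order.
#
#   _validate(("string", "hi"), ["null", "string"], named_schemas={})  # checks "string" only
#   _validate("hi", ["null", "string"], named_schemas={})              # tries "null", then "string"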
VALIDATORS = {
"null": validate_null,
"boolean": validate_boolean,
"string": validate_string,
"int": validate_int,
"long": validate_long,
"float": validate_float,
"double": validate_float,
"bytes": validate_bytes,
"fixed": validate_fixed,
"enum": validate_enum,
"array": validate_array,
"map": validate_map,
"union": validate_union,
"error_union": validate_union,
"record": validate_record,
"error": validate_record,
"request": validate_record,
}
def _validate(datum, schema, named_schemas, field=None, raise_errors=True):
# This function expects the schema to already be parsed
record_type = extract_record_type(schema)
result = None
logical_type = extract_logical_type(schema)
if logical_type:
prepare = LOGICAL_WRITERS.get(logical_type)
if prepare:
datum = prepare(datum, schema)
validator = VALIDATORS.get(record_type)
if validator:
result = validator(
datum,
schema=schema,
named_schemas=named_schemas,
parent_ns=field,
raise_errors=raise_errors,
)
elif record_type in named_schemas:
result = _validate(
datum,
schema=named_schemas[record_type],
named_schemas=named_schemas,
field=field,
raise_errors=raise_errors,
)
else:
raise UnknownType(record_type)
if raise_errors and result is False:
raise ValidationError(ValidationErrorData(datum, schema, field))
return result
def validate(datum, schema, field=None, raise_errors=True):
"""
Determine if a python datum is an instance of a schema.
Parameters
----------
datum: Any
Data being validated
schema: dict
Schema
field: str, optional
Record field being validated
raise_errors: bool, optional
If true, errors are raised for invalid data. If false, a simple
True (valid) or False (invalid) result is returned
Example::
from fastavro.validation import validate
schema = {...}
record = {...}
validate(record, schema)
"""
named_schemas = {}
parsed_schema = parse_schema(schema, named_schemas)
return _validate(datum, parsed_schema, named_schemas, field, raise_errors)
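# Hedged concrete example (schema and record invented for illustration):
#
#   schema = {"type": "record", "name": "T", "fields": [{"name": "a", "type": "int"}]}
#   validate({"a": 1}, schema)                        # -> True
#   validate({"a": "x"}, schema)                      # raises ValidationError
#   validate({"a": "x"}, schema, raise_errors=False)  # -> False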
def validate_many(records, schema, raise_errors=True):
"""
Validate a list of data!
Parameters
----------
records: iterable
List of records to validate
schema: dict
Schema
raise_errors: bool, optional
If true, errors are raised for invalid data. If false, a simple
True (valid) or False (invalid) result is returned
Example::
from fastavro.validation import validate_many
schema = {...}
records = [{...}, {...}, ...]
validate_many(records, schema)
"""
named_schemas = {}
parsed_schema = parse_schema(schema, named_schemas)
errors = []
results = []
for record in records:
try:
results.append(
_validate(
record, parsed_schema, named_schemas, raise_errors=raise_errors
)
)
except ValidationError as e:
errors.extend(e.errors)
if raise_errors and errors:
raise ValidationError(*errors)
return all(results)
| mit |
tamland/xbmc | lib/libUPnP/Neptune/Extras/Tools/Logging/NeptuneLogConsole.py | 22 | 2839 | #!/usr/bin/env python
from socket import *
from optparse import OptionParser
UDP_ADDR = "0.0.0.0"
UDP_PORT = 7724
BUFFER_SIZE = 65536
#HEADER_KEYS = ['Logger', 'Level', 'Source-File', 'Source-Function', 'Source-Line', 'TimeStamp']
HEADER_KEYS = {
    'mini': ('Level',),  # trailing comma: one-element tuple, not a plain string
'standard': ('Logger', 'Level', 'Source-Function'),
'long': ('Logger', 'Level', 'Source-File', 'Source-Line', 'Source-Function'),
'all': ('Logger', 'Level', 'Source-File', 'Source-Line', 'Source-Function', 'TimeStamp'),
'custom': ()
}
Senders = {}
class LogRecord:
def __init__(self, data):
offset = 0
self.headers = {}
for line in data.split("\r\n"):
offset += len(line)+2
if ':' not in line: break
key,value=line.split(":",1)
self.headers[key] = value.strip()
self.body = data[offset:]
def __getitem__(self, index):
return self.headers[index]
def format(self, sender_index, keys):
parts = ['['+str(sender_index)+']']
if 'Level' in keys:
parts.append('['+self.headers['Level']+']')
if 'Logger' in keys:
parts.append(self.headers['Logger'])
if 'Source-File' in keys:
if 'Source-Line' in keys:
parts.append(self.headers['Source-File']+':'+self.headers['Source-Line'])
else:
parts.append(self.headers['Source-File'])
if 'TimeStamp' in keys:
parts.append(self.headers['TimeStamp'])
if 'Source-Function' in keys:
parts.append(self.headers['Source-Function'])
parts.append(self.body)
return ' '.join(parts)
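# Hedged sketch of the datagram layout LogRecord expects (payload invented):
# "Key: Value" header lines, a blank line, then the message body.
#
#   data = "Logger: root\r\nLevel: INFO\r\nSource-Function: main\r\n\r\nhello"
#   r = LogRecord(data)
#   r['Level']   # -> 'INFO'
#   r.body       # -> 'hello'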
class Listener:
def __init__(self, format='standard', port=UDP_PORT):
self.socket = socket(AF_INET,SOCK_DGRAM)
self.socket.bind((UDP_ADDR, port))
self.format_keys = HEADER_KEYS[format]
def listen(self):
while True:
data,addr = self.socket.recvfrom(BUFFER_SIZE)
sender_index = len(Senders.keys())
if addr in Senders:
sender_index = Senders[addr]
else:
print "### NEW SENDER:", addr
Senders[addr] = sender_index
record = LogRecord(data)
print record.format(sender_index, self.format_keys)
### main
parser = OptionParser(usage="%prog [options]")
parser.add_option("-p", "--port", dest="port", help="port number to listen on", type="int", default=UDP_PORT)
parser.add_option("-f", "--format", dest="format", help="log format (mini, standard, long, or all)", choices=('mini', 'standard', 'long', 'all'), default='standard')
(options, args) = parser.parse_args()
print "Listening on port", options.port
l = Listener(format=options.format, port=options.port)
l.listen()
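# Hedged CLI sketch (options defined by the parser above):
#   python NeptuneLogConsole.py --port 7724 --format long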
| gpl-2.0 |