repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
vitan/hue | desktop/core/ext-py/Django-1.6.10/django/contrib/messages/tests/base.py | 104 | 14243 | from django import http
from django.conf import settings, global_settings
from django.contrib.messages import constants, utils, get_level, set_level
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.storage import default_storage, base
from django.contrib.messages.storage.base import Message
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.utils.translation import ugettext_lazy
from django.utils.unittest import skipIf
def skipUnlessAuthIsInstalled(func):
return skipIf(
'django.contrib.auth' not in settings.INSTALLED_APPS,
"django.contrib.auth isn't installed")(func)
def add_level_messages(storage):
"""
Adds 6 messages from different levels (including a custom one) to a storage
instance.
"""
storage.add(constants.INFO, 'A generic info message')
storage.add(29, 'Some custom level')
storage.add(constants.DEBUG, 'A debugging message', extra_tags='extra-tag')
storage.add(constants.WARNING, 'A warning')
storage.add(constants.ERROR, 'An error')
storage.add(constants.SUCCESS, 'This was a triumph.')
class override_settings_tags(override_settings):
def enable(self):
super(override_settings_tags, self).enable()
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, we need to update that constant too.
self.old_level_tags = base.LEVEL_TAGS
base.LEVEL_TAGS = utils.get_level_tags()
def disable(self):
super(override_settings_tags, self).disable()
base.LEVEL_TAGS = self.old_level_tags
class BaseTests(object):
storage_class = default_storage
urls = 'django.contrib.messages.tests.urls'
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self.settings_override = override_settings_tags(
TEMPLATE_DIRS = (),
TEMPLATE_CONTEXT_PROCESSORS = global_settings.TEMPLATE_CONTEXT_PROCESSORS,
MESSAGE_TAGS = '',
MESSAGE_STORAGE = '%s.%s' % (self.storage_class.__module__,
self.storage_class.__name__),
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer',
)
self.settings_override.enable()
def tearDown(self):
self.settings_override.disable()
def get_request(self):
return http.HttpRequest()
def get_response(self):
return http.HttpResponse()
def get_storage(self, data=None):
"""
Returns the storage backend, setting its loaded data to the ``data``
argument.
This method avoids the storage ``_get`` method from getting called so
that other parts of the storage backend can be tested independent of
the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assertTrue(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, ugettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, tests that messages are properly
stored and then retrieved across the full request/redirect/response
cycle.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
messages = [Message(self.levels[level], msg) for msg in
data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_with_template_response(self):
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('django.contrib.messages.tests.urls.show_template_response')
for level in self.levels.keys():
add_url = reverse('django.contrib.messages.tests.urls.add_template_response',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
for msg in data['messages']:
self.assertContains(response, msg)
# there shouldn't be any messages on second GET request
response = self.client.get(show_url)
for msg in data['messages']:
self.assertNotContains(response, msg)
@override_settings(MESSAGE_LEVEL=constants.DEBUG)
def test_multiple_posts(self):
"""
Tests that messages persist properly when multiple POSTs are made
before a GET.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend([Message(self.levels[level], msg) for msg in
data['messages']])
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertTrue('messages' in response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
@override_settings(
INSTALLED_APPS=filter(
lambda app:app!='django.contrib.messages', settings.INSTALLED_APPS),
MIDDLEWARE_CLASSES=filter(
lambda m:'MessageMiddleware' not in m, settings.MIDDLEWARE_CLASSES),
TEMPLATE_CONTEXT_PROCESSORS=filter(
lambda p:'context_processors.messages' not in p,
settings.TEMPLATE_CONTEXT_PROCESSORS),
MESSAGE_LEVEL=constants.DEBUG
)
def test_middleware_disabled(self):
"""
Tests that, when the middleware is disabled, an exception is raised
when one attempts to store a message.
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
self.assertRaises(MessageFailure, self.client.post, add_url,
data, follow=True)
@override_settings(
INSTALLED_APPS=filter(
lambda app:app!='django.contrib.messages', settings.INSTALLED_APPS),
MIDDLEWARE_CLASSES=filter(
lambda m:'MessageMiddleware' not in m, settings.MIDDLEWARE_CLASSES),
TEMPLATE_CONTEXT_PROCESSORS=filter(
lambda p:'context_processors.messages' not in p,
settings.TEMPLATE_CONTEXT_PROCESSORS),
MESSAGE_LEVEL=constants.DEBUG
)
def test_middleware_disabled_fail_silently(self):
"""
Tests that, when the middleware is disabled, an exception is not
raised if 'fail_silently' = True
"""
data = {
'messages': ['Test message %d' % x for x in range(5)],
'fail_silently': True,
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertFalse('messages' in response.context)
def stored_messages_count(self, storage, response):
"""
Returns the number of messages being stored after a
``storage.update()`` call.
"""
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2',
extra_tags='tag')])
def test_existing_read(self):
"""
Tests that reading the existing storage doesn't cause the data to be
lost.
"""
storage = self.get_existing_storage()
self.assertFalse(storage.used)
# After iterating the storage engine directly, the used flag is set.
data = list(storage)
self.assertTrue(storage.used)
# The data does not disappear because it has been iterated.
self.assertEqual(data, list(storage))
def test_existing_add(self):
storage = self.get_existing_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 3')
self.assertTrue(storage.added_new)
def test_default_level(self):
# get_level works even with no storage on the request.
request = self.get_request()
self.assertEqual(get_level(request), constants.INFO)
# get_level returns the default level if it hasn't been set.
storage = self.get_storage()
request._messages = storage
self.assertEqual(get_level(request), constants.INFO)
# Only messages of sufficient level get recorded.
add_level_messages(storage)
self.assertEqual(len(storage), 5)
def test_low_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 5))
self.assertEqual(get_level(request), 5)
add_level_messages(storage)
self.assertEqual(len(storage), 6)
def test_high_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assertTrue(set_level(request, 30))
self.assertEqual(get_level(request), 30)
add_level_messages(storage)
self.assertEqual(len(storage), 2)
@override_settings(MESSAGE_LEVEL=29)
def test_settings_level(self):
request = self.get_request()
storage = self.storage_class(request)
self.assertEqual(get_level(request), 29)
add_level_messages(storage)
self.assertEqual(len(storage), 3)
def test_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', '', 'extra-tag debug', 'warning', 'error',
'success'])
@override_settings_tags(MESSAGE_TAGS={
constants.INFO: 'info',
constants.DEBUG: '',
constants.WARNING: '',
constants.ERROR: 'bad',
29: 'custom',
}
)
def test_custom_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', 'custom', 'extra-tag', '', 'bad', 'success'])
| apache-2.0 |
ContextLogic/luigi | luigi/contrib/ecs.py | 17 | 6185 | # -*- coding: utf-8 -*-
#
# Copyright 2015 Outlier Bio, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
EC2 Container Service wrapper for Luigi
From the AWS website:
Amazon EC2 Container Service (ECS) is a highly scalable, high performance
container management service that supports Docker containers and allows you
to easily run applications on a managed cluster of Amazon EC2 instances.
To use ECS, you create a taskDefinition_ JSON that defines the `docker run`_
command for one or more containers in a task or service, and then submit this
JSON to the API to run the task.
This `boto3-powered`_ wrapper allows you to create Luigi Tasks to submit ECS
``taskDefinition`` s. You can either pass a dict (mapping directly to the
``taskDefinition`` JSON) OR an Amazon Resource Name (arn) for a previously
registered ``taskDefinition``.
Requires:
- boto3 package
- Amazon AWS credentials discoverable by boto3 (e.g., by using ``aws configure``
from awscli_)
- A running ECS cluster (see `ECS Get Started`_)
Written and maintained by Jake Feala (@jfeala) for Outlier Bio (@outlierbio)
.. _`docker run`: https://docs.docker.com/reference/commandline/run
.. _taskDefinition: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_defintions.html
.. _`boto3-powered`: https://boto3.readthedocs.io
.. _awscli: https://aws.amazon.com/cli
.. _`ECS Get Started`: http://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_GetStarted.html
"""
import time
import logging
import luigi
logger = logging.getLogger('luigi-interface')
try:
import boto3
client = boto3.client('ecs')
except ImportError:
logger.warning('boto3 is not installed. ECSTasks require boto3')
POLL_TIME = 2
def _get_task_statuses(task_ids):
"""
Retrieve task statuses from ECS API
Returns list of {RUNNING|PENDING|STOPPED} for each id in task_ids
"""
response = client.describe_tasks(tasks=task_ids)
# Error checking
if response['failures'] != []:
raise Exception('There were some failures:\n{0}'.format(
response['failures']))
status_code = response['ResponseMetadata']['HTTPStatusCode']
if status_code != 200:
msg = 'Task status request received status code {0}:\n{1}'
raise Exception(msg.format(status_code, response))
return [t['lastStatus'] for t in response['tasks']]
def _track_tasks(task_ids):
"""Poll task status until STOPPED"""
while True:
statuses = _get_task_statuses(task_ids)
if all([status == 'STOPPED' for status in statuses]):
logger.info('ECS tasks {0} STOPPED'.format(','.join(task_ids)))
break
time.sleep(POLL_TIME)
logger.debug('ECS task status for tasks {0}: {1}'.format(
','.join(task_ids), statuses))
class ECSTask(luigi.Task):
"""
Base class for an Amazon EC2 Container Service Task
Amazon ECS requires you to register "tasks", which are JSON descriptions
for how to issue the ``docker run`` command. This Luigi Task can either
run a pre-registered ECS taskDefinition, OR register the task on the fly
from a Python dict.
:param task_def_arn: pre-registered task definition ARN (Amazon Resource
Name), of the form::
arn:aws:ecs:<region>:<user_id>:task-definition/<family>:<tag>
:param task_def: dict describing task in taskDefinition JSON format, for
example::
task_def = {
'family': 'hello-world',
'volumes': [],
'containerDefinitions': [
{
'memory': 1,
'essential': True,
'name': 'hello-world',
'image': 'ubuntu',
'command': ['/bin/echo', 'hello world']
}
]
}
"""
task_def_arn = luigi.Parameter(default=None)
task_def = luigi.Parameter(default=None)
@property
def ecs_task_ids(self):
"""Expose the ECS task ID"""
if hasattr(self, '_task_ids'):
return self._task_ids
@property
def command(self):
"""
Command passed to the containers
Override to return list of dicts with keys 'name' and 'command',
describing the container names and commands to pass to the container.
Directly corresponds to the `overrides` parameter of runTask API. For
example::
[
{
'name': 'myContainer',
'command': ['/bin/sleep', '60']
}
]
"""
pass
def run(self):
if (not self.task_def and not self.task_def_arn) or \
(self.task_def and self.task_def_arn):
raise ValueError(('Either (but not both) a task_def (dict) or '
'task_def_arn (string) must be assigned'))
if not self.task_def_arn:
# Register the task and get assigned taskDefinition ID (arn)
response = client.register_task_definition(**self.task_def)
self.task_def_arn = response['taskDefinition']['taskDefinitionArn']
# Submit the task to AWS ECS and get assigned task ID
# (list containing 1 string)
if self.command:
overrides = {'containerOverrides': self.command}
else:
overrides = {}
response = client.run_task(taskDefinition=self.task_def_arn,
overrides=overrides)
self._task_ids = [task['taskArn'] for task in response['tasks']]
# Wait on task completion
_track_tasks(self._task_ids)
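# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition for clarity, not part of the
# original luigi module). It reuses the hello-world ``task_def`` from the
# ECSTask docstring above and assumes boto3 credentials plus a running ECS
# cluster are already configured.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    hello_world_task_def = {
        'family': 'hello-world',
        'volumes': [],
        'containerDefinitions': [
            {
                'memory': 1,
                'essential': True,
                'name': 'hello-world',
                'image': 'ubuntu',
                'command': ['/bin/echo', 'hello world'],
            }
        ],
    }
    # Register the task definition, run it, then poll until it stops.
    luigi.build([ECSTask(task_def=hello_world_task_def)],
                local_scheduler=True)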
| apache-2.0 |
amghost/myblog | node_modules/pygmentize-bundled/vendor/pygments/build-3.3/pygments/cmdline.py | 95 | 13621 | # -*- coding: utf-8 -*-
"""
pygments.cmdline
~~~~~~~~~~~~~~~~
Command line interface.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import sys
import getopt
from textwrap import dedent
from pygments import __version__, highlight
from pygments.util import ClassNotFound, OptionError, docstring_headline
from pygments.lexers import get_all_lexers, get_lexer_by_name, get_lexer_for_filename, \
find_lexer_class, guess_lexer, TextLexer
from pygments.formatters import get_all_formatters, get_formatter_by_name, \
get_formatter_for_filename, find_formatter_class, \
TerminalFormatter # pylint:disable-msg=E0611
from pygments.filters import get_all_filters, find_filter_class
from pygments.styles import get_all_styles, get_style_by_name
USAGE = """\
Usage: %s [-l <lexer> | -g] [-F <filter>[:<options>]] [-f <formatter>]
[-O <options>] [-P <option=value>] [-o <outfile>] [<infile>]
%s -S <style> -f <formatter> [-a <arg>] [-O <options>] [-P <option=value>]
%s -L [<which> ...]
%s -N <filename>
%s -H <type> <name>
%s -h | -V
Highlight the input file and write the result to <outfile>.
If no input file is given, use stdin, if -o is not given, use stdout.
<lexer> is a lexer name (query all lexer names with -L). If -l is not
given, the lexer is guessed from the extension of the input file name
(this obviously doesn't work if the input is stdin). If -g is passed,
attempt to guess the lexer from the file contents, or pass through as
plain text if this fails (this can work for stdin).
Likewise, <formatter> is a formatter name, and will be guessed from
the extension of the output file name. If no output file is given,
the terminal formatter will be used by default.
With the -O option, you can give the lexer and formatter a comma-
separated list of options, e.g. ``-O bg=light,python=cool``.
The -P option adds lexer and formatter options like the -O option, but
you can only give one option per -P. That way, the option value may
contain commas and equals signs, which it can't with -O, e.g.
``-P "heading=Pygments, the Python highlighter"``.
With the -F option, you can add filters to the token stream, you can
give options in the same way as for -O after a colon (note: there must
not be spaces around the colon).
The -O, -P and -F options can be given multiple times.
With the -S option, print out style definitions for style <style>
for formatter <formatter>. The argument given by -a is formatter
dependent.
The -L option lists lexers, formatters, styles or filters -- set
`which` to the thing you want to list (e.g. "styles"), or omit it to
list everything.
The -N option guesses and prints out a lexer name based solely on
the given filename. It does not take input or highlight anything.
If no specific lexer can be determined "text" is returned.
The -H option prints detailed help for the object <name> of type <type>,
where <type> is one of "lexer", "formatter" or "filter".
The -h option prints this help.
The -V option prints the package version.
"""
def _parse_options(o_strs):
opts = {}
if not o_strs:
return opts
for o_str in o_strs:
if not o_str:
continue
o_args = o_str.split(',')
for o_arg in o_args:
o_arg = o_arg.strip()
try:
o_key, o_val = o_arg.split('=')
o_key = o_key.strip()
o_val = o_val.strip()
except ValueError:
opts[o_arg] = True
else:
opts[o_key] = o_val
return opts
def _parse_filters(f_strs):
filters = []
if not f_strs:
return filters
for f_str in f_strs:
if ':' in f_str:
fname, fopts = f_str.split(':', 1)
filters.append((fname, _parse_options([fopts])))
else:
filters.append((f_str, {}))
return filters
def _print_help(what, name):
try:
if what == 'lexer':
cls = find_lexer_class(name)
print("Help on the %s lexer:" % cls.name)
print(dedent(cls.__doc__))
elif what == 'formatter':
cls = find_formatter_class(name)
print("Help on the %s formatter:" % cls.name)
print(dedent(cls.__doc__))
elif what == 'filter':
cls = find_filter_class(name)
print("Help on the %s filter:" % name)
print(dedent(cls.__doc__))
except AttributeError:
print("%s not found!" % what, file=sys.stderr)
def _print_list(what):
if what == 'lexer':
print()
print("Lexers:")
print("~~~~~~~")
info = []
for fullname, names, exts, _ in get_all_lexers():
tup = (', '.join(names)+':', fullname,
exts and '(filenames ' + ', '.join(exts) + ')' or '')
info.append(tup)
info.sort()
for i in info:
print(('* %s\n %s %s') % i)
elif what == 'formatter':
print()
print("Formatters:")
print("~~~~~~~~~~~")
info = []
for cls in get_all_formatters():
doc = docstring_headline(cls)
tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and
'(filenames ' + ', '.join(cls.filenames) + ')' or '')
info.append(tup)
info.sort()
for i in info:
print(('* %s\n %s %s') % i)
elif what == 'filter':
print()
print("Filters:")
print("~~~~~~~~")
for name in get_all_filters():
cls = find_filter_class(name)
print("* " + name + ':')
print(" %s" % docstring_headline(cls))
elif what == 'style':
print()
print("Styles:")
print("~~~~~~~")
for name in get_all_styles():
cls = get_style_by_name(name)
print("* " + name + ':')
print(" %s" % docstring_headline(cls))
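# ---------------------------------------------------------------------------
# Illustrative invocations (an addition for clarity; assumes this module is
# exposed as the ``pygmentize`` script). Every flag used below is described
# in USAGE above and handled by main() below:
#
#   pygmentize -l python -f html -O full,style=colorful -o out.html input.py
#   pygmentize -S colorful -f html -a .highlight
#   pygmentize -L lexers formatters
#   pygmentize -H formatter html
# ---------------------------------------------------------------------------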
def main(args=sys.argv):
"""
Main command line entry point.
"""
# pylint: disable-msg=R0911,R0912,R0915
usage = USAGE % ((args[0],) * 6)
if sys.platform in ['win32', 'cygwin']:
try:
# Provide coloring under Windows, if possible
import colorama
colorama.init()
except ImportError:
pass
try:
popts, args = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:hVHg")
except getopt.GetoptError as err:
print(usage, file=sys.stderr)
return 2
opts = {}
O_opts = []
P_opts = []
F_opts = []
for opt, arg in popts:
if opt == '-O':
O_opts.append(arg)
elif opt == '-P':
P_opts.append(arg)
elif opt == '-F':
F_opts.append(arg)
opts[opt] = arg
if not opts and not args:
print(usage)
return 0
if opts.pop('-h', None) is not None:
print(usage)
return 0
if opts.pop('-V', None) is not None:
print('Pygments version %s, (c) 2006-2013 by Georg Brandl.' % __version__)
return 0
# handle ``pygmentize -L``
L_opt = opts.pop('-L', None)
if L_opt is not None:
if opts:
print(usage, file=sys.stderr)
return 2
# print version
main(['', '-V'])
if not args:
args = ['lexer', 'formatter', 'filter', 'style']
for arg in args:
_print_list(arg.rstrip('s'))
return 0
# handle ``pygmentize -H``
H_opt = opts.pop('-H', None)
if H_opt is not None:
if opts or len(args) != 2:
print(usage, file=sys.stderr)
return 2
what, name = args
if what not in ('lexer', 'formatter', 'filter'):
print(usage, file=sys.stderr)
return 2
_print_help(what, name)
return 0
# parse -O options
parsed_opts = _parse_options(O_opts)
opts.pop('-O', None)
# parse -P options
for p_opt in P_opts:
try:
name, value = p_opt.split('=', 1)
except ValueError:
parsed_opts[p_opt] = True
else:
parsed_opts[name] = value
opts.pop('-P', None)
# handle ``pygmentize -N``
infn = opts.pop('-N', None)
if infn is not None:
try:
lexer = get_lexer_for_filename(infn, **parsed_opts)
except ClassNotFound as err:
lexer = TextLexer()
except OptionError as err:
print('Error:', err, file=sys.stderr)
return 1
print(lexer.aliases[0])
return 0
# handle ``pygmentize -S``
S_opt = opts.pop('-S', None)
a_opt = opts.pop('-a', None)
if S_opt is not None:
f_opt = opts.pop('-f', None)
if not f_opt:
print(usage, file=sys.stderr)
return 2
if opts or args:
print(usage, file=sys.stderr)
return 2
try:
parsed_opts['style'] = S_opt
fmter = get_formatter_by_name(f_opt, **parsed_opts)
except ClassNotFound as err:
print(err, file=sys.stderr)
return 1
arg = a_opt or ''
try:
print(fmter.get_style_defs(arg))
except Exception as err:
print('Error:', err, file=sys.stderr)
return 1
return 0
# if no -S is given, -a is not allowed
if a_opt is not None:
print(usage, file=sys.stderr)
return 2
# parse -F options
F_opts = _parse_filters(F_opts)
opts.pop('-F', None)
# select formatter
outfn = opts.pop('-o', None)
fmter = opts.pop('-f', None)
if fmter:
try:
fmter = get_formatter_by_name(fmter, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
if outfn:
if not fmter:
try:
fmter = get_formatter_for_filename(outfn, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
try:
outfile = open(outfn, 'wb')
except Exception as err:
print('Error: cannot open outfile:', err, file=sys.stderr)
return 1
else:
if not fmter:
fmter = TerminalFormatter(**parsed_opts)
outfile = sys.stdout
# select lexer
lexer = opts.pop('-l', None)
if lexer:
try:
lexer = get_lexer_by_name(lexer, **parsed_opts)
except (OptionError, ClassNotFound) as err:
print('Error:', err, file=sys.stderr)
return 1
if args:
if len(args) > 1:
print(usage, file=sys.stderr)
return 2
infn = args[0]
try:
code = open(infn, 'rb').read()
except Exception as err:
print('Error: cannot read infile:', err, file=sys.stderr)
return 1
if not lexer:
try:
lexer = get_lexer_for_filename(infn, code, **parsed_opts)
except ClassNotFound as err:
if '-g' in opts:
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
else:
print('Error:', err, file=sys.stderr)
return 1
except OptionError as err:
print('Error:', err, file=sys.stderr)
return 1
else:
if '-g' in opts:
code = sys.stdin.read()
try:
lexer = guess_lexer(code, **parsed_opts)
except ClassNotFound:
lexer = TextLexer(**parsed_opts)
elif not lexer:
print('Error: no lexer name given and reading ' + \
'from stdin (try using -g or -l <lexer>)', file=sys.stderr)
return 2
else:
code = sys.stdin.read()
# No encoding given? Use latin1 if output file given,
# stdin/stdout encoding otherwise.
# (This is a compromise, I'm not too happy with it...)
if 'encoding' not in parsed_opts and 'outencoding' not in parsed_opts:
if outfn:
# encoding pass-through
fmter.encoding = 'latin1'
else:
if sys.version_info < (3,):
# use terminal encoding; Python 3's terminals already do that
lexer.encoding = getattr(sys.stdin, 'encoding',
None) or 'ascii'
fmter.encoding = getattr(sys.stdout, 'encoding',
None) or 'ascii'
elif not outfn and sys.version_info > (3,):
# output to terminal with encoding -> use .buffer
outfile = sys.stdout.buffer
# ... and do it!
try:
# process filters
for fname, fopts in F_opts:
lexer.add_filter(fname, **fopts)
highlight(code, lexer, fmter, outfile)
except Exception as err:
import traceback
info = traceback.format_exception(*sys.exc_info())
msg = info[-1].strip()
if len(info) >= 3:
# extract relevant file and position info
msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:]
print(file=sys.stderr)
print('*** Error while highlighting:', file=sys.stderr)
print(msg, file=sys.stderr)
return 1
return 0
| mit |
RadioFreeAsia/RDacity | lib-src/lv2/suil/waflib/Tools/d_scan.py | 292 | 3029 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import re
from waflib import Utils,Logs
def filter_comments(filename):
txt=Utils.readf(filename)
i=0
buf=[]
max=len(txt)
begin=0
while i<max:
c=txt[i]
if c=='"'or c=="'":
buf.append(txt[begin:i])
delim=c
i+=1
while i<max:
c=txt[i]
if c==delim:break
elif c=='\\':
i+=1
i+=1
i+=1
begin=i
elif c=='/':
buf.append(txt[begin:i])
i+=1
if i==max:break
c=txt[i]
if c=='+':
i+=1
nesting=1
c=None
while i<max:
prev=c
c=txt[i]
if prev=='/'and c=='+':
nesting+=1
c=None
elif prev=='+'and c=='/':
nesting-=1
if nesting==0:break
c=None
i+=1
elif c=='*':
i+=1
c=None
while i<max:
prev=c
c=txt[i]
if prev=='*'and c=='/':break
i+=1
elif c=='/':
i+=1
while i<max and txt[i]!='\n':
i+=1
else:
begin=i-1
continue
i+=1
begin=i
buf.append(' ')
else:
i+=1
buf.append(txt[begin:])
return buf
class d_parser(object):
def __init__(self,env,incpaths):
self.allnames=[]
self.re_module=re.compile("module\s+([^;]+)")
self.re_import=re.compile("import\s+([^;]+)")
self.re_import_bindings=re.compile("([^:]+):(.*)")
self.re_import_alias=re.compile("[^=]+=(.+)")
self.env=env
self.nodes=[]
self.names=[]
self.incpaths=incpaths
def tryfind(self,filename):
found=0
for n in self.incpaths:
found=n.find_resource(filename.replace('.','/')+'.d')
if found:
self.nodes.append(found)
self.waiting.append(found)
break
if not found:
if not filename in self.names:
self.names.append(filename)
def get_strings(self,code):
self.module=''
lst=[]
mod_name=self.re_module.search(code)
if mod_name:
self.module=re.sub('\s+','',mod_name.group(1))
import_iterator=self.re_import.finditer(code)
if import_iterator:
for import_match in import_iterator:
import_match_str=re.sub('\s+','',import_match.group(1))
bindings_match=self.re_import_bindings.match(import_match_str)
if bindings_match:
import_match_str=bindings_match.group(1)
matches=import_match_str.split(',')
for match in matches:
alias_match=self.re_import_alias.match(match)
if alias_match:
match=alias_match.group(1)
lst.append(match)
return lst
def start(self,node):
self.waiting=[node]
while self.waiting:
nd=self.waiting.pop(0)
self.iter(nd)
def iter(self,node):
path=node.abspath()
code="".join(filter_comments(path))
names=self.get_strings(code)
for x in names:
if x in self.allnames:continue
self.allnames.append(x)
self.tryfind(x)
def scan(self):
env=self.env
gruik=d_parser(env,self.generator.includes_nodes)
node=self.inputs[0]
gruik.start(node)
nodes=gruik.nodes
names=gruik.names
if Logs.verbose:
Logs.debug('deps: deps for %s: %r; unresolved %r'%(str(node),nodes,names))
return(nodes,names)
| gpl-2.0 |
jakereps/qiime2 | qiime2/core/archive/format/tests/test_v0.py | 2 | 2307 | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2021, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import unittest
import tempfile
import uuid as _uuid
import pathlib
import io
from qiime2.core.testing.type import IntSequence1
from qiime2.core.testing.format import IntSequenceDirectoryFormat
from qiime2.core.archive.archiver import _ZipArchive, ArchiveRecord
from qiime2.core.archive.format.v0 import ArchiveFormat
class TestArchiveFormat(unittest.TestCase):
def setUp(self):
prefix = "qiime2-test-temp-"
self.temp_dir = tempfile.TemporaryDirectory(prefix=prefix)
def test_format_metadata(self):
uuid = _uuid.uuid4()
with io.StringIO() as fh:
ArchiveFormat._format_metadata(fh, uuid, IntSequence1,
IntSequenceDirectoryFormat)
result = fh.getvalue()
self.assertEqual(result,
"uuid: %s\ntype: IntSequence1\nformat: "
"IntSequenceDirectoryFormat\n" % uuid)
def test_format_metadata_none(self):
uuid = _uuid.uuid4()
with io.StringIO() as fh:
ArchiveFormat._format_metadata(fh, uuid, IntSequence1, None)
result = fh.getvalue()
self.assertEqual(result,
"uuid: %s\ntype: IntSequence1\nformat: null\n" % uuid)
def test_load_root_dir_metadata_uuid_mismatch(self):
fp = pathlib.Path(self.temp_dir.name) / 'root-dir-metadata-mismatch'
fp.mkdir()
r = _ZipArchive.setup(fp, 'foo', 'bar')
fake = ArchiveRecord(r.root, r.version_fp,
_uuid.uuid4(), # This will trick the format
r.version, r.framework_version)
ArchiveFormat.write(fake, IntSequence1, IntSequenceDirectoryFormat,
lambda x: None, None)
with self.assertRaisesRegex(
ValueError, 'root directory must match UUID.*metadata'):
ArchiveFormat(r)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
Endika/OpenUpgrade | addons/pad/py_etherpad/__init__.py | 505 | 7804 | """Module to talk to EtherpadLite API."""
import json
import urllib
import urllib2
class EtherpadLiteClient:
"""Client to talk to EtherpadLite API."""
API_VERSION = 1 # TODO probably 1.1 sometime soon
CODE_OK = 0
CODE_INVALID_PARAMETERS = 1
CODE_INTERNAL_ERROR = 2
CODE_INVALID_FUNCTION = 3
CODE_INVALID_API_KEY = 4
TIMEOUT = 20
apiKey = ""
baseUrl = "http://localhost:9001/api"
def __init__(self, apiKey=None, baseUrl=None):
if apiKey:
self.apiKey = apiKey
if baseUrl:
self.baseUrl = baseUrl
def call(self, function, arguments=None):
"""Create a dictionary of all parameters"""
url = '%s/%d/%s' % (self.baseUrl, self.API_VERSION, function)
params = arguments or {}
params.update({'apikey': self.apiKey})
data = urllib.urlencode(params, True)
try:
opener = urllib2.build_opener()
request = urllib2.Request(url=url, data=data)
response = opener.open(request, timeout=self.TIMEOUT)
result = response.read()
response.close()
except urllib2.HTTPError:
raise
result = json.loads(result)
if result is None:
raise ValueError("JSON response could not be decoded")
return self.handleResult(result)
def handleResult(self, result):
"""Handle API call result"""
if 'code' not in result:
raise Exception("API response has no code")
if 'message' not in result:
raise Exception("API response has no message")
if 'data' not in result:
result['data'] = None
if result['code'] == self.CODE_OK:
return result['data']
elif result['code'] == self.CODE_INVALID_PARAMETERS or result['code'] == self.CODE_INVALID_API_KEY:
raise ValueError(result['message'])
elif result['code'] == self.CODE_INTERNAL_ERROR:
raise Exception(result['message'])
elif result['code'] == self.CODE_INVALID_FUNCTION:
raise Exception(result['message'])
else:
raise Exception("An unexpected error occurred whilst handling the response")
# GROUPS
# Pads can belong to a group. There will always be public pads that do not belong to a group (or we give this group the id 0)
def createGroup(self):
"""creates a new group"""
return self.call("createGroup")
def createGroupIfNotExistsFor(self, groupMapper):
"""this functions helps you to map your application group ids to etherpad lite group ids"""
return self.call("createGroupIfNotExistsFor", {
"groupMapper": groupMapper
})
def deleteGroup(self, groupID):
"""deletes a group"""
return self.call("deleteGroup", {
"groupID": groupID
})
def listPads(self, groupID):
"""returns all pads of this group"""
return self.call("listPads", {
"groupID": groupID
})
def createGroupPad(self, groupID, padName, text=''):
"""creates a new pad in this group"""
params = {
"groupID": groupID,
"padName": padName,
}
if text:
params['text'] = text
return self.call("createGroupPad", params)
# AUTHORS
# These authors are bound to the attributes the users choose (color and name).
def createAuthor(self, name=''):
"""creates a new author"""
params = {}
if name:
params['name'] = name
return self.call("createAuthor", params)
def createAuthorIfNotExistsFor(self, authorMapper, name=''):
"""this functions helps you to map your application author ids to etherpad lite author ids"""
params = {
'authorMapper': authorMapper
}
if name:
params['name'] = name
return self.call("createAuthorIfNotExistsFor", params)
# SESSIONS
# Sessions can be created between a group and an author. This allows
# an author to access more than one group. The sessionID will be set as
# a cookie to the client and is valid until a certain date.
def createSession(self, groupID, authorID, validUntil):
"""creates a new session"""
return self.call("createSession", {
"groupID": groupID,
"authorID": authorID,
"validUntil": validUntil
})
def deleteSession(self, sessionID):
"""deletes a session"""
return self.call("deleteSession", {
"sessionID": sessionID
})
def getSessionInfo(self, sessionID):
"""returns informations about a session"""
return self.call("getSessionInfo", {
"sessionID": sessionID
})
def listSessionsOfGroup(self, groupID):
"""returns all sessions of a group"""
return self.call("listSessionsOfGroup", {
"groupID": groupID
})
def listSessionsOfAuthor(self, authorID):
"""returns all sessions of an author"""
return self.call("listSessionsOfAuthor", {
"authorID": authorID
})
# PAD CONTENT
# Pad content can be updated and retrieved through the API
def getText(self, padID, rev=None):
"""returns the text of a pad"""
params = {"padID": padID}
if rev is not None:
params['rev'] = rev
return self.call("getText", params)
# introduced with pull request merge
def getHtml(self, padID, rev=None):
"""returns the html of a pad"""
params = {"padID": padID}
if rev is not None:
params['rev'] = rev
return self.call("getHTML", params)
def setText(self, padID, text):
"""sets the text of a pad"""
return self.call("setText", {
"padID": padID,
"text": text
})
def setHtml(self, padID, html):
"""sets the text of a pad from html"""
return self.call("setHTML", {
"padID": padID,
"html": html
})
# PAD
# Group pads are normal pads, but with the name schema
# GROUPID$PADNAME. A security manager controls access to them and it's
# forbidden for normal pads to include a $ in the name.
def createPad(self, padID, text=''):
"""creates a new pad"""
params = {
"padID": padID,
}
if text:
params['text'] = text
return self.call("createPad", params)
def getRevisionsCount(self, padID):
"""returns the number of revisions of this pad"""
return self.call("getRevisionsCount", {
"padID": padID
})
def deletePad(self, padID):
"""deletes a pad"""
return self.call("deletePad", {
"padID": padID
})
def getReadOnlyID(self, padID):
"""returns the read only link of a pad"""
return self.call("getReadOnlyID", {
"padID": padID
})
def setPublicStatus(self, padID, publicStatus):
"""sets a boolean for the public status of a pad"""
return self.call("setPublicStatus", {
"padID": padID,
"publicStatus": publicStatus
})
def getPublicStatus(self, padID):
"""return true of false"""
return self.call("getPublicStatus", {
"padID": padID
})
def setPassword(self, padID, password):
"""returns ok or a error message"""
return self.call("setPassword", {
"padID": padID,
"password": password
})
def isPasswordProtected(self, padID):
"""returns true or false"""
return self.call("isPasswordProtected", {
"padID": padID
})
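# ---------------------------------------------------------------------------
# Illustrative usage sketch (an addition for clarity, not part of the
# original module). The API key and pad name are placeholders -- the real
# key lives in the APIKEY.txt of your Etherpad Lite instance.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    client = EtherpadLiteClient(apiKey='EXAMPLE_API_KEY',
                                baseUrl='http://localhost:9001/api')
    client.createPad('demo-pad', text='Hello from the API')
    client.setText('demo-pad', 'Replaced text')
    # getText() returns the ``data`` part of the API response,
    # e.g. {'text': 'Replaced text\n'}
    print(client.getText('demo-pad'))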
| agpl-3.0 |
angelbot/geoincentives | geoincentives/models.py | 1 | 3064 | from django.db import models
from django.contrib.auth.models import User as DjangoUser
from django.contrib.auth.models import AbstractBaseUser
from django.contrib.auth.models import UserManager
import hashlib
class User(models.Model):
USER_TYPE = (
(1, 'student'),
(2, 'business'),
(3, 'nonprofit')
)
auth_user = models.OneToOneField(DjangoUser)
type = models.CharField(max_length=100, null=True, blank=False, choices=USER_TYPE, default=USER_TYPE[1])
company = models.CharField(max_length=255, null=True, db_index=True, blank=True)
address = models.CharField(max_length=255, null=True, db_index=True, blank=False)
city = models.CharField(max_length=255, null=True, db_index=True, blank=False)
state = models.CharField(max_length=30, null=True, db_index=True, blank=False)
zipcode = models.CharField(max_length=5, null=True, db_index=True, blank=False)
school = models.CharField(max_length=255, null=True, db_index=True, blank=False)
birthdate = models.DateField(blank=True, null=True)
points = models.IntegerField(null=True, blank=True)
def __unicode__(self):
return u'%s' % self.auth_user.email
class EventType(models.Model):
name = models.CharField(max_length=255, null=True, blank=False)
max_checkin = models.IntegerField()
def __unicode__(self):
return u'%s' % self.name
class Event(models.Model):
EVENT_STATUS = (
(1, 'active'),
(2, 'inactive')
)
name = models.CharField(max_length=255, null=True, blank=False)
type = models.ForeignKey(EventType, null=True, blank=True)
status = models.IntegerField(max_length=100, null=True, blank=False, choices=EVENT_STATUS)
start_time = models.CharField(max_length=5, null=True, blank=False)
end_time = models.CharField(max_length=5, null=True, blank=False)
date = models.DateField(null=True, blank=True)
point_value = models.IntegerField()
recurring = models.BooleanField()
verified = models.BooleanField()
address = models.CharField(max_length=255, null=True, db_index=True, blank=False)
city = models.CharField(max_length=255, null=True, db_index=True, blank=False)
state = models.CharField(max_length=30, null=True, db_index=True, blank=False)
zipcode = models.CharField(max_length=5, null=True, db_index=True, blank=False)
latitude = models.FloatField(null=True, blank=True)
longitude = models.FloatField(null=True, blank=True)
def __unicode__(self):
return u'%s' % self.name
class UserEvent(models.Model):
user = models.ForeignKey(DjangoUser, null=True, blank=True)
event = models.ForeignKey(Event, null=True, blank=True)
date = models.DateField()
def __unicode__(self):
return u'%s %s' % (self.user.username, self.event.name)
class Reward(models.Model):
name = models.CharField(max_length=255, null=True, blank=False)
available = models.IntegerField()
points = models.IntegerField()
def __unicode__(self):
return u'%s' % (self.name)
| gpl-2.0 |
zhenv5/scikit-learn | examples/feature_selection/plot_feature_selection.py | 249 | 2827 | """
===============================
Univariate Feature Selection
===============================
An example showing univariate feature selection.
Noisy (non informative) features are added to the iris data and
univariate feature selection is applied. For each feature, we plot the
p-values for the univariate feature selection and the corresponding
weights of an SVM. We can see that univariate feature selection
selects the informative features and that these have larger SVM weights.
In the total set of features, only the 4 first ones are significant. We
can see that they have the highest score with univariate feature
selection. The SVM assigns a large weight to one of these features, but also
selects many of the non-informative features.
Applying univariate feature selection before the SVM
increases the SVM weight attributed to the significant features, and will
thus improve classification.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
from sklearn.feature_selection import SelectPercentile, f_classif
###############################################################################
# import some data to play with
# The iris dataset
iris = datasets.load_iris()
# Some noisy data not correlated
E = np.random.uniform(0, 0.1, size=(len(iris.data), 20))
# Add the noisy data to the informative features
X = np.hstack((iris.data, E))
y = iris.target
###############################################################################
plt.figure(1)
plt.clf()
X_indices = np.arange(X.shape[-1])
###############################################################################
# Univariate feature selection with F-test for feature scoring
# We use the default selection function: the 10% most significant features
selector = SelectPercentile(f_classif, percentile=10)
selector.fit(X, y)
scores = -np.log10(selector.pvalues_)
scores /= scores.max()
plt.bar(X_indices - .45, scores, width=.2,
label=r'Univariate score ($-Log(p_{value})$)', color='g')
###############################################################################
# Compare to the weights of an SVM
clf = svm.SVC(kernel='linear')
clf.fit(X, y)
svm_weights = (clf.coef_ ** 2).sum(axis=0)
svm_weights /= svm_weights.max()
plt.bar(X_indices - .25, svm_weights, width=.2, label='SVM weight', color='r')
clf_selected = svm.SVC(kernel='linear')
clf_selected.fit(selector.transform(X), y)
svm_weights_selected = (clf_selected.coef_ ** 2).sum(axis=0)
svm_weights_selected /= svm_weights_selected.max()
plt.bar(X_indices[selector.get_support()] - .05, svm_weights_selected,
width=.2, label='SVM weights after selection', color='b')
plt.title("Comparing feature selection")
plt.xlabel('Feature number')
plt.yticks(())
plt.axis('tight')
plt.legend(loc='upper right')
plt.show()
| bsd-3-clause |
unnikrishnankgs/va | venv/lib/python3.5/site-packages/tensorflow/contrib/keras/api/keras/applications/xception/__init__.py | 57 | 1148 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Xception Keras application."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.keras.python.keras.applications.xception import decode_predictions
from tensorflow.contrib.keras.python.keras.applications.xception import preprocess_input
from tensorflow.contrib.keras.python.keras.applications.xception import Xception
del absolute_import
del division
del print_function
| bsd-2-clause |
delhivery/django | tests/gis_tests/gdal_tests/test_driver.py | 335 | 1253 | import unittest
from django.contrib.gis.gdal import HAS_GDAL
if HAS_GDAL:
from django.contrib.gis.gdal import Driver, GDALException
valid_drivers = (
# vector
'ESRI Shapefile', 'MapInfo File', 'TIGER', 'S57', 'DGN', 'Memory', 'CSV',
'GML', 'KML',
# raster
'GTiff', 'JPEG', 'MEM', 'PNG',
)
invalid_drivers = ('Foo baz', 'clucka', 'ESRI Shp', 'ESRI rast')
aliases = {
'eSrI': 'ESRI Shapefile',
'TigER/linE': 'TIGER',
'SHAPE': 'ESRI Shapefile',
'sHp': 'ESRI Shapefile',
'tiFf': 'GTiff',
'tIf': 'GTiff',
'jPEg': 'JPEG',
'jpG': 'JPEG',
}
@unittest.skipUnless(HAS_GDAL, "GDAL is required")
class DriverTest(unittest.TestCase):
def test01_valid_driver(self):
"Testing valid GDAL/OGR Data Source Drivers."
for d in valid_drivers:
dr = Driver(d)
self.assertEqual(d, str(dr))
def test02_invalid_driver(self):
"Testing invalid GDAL/OGR Data Source Drivers."
for i in invalid_drivers:
self.assertRaises(GDALException, Driver, i)
def test03_aliases(self):
"Testing driver aliases."
for alias, full_name in aliases.items():
dr = Driver(alias)
self.assertEqual(full_name, str(dr))
| bsd-3-clause |
kezabelle/django-sniplates | docs/conf.py | 4 | 8236 | # -*- coding: utf-8 -*-
#
# Django Sniplates documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 6 10:23:25 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Sniplates'
copyright = u'2014, Curtis Maloney'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3'
# The full version, including alpha/beta/rc tags.
release = '0.3.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoSniplatesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'DjangoSniplates.tex', u'Django Sniplates Documentation',
u'Curtis Maloney', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'djangosniplates', u'Django Sniplates Documentation',
[u'Curtis Maloney'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'DjangoSniplates', u'Django Sniplates Documentation',
u'Curtis Maloney', 'DjangoSniplates', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
damianam/easybuild-framework | easybuild/toolchains/linalg/atlas.py | 3 | 1662 | ##
# Copyright 2012-2017 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be),
# Flemish Research Foundation (FWO) (http://www.fwo.be/en)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
##
"""
Support for ATLAS as toolchain linear algebra library.
:author: Stijn De Weirdt (Ghent University)
:author: Kenneth Hoste (Ghent University)
"""
from easybuild.tools.toolchain.linalg import LinAlg
TC_CONSTANT_ATLAS = 'ATLAS'
class Atlas(LinAlg):
"""
Provides ATLAS BLAS/LAPACK support.
LAPACK is a build dependency only
"""
BLAS_MODULE_NAME = ['ATLAS']
BLAS_LIB = ["cblas", "f77blas", "atlas"]
BLAS_LIB_MT = ["ptcblas", "ptf77blas", "atlas"]
BLAS_FAMILY = TC_CONSTANT_ATLAS
LAPACK_MODULE_NAME = ['ATLAS']
LAPACK_LIB = ['lapack']
LAPACK_FAMILY = TC_CONSTANT_ATLAS
| gpl-2.0 |
sunze/py_flask | venv/lib/python3.4/site-packages/pip/req/req_requirement.py | 118 | 1245 | from pip._vendor.packaging.version import parse as parse_version
class InstallationCandidate(object):
def __init__(self, project, version, location):
self.project = project
self.version = parse_version(version)
self.location = location
self._key = (self.project, self.version, self.location)
def __repr__(self):
return "<InstallationCandidate({0!r}, {1!r}, {2!r})>".format(
self.project, self.version, self.location,
)
def __hash__(self):
return hash(self._key)
def __lt__(self, other):
return self._compare(other, lambda s, o: s < o)
def __le__(self, other):
return self._compare(other, lambda s, o: s <= o)
def __eq__(self, other):
return self._compare(other, lambda s, o: s == o)
def __ge__(self, other):
return self._compare(other, lambda s, o: s >= o)
def __gt__(self, other):
return self._compare(other, lambda s, o: s > o)
def __ne__(self, other):
return self._compare(other, lambda s, o: s != o)
def _compare(self, other, method):
if not isinstance(other, InstallationCandidate):
return NotImplemented
return method(self._key, other._key)
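# Illustrative comparison of the total ordering above (hedged sketch; the
# project name and URLs are made-up values, not part of the original module):
#
#     a = InstallationCandidate('foo', '1.0', 'https://example.org/foo-1.0.tar.gz')
#     b = InstallationCandidate('foo', '1.1', 'https://example.org/foo-1.1.tar.gz')
#     assert a < b
#
# Every rich comparison delegates to the (project, parsed version, location)
# tuple, so candidates sort by project name first and then by parsed version.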
| mit |
rchlin/ShadowsocksFork | shadowsocks/crypto/util.py | 1032 | 4287 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import logging
def find_library_nt(name):
# modified from ctypes.util
# ctypes.util.find_library just returns first result he found
# but we want to try them all
# because on Windows, users may have both 32bit and 64bit version installed
results = []
for directory in os.environ['PATH'].split(os.pathsep):
fname = os.path.join(directory, name)
if os.path.isfile(fname):
results.append(fname)
if fname.lower().endswith(".dll"):
continue
fname = fname + ".dll"
if os.path.isfile(fname):
results.append(fname)
return results
def find_library(possible_lib_names, search_symbol, library_name):
import ctypes.util
from ctypes import CDLL
paths = []
if type(possible_lib_names) not in (list, tuple):
possible_lib_names = [possible_lib_names]
lib_names = []
for lib_name in possible_lib_names:
lib_names.append(lib_name)
lib_names.append('lib' + lib_name)
for name in lib_names:
if os.name == "nt":
paths.extend(find_library_nt(name))
else:
path = ctypes.util.find_library(name)
if path:
paths.append(path)
if not paths:
# We may get here when find_library fails because, for example,
# the user does not have sufficient privileges to access those
# tools underlying find_library on linux.
import glob
for name in lib_names:
patterns = [
'/usr/local/lib*/lib%s.*' % name,
'/usr/lib*/lib%s.*' % name,
'lib%s.*' % name,
'%s.dll' % name]
for pat in patterns:
files = glob.glob(pat)
if files:
paths.extend(files)
for path in paths:
try:
lib = CDLL(path)
if hasattr(lib, search_symbol):
logging.info('loading %s from %s', library_name, path)
return lib
else:
logging.warn('can\'t find symbol %s in %s', search_symbol,
path)
except Exception:
pass
return None
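# Illustrative call (hedged sketch; whether a library is found depends on the
# host system):
#
#     libcrypto = find_library(('crypto', 'eay32'), 'EVP_CipherUpdate', 'libcrypto')
#
# This tries crypto/libcrypto/eay32/libeay32 through ctypes.util (or the PATH
# scan above on Windows), falls back to globbing /usr/lib*/ and /usr/local/lib*/,
# and returns the first CDLL that actually exposes EVP_CipherUpdate, or None.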
def run_cipher(cipher, decipher):
from os import urandom
import random
import time
BLOCK_SIZE = 16384
rounds = 1 * 1024
plain = urandom(BLOCK_SIZE * rounds)
results = []
pos = 0
print('test start')
start = time.time()
while pos < len(plain):
l = random.randint(100, 32768)
c = cipher.update(plain[pos:pos + l])
results.append(c)
pos += l
pos = 0
c = b''.join(results)
results = []
while pos < len(plain):
l = random.randint(100, 32768)
results.append(decipher.update(c[pos:pos + l]))
pos += l
end = time.time()
print('speed: %d bytes/s' % (BLOCK_SIZE * rounds / (end - start)))
assert b''.join(results) == plain
def test_find_library():
assert find_library('c', 'strcpy', 'libc') is not None
assert find_library(['c'], 'strcpy', 'libc') is not None
assert find_library(('c',), 'strcpy', 'libc') is not None
assert find_library(('crypto', 'eay32'), 'EVP_CipherUpdate',
'libcrypto') is not None
assert find_library('notexist', 'strcpy', 'libnotexist') is None
assert find_library('c', 'symbol_not_exist', 'c') is None
assert find_library(('notexist', 'c', 'crypto', 'eay32'),
'EVP_CipherUpdate', 'libc') is not None
if __name__ == '__main__':
test_find_library()
| apache-2.0 |
kslundberg/pants | tests/python/pants_test/backend/python/tasks/test_python_repl.py | 2 | 6547 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import sys
from contextlib import contextmanager
from textwrap import dedent
from pants.backend.core.tasks.repl_task_mixin import ReplTaskMixin
from pants.backend.python.targets.python_binary import PythonBinary
from pants.backend.python.targets.python_library import PythonLibrary
from pants.backend.python.targets.python_requirement_library import PythonRequirementLibrary
from pants.backend.python.tasks.python_repl import PythonRepl
from pants.base.address import Address
from pants.base.build_file_aliases import BuildFileAliases
from pants.base.exceptions import TaskError
from pants.base.source_root import SourceRoot
from pants.base.target import Target
from pants.util.contextutil import temporary_dir
from pants_test.backend.python.tasks.python_task_test_base import PythonTaskTestBase
class PythonReplTest(PythonTaskTestBase):
@classmethod
def task_type(cls):
return PythonRepl
class JvmTarget(Target):
pass
@property
def alias_groups(self):
return super(PythonReplTest, self).alias_groups.merge(
BuildFileAliases(targets={'jvm_target': self.JvmTarget}))
def create_non_python_target(self, relpath, name):
self.create_file(relpath=self.build_path(relpath), contents=dedent("""
jvm_target(
name='{name}',
)
""").format(name=name))
return self.target(Address(relpath, name).spec)
def setUp(self):
super(PythonReplTest, self).setUp()
SourceRoot.register('3rdparty', PythonRequirementLibrary)
SourceRoot.register('src', PythonBinary, PythonLibrary)
self.six = self.create_python_requirement_library('3rdparty/six', 'six',
requirements=['six==1.9.0'])
self.requests = self.create_python_requirement_library('3rdparty/requests', 'requests',
requirements=['requests==2.6.0'])
self.library = self.create_python_library('src/lib', 'lib', {'lib.py': dedent("""
import six
def go():
six.print_('go', 'go', 'go!', sep='')
""")}, dependencies=['//3rdparty/six'])
self.binary = self.create_python_binary('src/bin', 'bin', 'lib.go', dependencies=['//src/lib'])
self.non_python_target = self.create_non_python_target('src/java', 'java')
def tearDown(self):
super(PythonReplTest, self).tearDown()
SourceRoot.reset()
ReplTaskMixin.reset_implementations()
@contextmanager
def new_io(self, input):
orig_stdin, orig_stdout, orig_stderr = sys.stdin, sys.stdout, sys.stderr
with temporary_dir() as iodir:
stdin = os.path.join(iodir, 'stdin')
stdout = os.path.join(iodir, 'stdout')
stderr = os.path.join(iodir, 'stderr')
with open(stdin, 'w') as fp:
fp.write(input)
with open(stdin, 'rb') as inp, open(stdout, 'wb') as out, open(stderr, 'wb') as err:
sys.stdin, sys.stdout, sys.stderr = inp, out, err
try:
yield inp, out, err
finally:
sys.stdin, sys.stdout, sys.stderr = orig_stdin, orig_stdout, orig_stderr
def do_test_repl(self, code, expected, targets, options=None):
if options:
self.set_options(**options)
class JvmRepl(ReplTaskMixin):
options_scope = 'test_scope_jvm_repl'
@classmethod
def select_targets(cls, target):
return isinstance(target, self.JvmTarget)
def setup_repl_session(_, targets):
raise AssertionError()
def launch_repl(_, session_setup):
raise AssertionError()
# Add a competing REPL impl.
JvmRepl.prepare(self.options, round_manager=None)
python_repl = self.create_task(self.context(target_roots=targets))
original_launcher = python_repl.launch_repl
with self.new_io('\n'.join(code)) as (inp, out, err):
def custom_io_patched_launcher(pex):
return original_launcher(pex, stdin=inp, stdout=out, stderr=err)
python_repl.launch_repl = custom_io_patched_launcher
python_repl.execute()
with open(out.name) as fp:
lines = fp.read()
if not expected:
self.assertEqual('', lines)
else:
for expectation in expected:
self.assertIn(expectation, lines)
def do_test_library(self, *targets):
self.do_test_repl(code=['from lib.lib import go',
'go()'],
expected=['gogogo!'],
targets=targets)
def test_library(self):
self.do_test_library(self.library)
def test_binary(self):
self.do_test_library(self.binary)
def test_requirement(self):
self.do_test_repl(code=['import six',
'print("python 2?:{}".format(six.PY2))'],
expected=['python 2?:True'],
targets=[self.six])
def test_mixed_python(self):
self.do_test_repl(code=['import requests',
'import six',
'from lib.lib import go',
'print("teapot response code is: {}".format(requests.codes.teapot))',
'go()',
'print("python 2?:{}".format(six.PY2))'],
expected=['teapot response code is: 418',
'gogogo!',
'python 2?:True'],
targets=[self.requests, self.binary])
def test_disallowed_mix(self):
with self.assertRaises(TaskError):
self.do_test_repl(code=['print("unreachable")'],
expected=[],
targets=[self.library, self.non_python_target])
def test_non_python_targets(self):
self.do_test_repl(code=['import java.lang.unreachable'],
expected=[''],
targets=[self.non_python_target])
def test_ipython(self):
# IPython supports shelling out with a leading !, so indirectly test its presence by reading
# the head of this very file.
with open(__file__) as fp:
me = fp.readline()
self.do_test_repl(code=['!head -1 {}'.format(__file__)],
expected=[me],
targets=[self.six], # Just to get the repl to pop up.
options={'ipython': True})
| apache-2.0 |
Castronova/EMIT | utilities/geometry.py | 1 | 9692 | __author__ = 'tonycastronova'
import numpy
from osgeo import ogr
import stdlib
from emitLogging import elog
def fromWKB(wkb):
"""
Builds a stdlib.Geometry object from a WKB string
:param wkb: wkb string
:return: stdlib.Geometry
"""
geom = None
# parse the wkt string into ogr
ogrgeom = ogr.CreateGeometryFromWkb(wkb)
# get geometry type
geomtype = ogrgeom.GetGeometryName()
if geomtype == stdlib.GeomType.POINT:
geom = fromGdalPoint(ogrgeom)
elif geomtype == stdlib.GeomType.LINESTRING:
geom = fromGdalLinestring(ogrgeom)
elif geomtype == stdlib.GeomType.POLYGON:
geom = fromGdalPolygon(ogrgeom)
else:
elog.critical("Unsupported geometry type %s, in utilities.geometry.fromWKB" % geomtype)
return geom[0]
def fromWKT(wkt):
"""
Builds a stdlib.Geometry object from a WKT string
:param wkt: wkt string
:return: stdlib.Geometry
"""
geom = None
# parse the wkt string into ogr
ogrgeom = ogr.CreateGeometryFromWkt(wkt)
# get geometry type
geomtype = ogrgeom.GetGeometryName()
if geomtype == stdlib.GeomType.POINT:
geom = fromGdalPoint(ogrgeom)
elif geomtype == stdlib.GeomType.LINESTRING:
geom = fromGdalLinestring(ogrgeom)
elif geomtype == stdlib.GeomType.POLYGON:
geom = fromGdalPolygon(ogrgeom)
elif geomtype == stdlib.GeomType.MULTILINESTRING:
geom = fromGdalMultiLinestring(ogrgeom)
elif geomtype == stdlib.GeomType.MULTIPOINT:
geom = fromGdalMultiPoint(ogrgeom)
elif geomtype == stdlib.GeomType.MULTIPOLYGON:
geom = fromGdalMultiPolygon(ogrgeom)
else:
elog.critical("Unsupported geometry type %s, in utilities.geometry.fromWKT" % geomtype)
return geom
def fromGdalPolygon(gdalpolygon):
"""
Builds a stdlib.Geometry object from a GDAL polygon
:param gdalpolygon: osgeo.gdal.Polygon
:return: numpy.array(stdlib.Geometry)
"""
# get the ring that defines the polygon
ring = gdalpolygon.GetGeometryRef(0)
# create the stdlib geometry
g = stdlib.Geometry2(ogr.wkbPolygon)
# add the ring
g.AddGeometry(ring)
# return the geometry
return numpy.array([g])
def fromGdalPoint(gdalpoint):
"""
Builds a stdlib.Geometry object from a GDAL point
    :param gdalpoint: osgeo.gdal.Point
    :return: numpy.array(stdlib.Geometry)
"""
# get the geoms point
pt = gdalpoint.GetPoint()
# create the stdlib geometry
g = stdlib.Geometry2(ogr.wkbPoint)
# add the point
g.AddPoint(*pt)
# return the geometry
return numpy.array([g])
def fromGdalLinestring(gdallinestring):
"""
    Builds a stdlib.Geometry object from a GDAL linestring
    :param gdallinestring: osgeo.gdal.LineString
    :return: numpy.array(stdlib.Geometry)
"""
# get the points of the linestring
pts = gdallinestring.GetPoints()
# create the stdlib geometry
g = stdlib.Geometry2(ogr.wkbLineString)
# add points to the linestring
for pt in pts:
g.AddPoint(*pt)
# return the geometry
return numpy.array([g])
def fromGdalMultiLinestring(gdallinestring):
"""
    Builds stdlib.Geometry objects from a GDAL multilinestring
    :param gdallinestring: osgeo.gdal.MultiLineString
    :return: numpy.array(stdlib.Geometry)
"""
geom_count = gdallinestring.GetGeometryCount()
geometry_array = []
for i in range(0, geom_count):
geom = gdallinestring.GetGeometryRef(i)
# get the points of the linestring
pts = geom.GetPoints()
# create the stdlib geometry
g = stdlib.Geometry2(ogr.wkbLineString)
# add points to the linestring
for pt in pts:
g.AddPoint(*pt)
geometry_array.append(g)
# return the geometry
return numpy.array(geometry_array)
def fromGdalMultiPoint(gdalmultipoint):
"""
Builds a stdlib.Geometry object from a GDAL multipoint
:param gdalmultipoint: osgeo.gdal.MultiPoint
:return: numpy.array(stdlib.Geometry)
"""
geom_count = gdalmultipoint.GetGeometryCount()
geometry_array = []
for i in range(0, geom_count):
geom = gdalmultipoint.GetGeometryRef(i)
# get the points of the linestring
pt = geom.GetPoint()
# create the stdlib geometry
g = stdlib.Geometry2(ogr.wkbPoint)
# add point to geometry
g.AddPoint(*pt)
geometry_array.append(g)
# return the geometry
return numpy.array(geometry_array)
def fromGdalMultiPolygon(gdalmultipolygon):
"""
Builds a stdlib.Geometry object from a GDAL multipolygon
:param gdalmultipolygon: osgeo.gdal.MultiPolygon
:return: numpy.array(stdlib.Geometry)
"""
geom_count = gdalmultipolygon.GetGeometryCount()
geometry_array = []
for i in range(0, geom_count):
polygon = gdalmultipolygon.GetGeometryRef(i)
# create the stdlib geometry
g = stdlib.Geometry2(ogr.wkbPolygon)
ring_count = polygon.GetGeometryCount()
for j in range(0, ring_count):
# get the ring for this geometry
ring = polygon.GetGeometryRef(j)
# add ring to geometry
g.AddGeometry(ring)
# save the polygon geometry in numpy array
geometry_array.append(g)
# return the geometry
return numpy.array(geometry_array)
def build_point_geometry(x, y, z=0):
"""
Builds stdlib point Geometry object
:param x: single value (float)
:param y: single value (float)
    :return: stdlib point Geometry
"""
# create an empty point
point = stdlib.Geometry2(ogr.wkbPoint)
try:
# add the x, y, z coordinates
point.AddPoint(float(x), float(y), float(z))
except Exception, e:
print e
return point
def build_point_geometries(x, y):
"""
Builds stdlib Geometry objects from a list of x and y coordinates
:param x: single value, list, or numpy array of x coordinates
:param y: single value, list, or numpy array of y coordinates
:return: numpy array of stdlib geometries
"""
# try to convert x,y coordinates into numpy arrays
if type(x) != type(y):
elog.critical('Could not convert the x,y coordinates into numpy array objects: X and Y types do not match')
return None
try:
if not isinstance(x, numpy.ndarray) and not isinstance(y, numpy.ndarray):
            if (isinstance(x, list) or isinstance(x, tuple)) and (isinstance(y, list) or isinstance(y, tuple)):
x = numpy.asarray(x)
y = numpy.asarray(y)
else:
x = numpy.array([x])
y = numpy.array([y])
except:
elog.critical('Could not convert the x,y coordinates into numpy array objects!')
return None
geoms = numpy.empty((x.shape[0]), dtype=object)
for i in range(len(x)):
point = stdlib.Geometry2(ogr.wkbPoint)
# point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(float(x[i]), float(y[i]))
geoms[i] = point
return geoms
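# Illustrative usage (hedged sketch; the coordinates are made-up values):
#
#     pts = build_point_geometries([0.0, 1.5], [10.0, 11.5])
#
# This returns a numpy object array holding two stdlib point geometries, one
# per (x, y) pair; scalars, lists/tuples and numpy arrays are all accepted as
# long as x and y are of the same type.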
def build_polygon_geometries(coords):
"""
Builds stdlib Geometry objects from coordinates
    :param coords: list or numpy array of polygon coordinates [[[1,2],[2,3], ], ]
:return: numpy array of stdlib geometries
"""
# try to convert x,y coordinates into numpy arrays
try:
if not isinstance(coords, numpy.ndarray):
if isinstance(coords, list):
coords = numpy.asarray(coords)
else:
coords = numpy.array([coords])
except:
elog.critical('Could not convert the x,y coordinates into numpy array objects!')
return None
shape = coords.shape
poly_count = shape[0] if len(shape) == 3 else 1
has_multiple = 1 if len(shape) > 2 else 0
geoms = numpy.empty((poly_count), dtype=object)
if has_multiple:
for i in xrange(0, len(coords)):
ring = ogr.Geometry(ogr.wkbLinearRing)
for pt in coords[i]:
ring.AddPoint(float(pt[0]), float(pt[1]))
poly = stdlib.Geometry2(ogr.wkbPolygon)
poly.AddGeometry(ring)
geoms[i] = poly
else:
ring = ogr.Geometry(ogr.wkbLinearRing)
for pt in coords:
ring.AddPoint(float(pt[0]), float(pt[1]))
poly = stdlib.Geometry2(ogr.wkbPolygon)
poly.AddGeometry(ring)
geoms[0] = poly
return geoms
def build_polyline_geometries(coords):
"""
Builds stdlib Geometry objects from coordinates
:param coords: list or numpy array of polyline coordinates [[[1,2],[2,3], ], ]
:return: numpy array of stdlib geometries
"""
# try to convert x,y coordinates into numpy arrays
try:
if not isinstance(coords, numpy.ndarray):
if isinstance(coords, list):
coords = numpy.asarray(coords)
else:
coords = numpy.array([coords])
except:
elog.critical('Could not convert the x,y coordinates into numpy array objects!')
return None
shape = coords.shape
poly_count = shape[0] if len(shape) == 3 else 1
has_multiple = 1 if len(shape) > 2 else 0
geoms = numpy.empty((poly_count), dtype=object)
if has_multiple:
for i in range(poly_count):
line = stdlib.Geometry2(ogr.wkbLineString)
for pt in coords[i]:
line.AddPoint(float(pt[0]), float(pt[1]))
geoms[i] = line
else:
line = stdlib.Geometry2(ogr.wkbLineString)
for pt in coords:
line.AddPoint(float(pt[0]), float(pt[1]))
geoms[0] = line
return geoms | gpl-2.0 |
GoogleCloudPlatform/PerfKitBenchmarker | perfkitbenchmarker/linux_packages/aws_credentials.py | 1 | 4345 | # Copyright 2018 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing AWS credential file installation and cleanup helpers.
AWS credentials consist of a secret access key and its ID, stored in a single
file. Following PKB's AWS setup instructions (see
https://github.com/GoogleCloudPlatform/PerfKitBenchmarker#install-aws-cli-and-setup-authentication),
the default location of the file will be at ~/.aws/credentials
This package copies the credentials file to the remote VM to make them available
for calls from the VM to other AWS services, such as SQS or Kinesis.
"""
import configparser
import logging
import os
from absl import flags
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
FLAGS = flags.FLAGS
flags.DEFINE_string(
'aws_credentials_local_path', os.path.join('~', '.aws'),
'Path where the AWS credential files can be found on the local machine.')
flags.DEFINE_string(
'aws_credentials_remote_path', '.aws',
'Path where the AWS credential files will be written on remote machines.')
flags.DEFINE_boolean(
'aws_credentials_overwrite', False,
'When set, if an AWS credential file already exists at the destination '
'specified by --aws_credentials_remote_path, it will be overwritten during '
'AWS credential file installation.')
flags.DEFINE_string('aws_s3_region', None, 'Region for the S3 bucket')
def _GetLocalPath():
"""Gets the expanded local path of the credential files.
Returns:
string. Path to the credential files on the local machine.
"""
return os.path.expanduser(FLAGS.aws_credentials_local_path)
def GetCredentials(credentials_file_name='credentials'):
"""Gets the credentials from the local credential file.
AWS credentials file is expected to be called 'credentials'.
AWS credentials file looks like this, and ends with a newline:
[default]
aws_access_key_id = {access_key}
aws_secret_access_key = {secret_access_key}
Args:
credentials_file_name: String name of the file containing the credentials.
Returns:
A string, string tuple of access_key and secret_access_key
"""
config = configparser.ConfigParser()
config.read(os.path.join(_GetLocalPath(), credentials_file_name))
key_id = config['default']['aws_access_key_id']
key = config['default']['aws_secret_access_key']
return key_id, key
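# Illustrative call (hedged sketch; the returned values are placeholders, not
# real keys):
#
#     key_id, key = GetCredentials()
#
# This reads the 'credentials' file under --aws_credentials_local_path
# (default ~/.aws) and returns the [default] profile's aws_access_key_id and
# aws_secret_access_key as strings.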
def CheckPrerequisites():
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
local_path = _GetLocalPath()
if not os.path.exists(local_path):
raise data.ResourceNotFound(
'AWS credential files were not found at {0}'.format(local_path))
def Install(vm):
"""Copies credential files to the specified VM.
Args:
vm: BaseVirtualMachine. VM that receives the credential files.
Raises:
errors.Error: If the file destination on the VM already exists, and the
overwrite behavior is not specified via --aws_credentials_overwrite.
"""
local_path = _GetLocalPath()
remote_path = FLAGS.aws_credentials_remote_path
overwrite = FLAGS.aws_credentials_overwrite
try:
vm.RemoteCommand('[[ ! -e {0} ]]'.format(remote_path))
except errors.VirtualMachine.RemoteCommandError:
err_msg = 'File {0} already exists on VM {1}.'.format(remote_path, vm)
if overwrite:
logging.info('%s Overwriting.', err_msg)
else:
raise errors.Error(err_msg)
remote_dir = os.path.dirname(remote_path)
if remote_dir:
vm.RemoteCommand('mkdir -p {0}'.format(remote_dir))
vm.PushFile(local_path, remote_path)
def Uninstall(vm):
"""Deletes the credential files from the specified VM.
Args:
vm: BaseVirtualMachine. VM that has the credential files.
"""
vm.RemoveFile(FLAGS.aws_credentials_remote_path)
| apache-2.0 |
jshleap/Collaboration | contactList/contacts-classification.py | 1 | 4165 | #!/usr/bin/python
'''
Utility scripts for contacts
Copyright (C) 2012 Alex Safatli, Christian Blouin, Jose Sergio Hleap
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
E-mail: [email protected]
'''
import centroidContact
import getContactList
import sys
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
# Generates an adjacency list using the all-atom method
# (getContactList.py) and using the centroid method (centroidContact.py).
# Data for three plots are then found as follows:
#
# True Positive (TP): Number of contacts at a given threshold (also found with atom method).
# False Positive (FP): Number of contacts at a given threshold (not found in atom method).
# False Negative (FN): Number of contacts from atom method not predicted at a given threshold.
#
# specificity Sp = TP / (TP+FP)
# sensitivity Sv = TP / (TP+FN)
# F score = 2 * (Sp*Sv)/(Sp+Sv)
# If run from command line: $ python contacts-classification.py pdbFile.pdb
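# Worked example for the metrics above (illustrative numbers only, not taken
# from any real run): with TP = 80, FP = 20 and FN = 40 at some cutoff,
#
#     Sp = 80 / (80 + 20) = 0.80
#     Sv = 80 / (80 + 40) ~= 0.67
#     F  = 2 * (0.80 * 0.67) / (0.80 + 0.67) ~= 0.73
#
# so the F score balances both rates and peaks at the cutoff whose centroid
# contacts best reproduce the atom-based contact list.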
fIn = sys.argv[1]
TPs = [] # List to hold True Positives.
FPs = [] # List to hold False Positives.
FNs = [] # List to hold False Negatives.
specificities = [] # List to hold the specificities for these cutoffs.
sensitivities = [] # List to hold the sensitivities for these cutoffs.
fScores = [] # List to hold the F Scores for these cutoffs.
cutoffs = [x*0.5 for x in xrange(6,41)] # Cutoffs ranging from 3 to 20, 0.5 increments.
# Get atom-based adjacency list.
print "\nLoading file: " + fIn
print "Will first generate atom-based contact list. This will take up to a few minutes.\n"
atomBased = getContactList.processFile(fIn)
REF = atomBased.adjList # Adjacency list.
# Get centroid-based adjacency lists. Calculate appropriately.
print "\nNow, will generate centroid-based adjacency lists. This will take a little while.\n"
for x in cutoffs:
print "\nCutoff = " + str(x) + "\n"
c = centroidContact.processFile(fIn,x)
TP = len(set(REF).intersection(set(c)))
FP = len(set(c).difference(set(REF)))
FN = len(set(REF).difference(set(c)))
TPs.append(TP)
FPs.append(FP)
FNs.append(FN)
Sp = float(TP)/(TP+FP)
Sv = float(TP)/(TP+FN)
specificities.append(Sp)
sensitivities.append(Sv)
# Avoid division by zero.
fScore = 0 if ((Sp+Sv) == 0) else (2.0*((Sp*Sv)/(Sp+Sv)))
fScores.append(fScore)
# Plot the data.
plt.plot(cutoffs,specificities)
plt.title("Specificities for Contact Determination Methods")
plt.ylabel("Specificity")
plt.xlabel("Cutoff (Angstroms)")
pp = PdfPages('contact-Sp-plot.pdf')
plt.savefig(pp, format='pdf')
pp.close()
plt.clf()
plt.plot(cutoffs,sensitivities)
plt.title("Sensitivities for Contact Determination Methods")
plt.ylabel("Sensitivity")
plt.xlabel("Cutoff (Angstroms)")
pp = PdfPages('contact-Sv-plot.pdf')
plt.savefig(pp, format='pdf')
plt.clf()
pp.close()
plt.plot(cutoffs,fScores)
plt.title("F Scores for Contact Determination Methods")
plt.ylabel("F Score")
plt.xlabel("Cutoff (Angstroms)")
pp = PdfPages('contact-Fscore-plot.pdf')
plt.savefig(pp, format='pdf')
pp.close()
# Save raw data to CSV file.
fout = open('classifications.csv','w')
fout.write("Cutoff (Angstroms)" + "\t" + "Specificity" + "\t"
+ "Sensitivity" + "\t" + "F Score" +
"\t" + "TP" + "\t" + "FP" + "\t" + "FN" + "\n")
for x in xrange(0,len(cutoffs)):
fout.write(str(cutoffs[x]) + "\t" + str(specificities[x]) +
"\t" + str(sensitivities[x]) + "\t" + str(fScores[x])
+ "\t" + str(TPs[x]) + "\t" + str(FPs[x])
+ "\t" + str(FNs[x]) + "\n")
fout.close() | gpl-3.0 |
rlbabyuk/integration_tests | utils/version.py | 2 | 11791 | # -*- coding: utf-8 -*-
import re
from cached_property import cached_property
from collections import namedtuple
from datetime import date, datetime
import multimethods as mm
from fixtures.pytest_store import store
def get_product_version(ver):
"""Return product version for given Version obj or version string
"""
ver = Version(ver)
if ver.product_version() is not None:
return ver.product_version()
else:
raise LookupError("no matching product version found for version {}".format(ver))
def get_stream(ver):
"""Return a stream name for given Version obj or version string
"""
ver = Version(ver)
if ver.stream() is not None:
return ver.stream()
else:
raise LookupError("no matching stream found for version {}".format(ver))
def current_stream():
return get_stream(store.current_appliance.version)
def get_version(obj=None):
"""
Return a Version based on obj. For CFME, 'master' version
means always the latest (compares as greater than any other version)
If obj is None, the version will be retrieved from the current appliance
"""
if isinstance(obj, Version):
return obj
if obj.startswith('master'):
return Version.latest()
return Version(obj)
def current_version():
"""A lazy cached method to return the appliance version.
Do not catch errors, since generally we cannot proceed with
testing, without knowing the server version.
"""
return store.current_appliance.version
def appliance_build_datetime():
try:
return store.current_appliance.build_datetime
except:
return None
def appliance_build_date():
try:
return store.current_appliance.build_date
except:
return None
def appliance_is_downstream():
return store.current_appliance.is_downstream
def parsedate(o):
    if isinstance(o, datetime):
        # datetime must be checked before date because datetime is a subclass of date
        return o.date()
    elif isinstance(o, date):
        return o
    else:
        # 1234-12-13
        return date(*[int(x) for x in str(o).split("-", 2)])
def before_date_or_version(date=None, version=None):
"""Function for deciding based on the build date and version.
Usage:
* If both date and version are set, then two things can happen. If the appliance is
downstream, both date and version are checked, otherwise only the date.
* If only date is set, then only date is checked.
    * If only version is set, then the version is checked if the appliance is downstream,
      otherwise it returns ``False``.
    The checks are of the form ``appliance_build_date() < date`` and ``current_version() < version``.
    Therefore, when used in an ``if`` statement, a truthy value signals an 'older' appliance and a falsy
    value signals a 'newer' one.
"""
if date is not None:
date = parsedate(date)
if date is not None and version is not None:
if not appliance_is_downstream():
return appliance_build_date() < date
else:
return appliance_build_date() < date and current_version() < version
elif date is not None and version is None:
return appliance_build_date() < date
elif date is None and version is not None:
if not appliance_is_downstream():
return False
return current_version() < version
else:
raise TypeError("You have to pass either date or version, or both!")
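# Examples of the date/version logic above (hedged sketch; the date and
# version literals are made-up values):
#
#     before_date_or_version(version='5.7')
#         -> truthy on a downstream 5.6.x appliance, always falsy upstream
#     before_date_or_version(date='2016-01-15', version='5.7')
#         -> upstream: only the build date is compared against 2016-01-15;
#            downstream: both the build date and the version must be older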
def since_date_or_version(*args, **kwargs):
"""Opposite of :py:func:`before_date_or_version`"""
return not before_date_or_version(*args, **kwargs)
def appliance_has_netapp():
try:
return store.current_appliance.has_netapp()
except:
return None
def product_version_dispatch(*_args, **_kwargs):
"""Dispatch function for use in multimethods that just ignores
arguments and dispatches on the current product version."""
return current_version()
def dependent(default_function):
m = mm.MultiMethod(default_function.__name__, product_version_dispatch)
m.add_method(mm.Default, default_function)
mm._copy_attrs(default_function, m)
return m
def pick(v_dict):
"""
Collapses an ambiguous series of objects bound to specific versions
by interrogating the CFME Version and returning the correct item.
"""
# convert keys to Versions
v_dict = {get_version(k): v for (k, v) in v_dict.items()}
versions = v_dict.keys()
sorted_matching_versions = sorted(filter(lambda v: v <= current_version(), versions),
reverse=True)
return v_dict.get(sorted_matching_versions[0]) if sorted_matching_versions else None
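# Illustrative use of pick() (hedged sketch; the dictionary values are
# made-up, not part of this module):
#
#     pick({'5.5': 'old locator', '5.7': 'new locator', LATEST: 'upstream locator'})
#
# With current_version() == Version('5.6.1') this returns 'old locator',
# because '5.5' is the highest key that is still <= the running version.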
class Version(object):
"""Version class based on distutil.version.LooseVersion"""
SUFFIXES = ('nightly', 'pre', 'alpha', 'beta', 'rc')
SUFFIXES_STR = "|".join(r'-{}(?:\d+(?:\.\d+)?)?'.format(suff) for suff in SUFFIXES)
component_re = re.compile(r'(?:\s*(\d+|[a-z]+|\.|(?:{})+$))'.format(SUFFIXES_STR))
suffix_item_re = re.compile(r'^([^0-9]+)(\d+(?:\.\d+)?)?$')
def __init__(self, vstring):
self.parse(vstring)
def parse(self, vstring):
if vstring is None:
raise ValueError('Version string cannot be None')
elif isinstance(vstring, (list, tuple)):
vstring = ".".join(map(str, vstring))
elif vstring:
vstring = str(vstring).strip()
if vstring in ('master', 'latest', 'upstream') or 'fine' in vstring or 'euwe' in vstring:
vstring = 'master'
# TODO These aren't used anywhere - remove?
if vstring == 'darga-3':
vstring = '5.6.1'
if vstring == 'darga-4.1':
vstring = '5.6.2'
if vstring == 'darga-5':
vstring = '5.6.3'
components = filter(lambda x: x and x != '.',
self.component_re.findall(vstring))
# Check if we have a version suffix which denotes pre-release
if components and components[-1].startswith('-'):
self.suffix = components[-1][1:].split('-') # Chop off the -
components = components[:-1]
else:
self.suffix = None
for i in range(len(components)):
try:
components[i] = int(components[i])
except ValueError:
pass
self.vstring = vstring
self.version = components
@cached_property
def normalized_suffix(self):
"""Turns the string suffixes to numbers. Creates a list of tuples.
The list of tuples is consisting of 2-tuples, the first value says the position of the
suffix in the list and the second number the numeric value of an eventual numeric suffix.
If the numeric suffix is not present in a field, then the value is 0
"""
numberized = []
if self.suffix is None:
return numberized
for item in self.suffix:
suff_t, suff_ver = self.suffix_item_re.match(item).groups()
if suff_ver is None or len(suff_ver) == 0:
suff_ver = 0.0
else:
suff_ver = float(suff_ver)
suff_t = self.SUFFIXES.index(suff_t)
numberized.append((suff_t, suff_ver))
return numberized
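    # For illustration (hypothetical version string): Version('5.6.0-beta2')
    # parses the suffix as ['beta2'], which normalizes to [(3, 2.0)] -- 3 is
    # the index of 'beta' in SUFFIXES and 2.0 the numeric part -- so it sorts
    # after a '-alpha' build and before an '-rc1' build of the same version.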
@classmethod
def latest(cls):
try:
return cls._latest
except AttributeError:
cls._latest = cls('latest')
return cls._latest
@classmethod
def lowest(cls):
try:
return cls._lowest
except AttributeError:
cls._lowest = cls('lowest')
return cls._lowest
def __str__(self):
return self.vstring
def __repr__(self):
return '{}({})'.format(type(self).__name__, repr(self.vstring))
def __cmp__(self, other):
try:
if not isinstance(other, type(self)):
other = Version(other)
except:
raise ValueError('Cannot compare Version to {}'.format(type(other).__name__))
if self == other:
return 0
elif self == self.latest() or other == self.lowest():
return 1
elif self == self.lowest() or other == self.latest():
return -1
else:
result = cmp(self.version, other.version)
if result != 0:
return result
# Use suffixes to decide
if self.suffix is None and other.suffix is None:
# No suffix, the same
return 0
elif self.suffix is None:
# This does not have suffix but the other does so this is "newer"
return 1
elif other.suffix is None:
# This one does have suffix and the other does not so this one is older
return -1
else:
# Both have suffixes, so do some math
return cmp(self.normalized_suffix, other.normalized_suffix)
def __eq__(self, other):
try:
if not isinstance(other, type(self)):
other = Version(other)
return (
self.version == other.version and self.normalized_suffix == other.normalized_suffix)
except:
return False
def __contains__(self, ver):
"""Enables to use ``in`` expression for :py:meth:`Version.is_in_series`.
Example:
``"5.5.5.2" in Version("5.5") returns ``True``
Args:
ver: Version that should be checked if it is in series of this version. If
:py:class:`str` provided, it will be converted to :py:class:`Version`.
"""
try:
return Version(ver).is_in_series(self)
except:
return False
def is_in_series(self, series):
"""This method checks whether the version belongs to another version's series.
Eg.: ``Version("5.5.5.2").is_in_series("5.5")`` returns ``True``
Args:
series: Another :py:class:`Version` to check against. If string provided, will be
converted to :py:class:`Version`
"""
if not isinstance(series, Version):
series = get_version(series)
if self in {self.lowest(), self.latest()}:
if series == self:
return True
else:
return False
return series.version == self.version[:len(series.version)]
def series(self, n=2):
return ".".join(self.vstring.split(".")[:n])
def stream(self):
for v, spt in version_stream_product_mapping.items():
if self.is_in_series(v):
return spt.stream
def product_version(self):
for v, spt in version_stream_product_mapping.items():
if self.is_in_series(v):
return spt.product_version
LOWEST = Version.lowest()
LATEST = Version.latest()
UPSTREAM = LATEST
SPTuple = namedtuple('StreamProductTuple', ['stream', 'product_version'])
# Maps stream and product version to each app version
version_stream_product_mapping = {
'5.2': SPTuple('downstream-52z', '3.0'),
'5.3': SPTuple('downstream-53z', '3.1'),
'5.4': SPTuple('downstream-54z', '3.2'),
'5.5': SPTuple('downstream-55z', '4.0'),
'5.6': SPTuple('downstream-56z', '4.1'),
'5.7': SPTuple('downstream-57z', '4.2'),
'5.8': SPTuple('downstream-58z', '4.5'),
LATEST: SPTuple('upstream', 'master')
}
# Compare Versions using > for dispatch
@mm.is_a.method((Version, Version))
def _is_a_loose(x, y):
return x >= y
@mm.is_a.method((str, Version))
def _is_a_slv(x, y):
return mm.is_a(Version(x), y)
@mm.is_a.method((Version, str))
def _is_a_lvs(x, y):
return mm.is_a(x, Version(y))
| gpl-2.0 |
tayfun/django | tests/auth_tests/models/invalid_models.py | 251 | 1340 | from django.contrib.auth.models import AbstractBaseUser, UserManager
from django.db import models
class CustomUserNonUniqueUsername(AbstractBaseUser):
"""
A user with a non-unique username.
This model is not invalid if it is used with a custom authentication
backend which supports non-unique usernames.
"""
username = models.CharField(max_length=30)
email = models.EmailField(blank=True)
is_staff = models.BooleanField(default=False)
is_superuser = models.BooleanField(default=False)
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
objects = UserManager()
class Meta:
app_label = 'auth'
class CustomUserNonListRequiredFields(AbstractBaseUser):
"A user with a non-list REQUIRED_FIELDS"
username = models.CharField(max_length=30, unique=True)
date_of_birth = models.DateField()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = 'date_of_birth'
class Meta:
app_label = 'auth'
class CustomUserBadRequiredFields(AbstractBaseUser):
"A user with a USERNAME_FIELD that appears in REQUIRED_FIELDS (invalid)"
username = models.CharField(max_length=30, unique=True)
date_of_birth = models.DateField()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['username', 'date_of_birth']
class Meta:
app_label = 'auth'
| bsd-3-clause |
iw3hxn/LibrERP | revenue_stamp/revenue_stamp.py | 3 | 7654 | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2014 Didotech SRL (info at didotech.com)
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from osv import osv, fields
class partner(osv.osv):
_inherit = 'res.partner'
_columns = {
        'charge_revenue_stamp': fields.boolean('Revenue stamp Charged in Invoice', help="If the invoice is VAT exempt, the revenue stamp's cost will be charged in invoices."),
'charge_invoice_cost': fields.boolean('Costs Charged in Invoice', help="Costs will be charged in invoices."),
'product_toinvoice_ids': fields.one2many('toinvoice.product', 'product_toinvoice_id', 'Invoice Costs'),
}
class unique_revenue_product(osv.osv):
_name = 'unique.revenue.product'
_description = 'Unique revenue product'
_columns = {
'name': fields.char('Description', size=50,),
'unique_revenue_stamp': fields.boolean('Product for revenue stamp'),
'min_for_stamp': fields.float('Minimal amount for stamp charged in invoice'),
}
_defaults = {
'min_for_stamp': 77.48,
}
_sql_constraints = [
('unique_revenue_stamp', 'unique (unique_revenue_stamp)', 'The revenue stamp product must be unique !'),
]
class toinvoice_product(osv.osv):
_name = 'toinvoice.product'
_columns = {
'name': fields.char('Notes', size=50,),
'product_toinvoice_id': fields.many2one('res.partner', 'Partner related'),
'product_id': fields.many2one('product.product', 'Products to be charged in invoices'),
'qty': fields.float('Quantity to be invoiced'),
}
class product_product(osv.osv):
_inherit = 'product.product'
_columns = {
'unique_revenue_stamp_id': fields.many2one('unique.revenue.product', 'Product id for revenue stamp'),
}
_sql_constraints = [
('unique_revenue_stamp_id', 'unique (unique_revenue_stamp_id)', 'The revenue stamp product must be unique !'),
]
class account_tax_code(osv.osv):
_inherit = 'account.tax.code'
_columns = {
'stamp_in_invoice': fields.boolean('Stamp Charged in Invoice', help="Revenue stamp's cost charged in invoices."),
}
class account_invoice(osv.osv):
_inherit = 'account.invoice'
def button_reset_taxes(self, cr, uid, ids, context=None):
result = super(account_invoice, self).button_reset_taxes(cr, uid, ids, context)
self.revenue_stamp(cr, uid, ids, context)
return result
def action_number(self, cr, uid, ids, context=None):
super(account_invoice, self).action_number(cr, uid, ids, context)
self.revenue_stamp(cr, uid, ids, context)
return True
def revenue_stamp(self, cr, uid, ids, context=None):
"""
        This function adds an invoice line with the revenue stamp product:
        if the partner has 'charge_revenue_stamp' selected, the product is added with the revenue
        stamp's cost; otherwise the product is added at zero cost.
"""
if not ids:
return True
if isinstance(ids, (list, tuple)):
ids = ids[0]
product_toinvoice_ids = []
invoice = self.browse(cr, uid, ids, context)
if not invoice.partner_id or not invoice.invoice_line:
return False
partner = invoice.partner_id
product_obj = self.pool.get('product.product')
revenue_product_id = product_obj.search(cr, uid, [('unique_revenue_stamp_id.unique_revenue_stamp', '=', True)])
if revenue_product_id:
revenue_product = product_obj.browse(cr, uid, revenue_product_id[0], context)
if partner.charge_invoice_cost:
for product_toinvoice_id in partner.product_toinvoice_ids:
product_toinvoice_ids.append(product_toinvoice_id.product_id.id)
base_tax_amount = 0.0
for invoice_tax in invoice.tax_line:
if invoice_tax.tax_code_id.stamp_in_invoice:
base_tax_amount += invoice_tax.base_amount
add_product_stamp = False
if base_tax_amount >= revenue_product.unique_revenue_stamp_id.min_for_stamp:
add_product_stamp = True
if partner.charge_revenue_stamp:
price = revenue_product.product_tmpl_id.list_price
else:
price = 0.0
for invoice_line in invoice.invoice_line:
if invoice_line.product_id.id == revenue_product_id[0]:
add_product_stamp = False
for invoice_product_id in product_toinvoice_ids:
if invoice_line.product_id.id == invoice_product_id:
product_toinvoice_ids.remove(invoice_product_id)
invoice_lines = []
if add_product_stamp:
invoice_lines.append({
'name': revenue_product.name,
'product_id': revenue_product_id[0],
'quantity': 1.0,
'uos_id': revenue_product.product_tmpl_id.uom_id.id,
'price_unit': price,
'price_subtotal': price,
'partner_id': partner.id,
'invoice_id': invoice.id,
'account_id': invoice.invoice_line[0].account_id.id,
'company_id': invoice.company_id.id,
})
if product_toinvoice_ids:
partner_toinvoice_products = self.pool.get('toinvoice.product').browse(cr, uid, product_toinvoice_ids, context)
for partner_toinvoice_product in partner_toinvoice_products:
invoice_lines.append({
'name': partner_toinvoice_product.product_id.name,
'product_id': partner_toinvoice_product.product_id.id,
'quantity': partner_toinvoice_product.qty,
'uos_id': partner_toinvoice_product.product_id.product_tmpl_id.uom_id.id,
'price_unit': partner_toinvoice_product.product_id.product_tmpl_id.list_price,
'price_subtotal': partner_toinvoice_product.product_id.product_tmpl_id.list_price,
'partner_id': partner.id,
'invoice_id': invoice.id,
'account_id': invoice.invoice_line[0].account_id.id,
'company_id': invoice.company_id.id,
})
invoice_line_obj = self.pool.get('account.invoice.line')
for invoice_line in invoice_lines:
invoice_line_obj.create(cr, uid, invoice_line, context)
return True
| agpl-3.0 |
SivilTaram/edx-platform | lms/djangoapps/shoppingcart/migrations/0018_auto__add_donation.py | 120 | 15611 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Donation'
db.create_table('shoppingcart_donation', (
('orderitem_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['shoppingcart.OrderItem'], unique=True, primary_key=True)),
('donation_type', self.gf('django.db.models.fields.CharField')(default='general', max_length=32)),
('course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
))
db.send_create_signal('shoppingcart', ['Donation'])
def backwards(self, orm):
# Deleting model 'Donation'
db.delete_table('shoppingcart_donation')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'shoppingcart.certificateitem': {
'Meta': {'object_name': 'CertificateItem', '_ormbases': ['shoppingcart.OrderItem']},
'course_enrollment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['student.CourseEnrollment']"}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.coupon': {
'Meta': {'object_name': 'Coupon'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 2, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'percentage_discount': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shoppingcart.couponredemption': {
'Meta': {'object_name': 'CouponRedemption'},
'coupon': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Coupon']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.courseregistrationcode': {
'Meta': {'object_name': 'CourseRegistrationCode'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 2, 0, 0)'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_by_user'", 'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invoice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Invoice']", 'null': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'purchase_order'", 'null': 'True', 'to': "orm['shoppingcart.Order']"})
},
'shoppingcart.donation': {
'Meta': {'object_name': 'Donation', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'donation_type': ('django.db.models.fields.CharField', [], {'default': "'general'", 'max_length': '32'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.invoice': {
'Meta': {'object_name': 'Invoice'},
'address_line_1': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'address_line_2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'address_line_3': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'company_contact_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_contact_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'company_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'customer_reference_number': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_reference': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'recipient_email': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'recipient_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'state': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True'}),
'total_amount': ('django.db.models.fields.FloatField', [], {}),
'zip': ('django.db.models.fields.CharField', [], {'max_length': '15', 'null': 'True'})
},
'shoppingcart.order': {
'Meta': {'object_name': 'Order'},
'bill_to_cardtype': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'bill_to_ccnum': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_city': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_country': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_first': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_last': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'bill_to_postalcode': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'bill_to_state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'blank': 'True'}),
'bill_to_street1': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'bill_to_street2': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processor_reply_dump': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'purchase_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'refunded_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'fulfilled_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line_desc': ('django.db.models.fields.CharField', [], {'default': "'Misc. Item'", 'max_length': '1024'}),
'list_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '30', 'decimal_places': '2'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']"}),
'qty': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'refund_requested_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'report_comments': ('django.db.models.fields.TextField', [], {'default': "''"}),
'service_fee': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'cart'", 'max_length': '32', 'db_index': 'True'}),
'unit_cost': ('django.db.models.fields.DecimalField', [], {'default': '0.0', 'max_digits': '30', 'decimal_places': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'shoppingcart.paidcourseregistration': {
'Meta': {'object_name': 'PaidCourseRegistration', '_ormbases': ['shoppingcart.OrderItem']},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '128', 'db_index': 'True'}),
'mode': ('django.db.models.fields.SlugField', [], {'default': "'honor'", 'max_length': '50'}),
'orderitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['shoppingcart.OrderItem']", 'unique': 'True', 'primary_key': 'True'})
},
'shoppingcart.paidcourseregistrationannotation': {
'Meta': {'object_name': 'PaidCourseRegistrationAnnotation'},
'annotation': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'course_id': ('xmodule_django.models.CourseKeyField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'shoppingcart.registrationcoderedemption': {
'Meta': {'object_name': 'RegistrationCodeRedemption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.Order']", 'null': 'True'}),
'redeemed_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2014, 10, 2, 0, 0)', 'null': 'True'}),
'redeemed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'registration_code': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shoppingcart.CourseRegistrationCode']"})
},
'student.courseenrollment': {
'Meta': {'ordering': "('user', 'course_id')", 'unique_together': "(('user', 'course_id'),)", 'object_name': 'CourseEnrollment'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mode': ('django.db.models.fields.CharField', [], {'default': "'honor'", 'max_length': '100'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['shoppingcart']
| agpl-3.0 |
glaudsonml/kurgan-ai | tools/sqlmap/lib/controller/checks.py | 1 | 61798 | #!/usr/bin/env python
"""
Copyright (c) 2006-2016 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import copy
import httplib
import random
import re
import socket
import time
from subprocess import Popen as execute
from extra.beep.beep import beep
from lib.core.agent import agent
from lib.core.common import Backend
from lib.core.common import extractRegexResult
from lib.core.common import extractTextTagContent
from lib.core.common import findDynamicContent
from lib.core.common import Format
from lib.core.common import getLastRequestHTTPError
from lib.core.common import getPublicTypeMembers
from lib.core.common import getSafeExString
from lib.core.common import getSortedInjectionTests
from lib.core.common import getUnicode
from lib.core.common import hashDBRetrieve
from lib.core.common import hashDBWrite
from lib.core.common import intersect
from lib.core.common import listToStrValue
from lib.core.common import parseFilePaths
from lib.core.common import popValue
from lib.core.common import pushValue
from lib.core.common import randomInt
from lib.core.common import randomStr
from lib.core.common import readInput
from lib.core.common import showStaticWords
from lib.core.common import singleTimeLogMessage
from lib.core.common import singleTimeWarnMessage
from lib.core.common import urlencode
from lib.core.common import wasLastResponseDBMSError
from lib.core.common import wasLastResponseHTTPError
from lib.core.defaults import defaults
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.datatype import AttribDict
from lib.core.datatype import InjectionDict
from lib.core.decorators import cachedmethod
from lib.core.dicts import FROM_DUMMY_TABLE
from lib.core.enums import DBMS
from lib.core.enums import HASHDB_KEYS
from lib.core.enums import HEURISTIC_TEST
from lib.core.enums import HTTP_HEADER
from lib.core.enums import HTTPMETHOD
from lib.core.enums import NOTE
from lib.core.enums import NULLCONNECTION
from lib.core.enums import PAYLOAD
from lib.core.enums import PLACE
from lib.core.enums import REDIRECTION
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapNoneDataException
from lib.core.exception import SqlmapSilentQuitException
from lib.core.exception import SqlmapUserQuitException
from lib.core.settings import DEFAULT_GET_POST_DELIMITER
from lib.core.settings import DUMMY_NON_SQLI_CHECK_APPENDIX
from lib.core.settings import FORMAT_EXCEPTION_STRINGS
from lib.core.settings import HEURISTIC_CHECK_ALPHABET
from lib.core.settings import IDS_WAF_CHECK_PAYLOAD
from lib.core.settings import IDS_WAF_CHECK_RATIO
from lib.core.settings import IDS_WAF_CHECK_TIMEOUT
from lib.core.settings import NON_SQLI_CHECK_PREFIX_SUFFIX_LENGTH
from lib.core.settings import SUHOSIN_MAX_VALUE_LENGTH
from lib.core.settings import SUPPORTED_DBMS
from lib.core.settings import URI_HTTP_HEADER
from lib.core.settings import UPPER_RATIO_BOUND
from lib.core.threads import getCurrentThreadData
from lib.request.connect import Connect as Request
from lib.request.inject import checkBooleanExpression
from lib.request.templates import getPageTemplate
from lib.techniques.union.test import unionTest
from lib.techniques.union.use import configUnion
def checkSqlInjection(place, parameter, value):
# Store here the details about boundaries and payload used to
# successfully inject
injection = InjectionDict()
# Localized thread data needed for some methods
threadData = getCurrentThreadData()
# Set the flag for SQL injection test mode
kb.testMode = True
paramType = conf.method if conf.method not in (None, HTTPMETHOD.GET, HTTPMETHOD.POST) else place
tests = getSortedInjectionTests()
seenPayload = set()
while tests:
test = tests.pop(0)
try:
if kb.endDetection:
break
if conf.dbms is None:
                # If the DBMS has not yet been fingerprinted (via simple heuristic check
                # or via DBMS-specific payload) and boolean-based blind has been identified,
                # then attempt to identify the DBMS with a simple DBMS-specific
                # boolean-based test
if not injection.dbms and PAYLOAD.TECHNIQUE.BOOLEAN in injection.data:
if not Backend.getIdentifiedDbms() and kb.heuristicDbms is None:
kb.heuristicDbms = heuristicCheckDbms(injection)
# If the DBMS has already been fingerprinted (via DBMS-specific
# error message, simple heuristic check or via DBMS-specific
# payload), ask the user to limit the tests to the fingerprinted
# DBMS
if kb.reduceTests is None and not conf.testFilter and (intersect(Backend.getErrorParsedDBMSes(), \
SUPPORTED_DBMS, True) or kb.heuristicDbms or injection.dbms):
msg = "it looks like the back-end DBMS is '%s'. " % (Format.getErrorParsedDBMSes() or kb.heuristicDbms or injection.dbms)
msg += "Do you want to skip test payloads specific for other DBMSes? [Y/n]"
kb.reduceTests = (Backend.getErrorParsedDBMSes() or [kb.heuristicDbms]) if readInput(msg, default='Y').upper() == 'Y' else []
# If the DBMS has been fingerprinted (via DBMS-specific error
# message, via simple heuristic check or via DBMS-specific
# payload), ask the user to extend the tests to all DBMS-specific,
# regardless of --level and --risk values provided
if kb.extendTests is None and not conf.testFilter and (conf.level < 5 or conf.risk < 3) \
and (intersect(Backend.getErrorParsedDBMSes(), SUPPORTED_DBMS, True) or \
kb.heuristicDbms or injection.dbms):
msg = "for the remaining tests, do you want to include all tests "
msg += "for '%s' extending provided " % (Format.getErrorParsedDBMSes() or kb.heuristicDbms or injection.dbms)
msg += "level (%d)" % conf.level if conf.level < 5 else ""
msg += " and " if conf.level < 5 and conf.risk < 3 else ""
msg += "risk (%d)" % conf.risk if conf.risk < 3 else ""
msg += " values? [Y/n]" if conf.level < 5 and conf.risk < 3 else " value? [Y/n]"
kb.extendTests = (Backend.getErrorParsedDBMSes() or [kb.heuristicDbms]) if readInput(msg, default='Y').upper() == 'Y' else []
title = test.title
kb.testType = stype = test.stype
clause = test.clause
unionExtended = False
if stype == PAYLOAD.TECHNIQUE.UNION:
configUnion(test.request.char)
if "[CHAR]" in title:
if conf.uChar is None:
continue
else:
title = title.replace("[CHAR]", conf.uChar)
elif "[RANDNUM]" in title or "(NULL)" in title:
title = title.replace("[RANDNUM]", "random number")
if test.request.columns == "[COLSTART]-[COLSTOP]":
if conf.uCols is None:
continue
else:
title = title.replace("[COLSTART]", str(conf.uColsStart))
title = title.replace("[COLSTOP]", str(conf.uColsStop))
elif conf.uCols is not None:
debugMsg = "skipping test '%s' because the user " % title
debugMsg += "provided custom column range %s" % conf.uCols
logger.debug(debugMsg)
continue
match = re.search(r"(\d+)-(\d+)", test.request.columns)
if injection.data and match:
lower, upper = int(match.group(1)), int(match.group(2))
for _ in (lower, upper):
if _ > 1:
unionExtended = True
test.request.columns = re.sub(r"\b%d\b" % _, str(2 * _), test.request.columns)
title = re.sub(r"\b%d\b" % _, str(2 * _), title)
test.title = re.sub(r"\b%d\b" % _, str(2 * _), test.title)
            # Skip test if the user wants to test only for a specific
            # technique
if conf.tech and isinstance(conf.tech, list) and stype not in conf.tech:
debugMsg = "skipping test '%s' because the user " % title
debugMsg += "specified to test only for "
debugMsg += "%s techniques" % " & ".join(map(lambda x: PAYLOAD.SQLINJECTION[x], conf.tech))
logger.debug(debugMsg)
continue
# Skip test if it is the same SQL injection type already
# identified by another test
if injection.data and stype in injection.data:
debugMsg = "skipping test '%s' because " % title
debugMsg += "the payload for %s has " % PAYLOAD.SQLINJECTION[stype]
debugMsg += "already been identified"
logger.debug(debugMsg)
continue
# Parse DBMS-specific payloads' details
if "details" in test and "dbms" in test.details:
payloadDbms = test.details.dbms
else:
payloadDbms = None
# Skip tests if title, vector or DBMS is not included by the
# given test filter
if conf.testFilter and not any(conf.testFilter in str(item) or \
re.search(conf.testFilter, str(item), re.I) for item in \
(test.title, test.vector, payloadDbms)):
debugMsg = "skipping test '%s' because its " % title
debugMsg += "name/vector/DBMS is not included by the given filter"
logger.debug(debugMsg)
continue
# Skip tests if title, vector or DBMS is included by the
# given skip filter
if conf.testSkip and any(conf.testSkip in str(item) or \
re.search(conf.testSkip, str(item), re.I) for item in \
(test.title, test.vector, payloadDbms)):
debugMsg = "skipping test '%s' because its " % title
debugMsg += "name/vector/DBMS is included by the given skip filter"
logger.debug(debugMsg)
continue
if payloadDbms is not None:
# Skip DBMS-specific test if it does not match the user's
# provided DBMS
if conf.dbms is not None and not intersect(payloadDbms, conf.dbms, True):
debugMsg = "skipping test '%s' because " % title
debugMsg += "the provided DBMS is %s" % conf.dbms
logger.debug(debugMsg)
continue
# Skip DBMS-specific test if it does not match the
# previously identified DBMS (via DBMS-specific payload)
if injection.dbms is not None and not intersect(payloadDbms, injection.dbms, True):
debugMsg = "skipping test '%s' because the identified " % title
debugMsg += "back-end DBMS is %s" % injection.dbms
logger.debug(debugMsg)
continue
# Skip DBMS-specific test if it does not match the
# previously identified DBMS (via DBMS-specific error message)
if kb.reduceTests and not intersect(payloadDbms, kb.reduceTests, True):
debugMsg = "skipping test '%s' because the parsed " % title
debugMsg += "error message(s) showed that the back-end DBMS "
debugMsg += "could be %s" % Format.getErrorParsedDBMSes()
logger.debug(debugMsg)
continue
            # If the user did not decide to extend the tests to all
            # DBMS-specific payloads, or the test payload is not specific to the
            # identified DBMS, then only test for it if both level and risk
            # are below the corresponding configuration's level and risk
            # values
if not conf.testFilter and not (kb.extendTests and intersect(payloadDbms, kb.extendTests, True)):
# Skip test if the risk is higher than the provided (or default)
# value
if test.risk > conf.risk:
debugMsg = "skipping test '%s' because the risk (%d) " % (title, test.risk)
debugMsg += "is higher than the provided (%d)" % conf.risk
logger.debug(debugMsg)
continue
# Skip test if the level is higher than the provided (or default)
# value
if test.level > conf.level:
debugMsg = "skipping test '%s' because the level (%d) " % (title, test.level)
debugMsg += "is higher than the provided (%d)" % conf.level
logger.debug(debugMsg)
continue
# Skip test if it does not match the same SQL injection clause
# already identified by another test
clauseMatch = False
for clauseTest in clause:
if injection.clause is not None and clauseTest in injection.clause:
clauseMatch = True
break
if clause != [0] and injection.clause and injection.clause != [0] and not clauseMatch:
debugMsg = "skipping test '%s' because the clauses " % title
debugMsg += "differ from the clause already identified"
logger.debug(debugMsg)
continue
# Skip test if the user provided custom character (for UNION-based payloads)
if conf.uChar is not None and ("random number" in title or "(NULL)" in title):
debugMsg = "skipping test '%s' because the user " % title
debugMsg += "provided a specific character, %s" % conf.uChar
logger.debug(debugMsg)
continue
infoMsg = "testing '%s'" % title
logger.info(infoMsg)
# Force back-end DBMS according to the current test DBMS value
# for proper payload unescaping
Backend.forceDbms(payloadDbms[0] if isinstance(payloadDbms, list) else payloadDbms)
# Parse test's <request>
comment = agent.getComment(test.request) if len(conf.boundaries) > 1 else None
fstPayload = agent.cleanupPayload(test.request.payload, origValue=value if place not in (PLACE.URI, PLACE.CUSTOM_POST, PLACE.CUSTOM_HEADER) else None)
# Favoring non-string specific boundaries in case of digit-like parameter values
if value.isdigit():
boundaries = sorted(copy.deepcopy(conf.boundaries), key=lambda x: any(_ in (x.prefix or "") or _ in (x.suffix or "") for _ in ('"', '\'')))
else:
boundaries = conf.boundaries
for boundary in boundaries:
injectable = False
# Skip boundary if the level is higher than the provided (or
# default) value
# Parse boundary's <level>
if boundary.level > conf.level and not (kb.extendTests and intersect(payloadDbms, kb.extendTests, True)):
continue
# Skip boundary if it does not match against test's <clause>
# Parse test's <clause> and boundary's <clause>
clauseMatch = False
for clauseTest in test.clause:
if clauseTest in boundary.clause:
clauseMatch = True
break
if test.clause != [0] and boundary.clause != [0] and not clauseMatch:
continue
# Skip boundary if it does not match against test's <where>
# Parse test's <where> and boundary's <where>
whereMatch = False
for where in test.where:
if where in boundary.where:
whereMatch = True
break
if not whereMatch:
continue
# Parse boundary's <prefix>, <suffix> and <ptype>
prefix = boundary.prefix if boundary.prefix else ""
suffix = boundary.suffix if boundary.suffix else ""
ptype = boundary.ptype
# Options --prefix/--suffix have a higher priority (if set by user)
prefix = conf.prefix if conf.prefix is not None else prefix
suffix = conf.suffix if conf.suffix is not None else suffix
comment = None if conf.suffix is not None else comment
# If the previous injections succeeded, we know which prefix,
# suffix and parameter type to use for further tests, no
# need to cycle through the boundaries for the following tests
condBound = (injection.prefix is not None and injection.suffix is not None)
condBound &= (injection.prefix != prefix or injection.suffix != suffix)
condType = injection.ptype is not None and injection.ptype != ptype
# If the payload is an inline query test for it regardless
# of previously identified injection types
if stype != PAYLOAD.TECHNIQUE.QUERY and (condBound or condType):
continue
# For each test's <where>
for where in test.where:
templatePayload = None
vector = None
                    # Treat the parameter's original value according to the
                    # test's <where> tag
if where == PAYLOAD.WHERE.ORIGINAL or conf.prefix:
origValue = value
if kb.tamperFunctions:
templatePayload = agent.payload(place, parameter, value="", newValue=origValue, where=where)
elif where == PAYLOAD.WHERE.NEGATIVE:
                        # Use a different page template than the original
                        # one as we are changing the parameter's value, which
                        # will likely result in different content
kb.data.setdefault("randomInt", str(randomInt(10)))
kb.data.setdefault("randomStr", str(randomStr(10)))
if conf.invalidLogical:
_ = int(kb.data.randomInt[:2])
origValue = "%s AND %s=%s" % (value, _, _ + 1)
elif conf.invalidBignum:
origValue = kb.data.randomInt[:6]
elif conf.invalidString:
origValue = kb.data.randomStr[:6]
else:
origValue = "-%s" % kb.data.randomInt[:4]
templatePayload = agent.payload(place, parameter, value="", newValue=origValue, where=where)
elif where == PAYLOAD.WHERE.REPLACE:
origValue = ""
kb.pageTemplate, kb.errorIsNone = getPageTemplate(templatePayload, place)
# Forge request payload by prepending with boundary's
# prefix and appending the boundary's suffix to the
# test's ' <payload><comment> ' string
if fstPayload:
boundPayload = agent.prefixQuery(fstPayload, prefix, where, clause)
boundPayload = agent.suffixQuery(boundPayload, comment, suffix, where)
reqPayload = agent.payload(place, parameter, newValue=boundPayload, where=where)
if reqPayload:
if reqPayload in seenPayload:
continue
else:
seenPayload.add(reqPayload)
else:
reqPayload = None
# Perform the test's request and check whether or not the
# payload was successful
# Parse test's <response>
for method, check in test.response.items():
check = agent.cleanupPayload(check, origValue=value if place not in (PLACE.URI, PLACE.CUSTOM_POST, PLACE.CUSTOM_HEADER) else None)
# In case of boolean-based blind SQL injection
if method == PAYLOAD.METHOD.COMPARISON:
# Generate payload used for comparison
def genCmpPayload():
sndPayload = agent.cleanupPayload(test.response.comparison, origValue=value if place not in (PLACE.URI, PLACE.CUSTOM_POST, PLACE.CUSTOM_HEADER) else None)
# Forge response payload by prepending with
# boundary's prefix and appending the boundary's
# suffix to the test's ' <payload><comment> '
# string
boundPayload = agent.prefixQuery(sndPayload, prefix, where, clause)
boundPayload = agent.suffixQuery(boundPayload, comment, suffix, where)
cmpPayload = agent.payload(place, parameter, newValue=boundPayload, where=where)
return cmpPayload
# Useful to set kb.matchRatio at first based on
# the False response content
kb.matchRatio = None
kb.negativeLogic = (where == PAYLOAD.WHERE.NEGATIVE)
Request.queryPage(genCmpPayload(), place, raise404=False)
falsePage = threadData.lastComparisonPage or ""
# Perform the test's True request
trueResult = Request.queryPage(reqPayload, place, raise404=False)
truePage = threadData.lastComparisonPage or ""
if trueResult and not(truePage == falsePage and not kb.nullConnection):
# Perform the test's False request
falseResult = Request.queryPage(genCmpPayload(), place, raise404=False)
if not falseResult:
if kb.negativeLogic:
boundPayload = agent.prefixQuery(kb.data.randomStr, prefix, where, clause)
boundPayload = agent.suffixQuery(boundPayload, comment, suffix, where)
errorPayload = agent.payload(place, parameter, newValue=boundPayload, where=where)
errorResult = Request.queryPage(errorPayload, place, raise404=False)
if errorResult:
continue
infoMsg = "%s parameter '%s' seems to be '%s' injectable " % (paramType, parameter, title)
logger.info(infoMsg)
injectable = True
if not injectable and not any((conf.string, conf.notString, conf.regexp)) and kb.pageStable:
trueSet = set(extractTextTagContent(truePage))
falseSet = set(extractTextTagContent(falsePage))
candidates = filter(None, (_.strip() if _.strip() in (kb.pageTemplate or "") and _.strip() not in falsePage and _.strip() not in threadData.lastComparisonHeaders else None for _ in (trueSet - falseSet)))
if candidates:
conf.string = candidates[0]
infoMsg = "%s parameter '%s' seems to be '%s' injectable (with --string=\"%s\")" % (paramType, parameter, title, repr(conf.string).lstrip('u').strip("'"))
logger.info(infoMsg)
injectable = True
# In case of error-based SQL injection
elif method == PAYLOAD.METHOD.GREP:
# Perform the test's request and grep the response
# body for the test's <grep> regular expression
try:
page, headers = Request.queryPage(reqPayload, place, content=True, raise404=False)
output = extractRegexResult(check, page, re.DOTALL | re.IGNORECASE) \
or extractRegexResult(check, listToStrValue( \
[headers[key] for key in headers.keys() if key.lower() != URI_HTTP_HEADER.lower()] \
if headers else None), re.DOTALL | re.IGNORECASE) \
or extractRegexResult(check, threadData.lastRedirectMsg[1] \
if threadData.lastRedirectMsg and threadData.lastRedirectMsg[0] == \
threadData.lastRequestUID else None, re.DOTALL | re.IGNORECASE)
if output:
result = output == "1"
if result:
infoMsg = "%s parameter '%s' is '%s' injectable " % (paramType, parameter, title)
logger.info(infoMsg)
injectable = True
except SqlmapConnectionException, msg:
debugMsg = "problem occurred most likely because the "
debugMsg += "server hasn't recovered as expected from the "
debugMsg += "error-based payload used ('%s')" % msg
logger.debug(debugMsg)
# In case of time-based blind or stacked queries
# SQL injections
elif method == PAYLOAD.METHOD.TIME:
# Perform the test's request
trueResult = Request.queryPage(reqPayload, place, timeBasedCompare=True, raise404=False)
if trueResult:
# Confirm test's results
trueResult = Request.queryPage(reqPayload, place, timeBasedCompare=True, raise404=False)
if trueResult:
infoMsg = "%s parameter '%s' seems to be '%s' injectable " % (paramType, parameter, title)
logger.info(infoMsg)
injectable = True
# In case of UNION query SQL injection
elif method == PAYLOAD.METHOD.UNION:
# Test for UNION injection and set the sample
# payload as well as the vector.
# NOTE: vector is set to a tuple with 6 elements,
# used afterwards by Agent.forgeUnionQuery()
# method to forge the UNION query payload
configUnion(test.request.char, test.request.columns)
if not Backend.getIdentifiedDbms():
if kb.heuristicDbms is None:
warnMsg = "using unescaped version of the test "
warnMsg += "because of zero knowledge of the "
warnMsg += "back-end DBMS. You can try to "
warnMsg += "explicitly set it using option '--dbms'"
singleTimeWarnMessage(warnMsg)
else:
Backend.forceDbms(kb.heuristicDbms)
if unionExtended:
infoMsg = "automatically extending ranges for UNION "
infoMsg += "query injection technique tests as "
infoMsg += "there is at least one other (potential) "
infoMsg += "technique found"
singleTimeLogMessage(infoMsg)
elif not injection.data:
_ = test.request.columns.split('-')[-1]
if _.isdigit() and int(_) > 10:
if kb.futileUnion is None:
msg = "it is not recommended to perform "
msg += "extended UNION tests if there is not "
msg += "at least one other (potential) "
msg += "technique found. Do you want to skip? [Y/n] "
kb.futileUnion = readInput(msg, default="Y").strip().upper() == 'N'
if kb.futileUnion is False:
continue
# Test for UNION query SQL injection
reqPayload, vector = unionTest(comment, place, parameter, value, prefix, suffix)
if isinstance(reqPayload, basestring):
infoMsg = "%s parameter '%s' is '%s' injectable" % (paramType, parameter, title)
logger.info(infoMsg)
injectable = True
# Overwrite 'where' because it can be set
# by unionTest() directly
where = vector[6]
kb.previousMethod = method
if conf.dummy or conf.offline:
injectable = False
# If the injection test was successful feed the injection
# object with the test's details
if injectable is True:
                        # Feed the injection object with the boundary's details
                        # only the first time a test has been successful
if injection.place is None or injection.parameter is None:
if place in (PLACE.USER_AGENT, PLACE.REFERER, PLACE.HOST):
injection.parameter = place
else:
injection.parameter = parameter
injection.place = place
injection.ptype = ptype
injection.prefix = prefix
injection.suffix = suffix
injection.clause = clause
# Feed with test details every time a test is successful
if hasattr(test, "details"):
for dKey, dValue in test.details.items():
if dKey == "dbms":
injection.dbms = dValue
if not isinstance(dValue, list):
Backend.setDbms(dValue)
else:
Backend.forceDbms(dValue[0], True)
elif dKey == "dbms_version" and injection.dbms_version is None and not conf.testFilter:
injection.dbms_version = Backend.setVersion(dValue)
elif dKey == "os" and injection.os is None:
injection.os = Backend.setOs(dValue)
if vector is None and "vector" in test and test.vector is not None:
vector = test.vector
injection.data[stype] = AttribDict()
injection.data[stype].title = title
injection.data[stype].payload = agent.removePayloadDelimiters(reqPayload)
injection.data[stype].where = where
injection.data[stype].vector = vector
injection.data[stype].comment = comment
injection.data[stype].templatePayload = templatePayload
injection.data[stype].matchRatio = kb.matchRatio
injection.conf.textOnly = conf.textOnly
injection.conf.titles = conf.titles
injection.conf.string = conf.string
injection.conf.notString = conf.notString
injection.conf.regexp = conf.regexp
injection.conf.optimize = conf.optimize
if not kb.alerted:
if conf.beep:
beep()
if conf.alert:
infoMsg = "executing alerting shell command(s) ('%s')" % conf.alert
logger.info(infoMsg)
process = execute(conf.alert, shell=True)
process.wait()
kb.alerted = True
# There is no need to perform this test for other
# <where> tags
break
if injectable is True:
kb.vulnHosts.add(conf.hostname)
break
# Reset forced back-end DBMS value
Backend.flushForcedDbms()
except KeyboardInterrupt:
warnMsg = "user aborted during detection phase"
logger.warn(warnMsg)
msg = "how do you want to proceed? [(S)kip current test/(e)nd detection phase/(n)ext parameter/(c)hange verbosity/(q)uit]"
choice = readInput(msg, default="S", checkBatch=False)
if choice[0] in ("s", "S"):
pass
elif choice[0] in ("c", "C"):
choice = None
while not ((choice or "").isdigit() and 0 <= int(choice) <= 6):
if choice:
logger.warn("invalid value")
msg = "enter new verbosity level: [0-6] "
choice = readInput(msg, default=str(conf.verbose), checkBatch=False).strip()
conf.verbose = int(choice)
setVerbosity()
tests.insert(0, test)
elif choice[0] in ("n", "N"):
return None
elif choice[0] in ("e", "E"):
kb.endDetection = True
elif choice[0] in ("q", "Q"):
raise SqlmapUserQuitException
finally:
# Reset forced back-end DBMS value
Backend.flushForcedDbms()
Backend.flushForcedDbms(True)
# Return the injection object
if injection.place is not None and injection.parameter is not None:
if not conf.dropSetCookie and PAYLOAD.TECHNIQUE.BOOLEAN in injection.data and injection.data[PAYLOAD.TECHNIQUE.BOOLEAN].vector.startswith('OR'):
warnMsg = "in OR boolean-based injections, please consider usage "
warnMsg += "of switch '--drop-set-cookie' if you experience any "
warnMsg += "problems during data retrieval"
logger.warn(warnMsg)
if not checkFalsePositives(injection):
kb.vulnHosts.remove(conf.hostname)
injection.notes.add(NOTE.FALSE_POSITIVE_OR_UNEXPLOITABLE)
else:
injection = None
if injection:
checkSuhosinPatch(injection)
checkFilteredChars(injection)
return injection
def heuristicCheckDbms(injection):
"""
    This function is called when boolean-based blind is identified with a
    generic payload and the DBMS has not yet been fingerprinted, in order
    to attempt to identify the DBMS with a simple DBMS-specific
    boolean-based test
"""
retVal = False
pushValue(kb.injection)
kb.injection = injection
for dbms in getPublicTypeMembers(DBMS, True):
randStr1, randStr2 = randomStr(), randomStr()
Backend.forceDbms(dbms)
if conf.noEscape and dbms not in FROM_DUMMY_TABLE:
continue
if checkBooleanExpression("(SELECT '%s'%s)='%s'" % (randStr1, FROM_DUMMY_TABLE.get(dbms, ""), randStr1)):
if not checkBooleanExpression("(SELECT '%s'%s)='%s'" % (randStr1, FROM_DUMMY_TABLE.get(dbms, ""), randStr2)):
retVal = dbms
break
Backend.flushForcedDbms()
kb.injection = popValue()
if retVal:
infoMsg = "heuristic (extended) test shows that the back-end DBMS " # Not as important as "parsing" counter-part (because of false-positives)
infoMsg += "could be '%s' " % retVal
logger.info(infoMsg)
return retVal
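# Editorial note (illustrative sketch, not part of the original sqlmap
# module): the probe pair sent by heuristicCheckDbms() above, shown for a
# DBMS whose FROM_DUMMY_TABLE entry is assumed to be " FROM DUAL"
# (Oracle-style). The first expression must evaluate to True and the
# second to False for the candidate DBMS to be accepted.
def _demoHeuristicDbmsProbes(randStr1="ABCDEF", randStr2="UVWXYZ"):
    fromClause = " FROM DUAL"  # hypothetical FROM_DUMMY_TABLE entry
    trueProbe = "(SELECT '%s'%s)='%s'" % (randStr1, fromClause, randStr1)
    falseProbe = "(SELECT '%s'%s)='%s'" % (randStr1, fromClause, randStr2)
    # e.g. ("(SELECT 'ABCDEF' FROM DUAL)='ABCDEF'",
    #       "(SELECT 'ABCDEF' FROM DUAL)='UVWXYZ'")
    return trueProbe, falseProbe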
def checkFalsePositives(injection):
"""
    Checks for false positives (only in certain special cases)
"""
retVal = True
if all(_ in (PAYLOAD.TECHNIQUE.BOOLEAN, PAYLOAD.TECHNIQUE.TIME, PAYLOAD.TECHNIQUE.STACKED) for _ in injection.data) or\
(len(injection.data) == 1 and PAYLOAD.TECHNIQUE.UNION in injection.data and "Generic" in injection.data[PAYLOAD.TECHNIQUE.UNION].title):
pushValue(kb.injection)
infoMsg = "checking if the injection point on %s " % injection.place
infoMsg += "parameter '%s' is a false positive" % injection.parameter
logger.info(infoMsg)
def _():
return int(randomInt(2)) + 1
kb.injection = injection
for i in xrange(conf.level):
while True:
randInt1, randInt2, randInt3 = (_() for j in xrange(3))
randInt1 = min(randInt1, randInt2, randInt3)
randInt3 = max(randInt1, randInt2, randInt3)
if randInt3 > randInt2 > randInt1:
break
if not checkBooleanExpression("%d=%d" % (randInt1, randInt1)):
retVal = False
break
            # Just in case the DBMS hasn't properly recovered from a previous delayed request
if PAYLOAD.TECHNIQUE.BOOLEAN not in injection.data:
checkBooleanExpression("%d=%d" % (randInt1, randInt2))
if checkBooleanExpression("%d=%d" % (randInt1, randInt3)): # this must not be evaluated to True
retVal = False
break
elif checkBooleanExpression("%d=%d" % (randInt3, randInt2)): # this must not be evaluated to True
retVal = False
break
elif not checkBooleanExpression("%d=%d" % (randInt2, randInt2)): # this must be evaluated to True
retVal = False
break
elif checkBooleanExpression("%d %d" % (randInt3, randInt2)): # this must not be evaluated to True (invalid statement)
retVal = False
break
if not retVal:
warnMsg = "false positive or unexploitable injection point detected"
logger.warn(warnMsg)
kb.injection = popValue()
return retVal
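# Editorial note (illustrative sketch, not part of the original sqlmap
# module): the sanity expressions sent by checkFalsePositives() above for
# a triple randInt1 < randInt2 < randInt3 (here 2 < 5 < 9), together with
# the outcome each one must produce for the finding to be kept.
def _demoFalsePositiveProbes(randInt1=2, randInt2=5, randInt3=9):
    return [
        ("%d=%d" % (randInt1, randInt1), True),   # must evaluate to True
        ("%d=%d" % (randInt1, randInt3), False),  # must evaluate to False
        ("%d=%d" % (randInt3, randInt2), False),  # must evaluate to False
        ("%d=%d" % (randInt2, randInt2), True),   # must evaluate to True
        ("%d %d" % (randInt3, randInt2), False),  # invalid statement, must not hold
    ]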
def checkSuhosinPatch(injection):
"""
Checks for existence of Suhosin-patch (and alike) protection mechanism(s)
"""
if injection.place == PLACE.GET:
debugMsg = "checking for parameter length "
debugMsg += "constrainting mechanisms"
logger.debug(debugMsg)
pushValue(kb.injection)
kb.injection = injection
randInt = randomInt()
if not checkBooleanExpression("%d=%s%d" % (randInt, ' ' * SUHOSIN_MAX_VALUE_LENGTH, randInt)):
warnMsg = "parameter length constrainting "
warnMsg += "mechanism detected (e.g. Suhosin patch). "
warnMsg += "Potential problems in enumeration phase can be expected"
logger.warn(warnMsg)
kb.injection = popValue()
def checkFilteredChars(injection):
debugMsg = "checking for filtered characters"
logger.debug(debugMsg)
pushValue(kb.injection)
kb.injection = injection
randInt = randomInt()
# all other techniques are already using parentheses in tests
if len(injection.data) == 1 and PAYLOAD.TECHNIQUE.BOOLEAN in injection.data:
if not checkBooleanExpression("(%d)=%d" % (randInt, randInt)):
warnMsg = "it appears that some non-alphanumeric characters (i.e. ()) are "
warnMsg += "filtered by the back-end server. There is a strong "
warnMsg += "possibility that sqlmap won't be able to properly "
warnMsg += "exploit this vulnerability"
logger.warn(warnMsg)
# inference techniques depend on character '>'
if not any(_ in injection.data for _ in (PAYLOAD.TECHNIQUE.ERROR, PAYLOAD.TECHNIQUE.UNION, PAYLOAD.TECHNIQUE.QUERY)):
if not checkBooleanExpression("%d>%d" % (randInt+1, randInt)):
warnMsg = "it appears that the character '>' is "
warnMsg += "filtered by the back-end server. You are strongly "
warnMsg += "advised to rerun with the '--tamper=between'"
logger.warn(warnMsg)
kb.injection = popValue()
def heuristicCheckSqlInjection(place, parameter):
if kb.nullConnection:
debugMsg = "heuristic check skipped because NULL connection used"
logger.debug(debugMsg)
return None
origValue = conf.paramDict[place][parameter]
paramType = conf.method if conf.method not in (None, HTTPMETHOD.GET, HTTPMETHOD.POST) else place
prefix = ""
suffix = ""
if conf.prefix or conf.suffix:
if conf.prefix:
prefix = conf.prefix
if conf.suffix:
suffix = conf.suffix
randStr = ""
while '\'' not in randStr:
randStr = randomStr(length=10, alphabet=HEURISTIC_CHECK_ALPHABET)
kb.heuristicMode = True
payload = "%s%s%s" % (prefix, randStr, suffix)
payload = agent.payload(place, parameter, newValue=payload)
page, _ = Request.queryPage(payload, place, content=True, raise404=False)
kb.heuristicMode = False
parseFilePaths(page)
result = wasLastResponseDBMSError()
infoMsg = "heuristic (basic) test shows that %s parameter " % paramType
infoMsg += "'%s' might " % parameter
def _(page):
return any(_ in (page or "") for _ in FORMAT_EXCEPTION_STRINGS)
casting = _(page) and not _(kb.originalPage)
if not casting and not result and kb.dynamicParameter and origValue.isdigit():
randInt = int(randomInt())
payload = "%s%s%s" % (prefix, "%d-%d" % (int(origValue) + randInt, randInt), suffix)
payload = agent.payload(place, parameter, newValue=payload, where=PAYLOAD.WHERE.REPLACE)
result = Request.queryPage(payload, place, raise404=False)
if not result:
randStr = randomStr()
payload = "%s%s%s" % (prefix, "%s.%d%s" % (origValue, random.randint(1, 9), randStr), suffix)
payload = agent.payload(place, parameter, newValue=payload, where=PAYLOAD.WHERE.REPLACE)
casting = Request.queryPage(payload, place, raise404=False)
kb.heuristicTest = HEURISTIC_TEST.CASTED if casting else HEURISTIC_TEST.NEGATIVE if not result else HEURISTIC_TEST.POSITIVE
if casting:
errMsg = "possible %s casting " % ("integer" if origValue.isdigit() else "type")
errMsg += "detected (e.g. \"$%s=intval($_REQUEST['%s'])\") " % (parameter, parameter)
errMsg += "at the back-end web application"
logger.error(errMsg)
if kb.ignoreCasted is None:
message = "do you want to skip those kind of cases (and save scanning time)? %s " % ("[Y/n]" if conf.multipleTargets else "[y/N]")
kb.ignoreCasted = readInput(message, default='Y' if conf.multipleTargets else 'N').upper() != 'N'
elif result:
infoMsg += "be injectable"
if Backend.getErrorParsedDBMSes():
infoMsg += " (possible DBMS: '%s')" % Format.getErrorParsedDBMSes()
logger.info(infoMsg)
else:
infoMsg += "not be injectable"
logger.warn(infoMsg)
kb.heuristicMode = True
randStr1, randStr2 = randomStr(NON_SQLI_CHECK_PREFIX_SUFFIX_LENGTH), randomStr(NON_SQLI_CHECK_PREFIX_SUFFIX_LENGTH)
value = "%s%s%s" % (randStr1, DUMMY_NON_SQLI_CHECK_APPENDIX, randStr2)
payload = "%s%s%s" % (prefix, "'%s" % value, suffix)
payload = agent.payload(place, parameter, newValue=payload)
page, _ = Request.queryPage(payload, place, content=True, raise404=False)
paramType = conf.method if conf.method not in (None, HTTPMETHOD.GET, HTTPMETHOD.POST) else place
if value.lower() in (page or "").lower():
infoMsg = "heuristic (XSS) test shows that %s parameter " % paramType
infoMsg += "'%s' might be vulnerable to cross-site scripting attacks" % parameter
logger.info(infoMsg)
for match in re.finditer("(?i)[^\n]*(no such file|failed (to )?open)[^\n]*", page or ""):
if randStr1.lower() in match.group(0).lower():
infoMsg = "heuristic (FI) test shows that %s parameter " % paramType
infoMsg += "'%s' might be vulnerable to file inclusion attacks" % parameter
logger.info(infoMsg)
break
kb.heuristicMode = False
return kb.heuristicTest
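# Editorial note (illustrative sketch, not part of the original sqlmap
# module): the arithmetic-evaluation probe built above for digit-like
# parameter values. If the back end evaluates the expression, the page
# returned for "(origValue + randInt)-randInt" should match the page
# returned for the original value.
def _demoArithmeticProbe(origValue="17", randInt=42):
    return "%d-%d" % (int(origValue) + randInt, randInt)  # e.g. "59-42"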
def checkDynParam(place, parameter, value):
"""
This function checks if the URL parameter is dynamic. If it is
dynamic, the content of the page differs, otherwise the
dynamicity might depend on another parameter.
"""
if kb.redirectChoice:
return None
kb.matchRatio = None
dynResult = None
randInt = randomInt()
paramType = conf.method if conf.method not in (None, HTTPMETHOD.GET, HTTPMETHOD.POST) else place
infoMsg = "testing if %s parameter '%s' is dynamic" % (paramType, parameter)
logger.info(infoMsg)
try:
payload = agent.payload(place, parameter, value, getUnicode(randInt))
dynResult = Request.queryPage(payload, place, raise404=False)
if not dynResult:
infoMsg = "confirming that %s parameter '%s' is dynamic" % (paramType, parameter)
logger.info(infoMsg)
randInt = randomInt()
payload = agent.payload(place, parameter, value, getUnicode(randInt))
dynResult = Request.queryPage(payload, place, raise404=False)
except SqlmapConnectionException:
pass
result = None if dynResult is None else not dynResult
kb.dynamicParameter = result
return result
def checkDynamicContent(firstPage, secondPage):
"""
This function checks for the dynamic content in the provided pages
"""
if kb.nullConnection:
debugMsg = "dynamic content checking skipped "
debugMsg += "because NULL connection used"
logger.debug(debugMsg)
return
if any(page is None for page in (firstPage, secondPage)):
warnMsg = "can't check dynamic content "
warnMsg += "because of lack of page content"
logger.critical(warnMsg)
return
seqMatcher = getCurrentThreadData().seqMatcher
seqMatcher.set_seq1(firstPage)
seqMatcher.set_seq2(secondPage)
    # In case of an intolerable difference, turn on the dynamicity removal engine
if seqMatcher.quick_ratio() <= UPPER_RATIO_BOUND:
findDynamicContent(firstPage, secondPage)
count = 0
while not Request.queryPage():
count += 1
if count > conf.retries:
warnMsg = "target URL is too dynamic. "
warnMsg += "Switching to '--text-only' "
logger.warn(warnMsg)
conf.textOnly = True
return
warnMsg = "target URL is heavily dynamic"
warnMsg += ". sqlmap is going to retry the request"
logger.critical(warnMsg)
secondPage, _ = Request.queryPage(content=True)
findDynamicContent(firstPage, secondPage)
def checkStability():
"""
    This function checks if the URL content is stable by requesting the
    same page twice with a small delay between the requests.
    In case the content of the page differs between the two requests,
    the dynamicity might depend on other parameters, in which case page
    comparison can rely on, for instance, string matching (--string).
"""
infoMsg = "testing if the target URL is stable"
logger.info(infoMsg)
firstPage = kb.originalPage # set inside checkConnection()
delay = 1 - (time.time() - (kb.originalPageTime or 0))
delay = max(0, min(1, delay))
time.sleep(delay)
secondPage, _ = Request.queryPage(content=True, noteResponseTime=False, raise404=False)
if kb.redirectChoice:
return None
kb.pageStable = (firstPage == secondPage)
if kb.pageStable:
if firstPage:
infoMsg = "target URL is stable"
logger.info(infoMsg)
else:
errMsg = "there was an error checking the stability of page "
errMsg += "because of lack of content. Please check the "
errMsg += "page request results (and probable errors) by "
errMsg += "using higher verbosity levels"
logger.error(errMsg)
else:
warnMsg = "target URL is not stable. sqlmap will base the page "
warnMsg += "comparison on a sequence matcher. If no dynamic nor "
warnMsg += "injectable parameters are detected, or in case of "
warnMsg += "junk results, refer to user's manual paragraph "
warnMsg += "'Page comparison' and provide a string or regular "
warnMsg += "expression to match on"
logger.warn(warnMsg)
message = "how do you want to proceed? [(C)ontinue/(s)tring/(r)egex/(q)uit] "
test = readInput(message, default="C")
if test and test[0] in ("q", "Q"):
raise SqlmapUserQuitException
elif test and test[0] in ("s", "S"):
showStaticWords(firstPage, secondPage)
message = "please enter value for parameter 'string': "
test = readInput(message)
if test:
conf.string = test
if kb.nullConnection:
debugMsg = "turning off NULL connection "
debugMsg += "support because of string checking"
logger.debug(debugMsg)
kb.nullConnection = None
else:
errMsg = "Empty value supplied"
raise SqlmapNoneDataException(errMsg)
elif test and test[0] in ("r", "R"):
message = "please enter value for parameter 'regex': "
test = readInput(message)
if test:
conf.regex = test
if kb.nullConnection:
debugMsg = "turning off NULL connection "
debugMsg += "support because of regex checking"
logger.debug(debugMsg)
kb.nullConnection = None
else:
errMsg = "Empty value supplied"
raise SqlmapNoneDataException(errMsg)
else:
checkDynamicContent(firstPage, secondPage)
return kb.pageStable
def checkString():
if not conf.string:
return True
infoMsg = "testing if the provided string is within the "
infoMsg += "target URL page content"
logger.info(infoMsg)
page, headers = Request.queryPage(content=True)
rawResponse = "%s%s" % (listToStrValue(headers.headers if headers else ""), page)
if conf.string not in rawResponse:
warnMsg = "you provided '%s' as the string to " % conf.string
warnMsg += "match, but such a string is not within the target "
warnMsg += "URL raw response, sqlmap will carry on anyway"
logger.warn(warnMsg)
return True
def checkRegexp():
if not conf.regexp:
return True
infoMsg = "testing if the provided regular expression matches within "
infoMsg += "the target URL page content"
logger.info(infoMsg)
page, headers = Request.queryPage(content=True)
rawResponse = "%s%s" % (listToStrValue(headers.headers if headers else ""), page)
if not re.search(conf.regexp, rawResponse, re.I | re.M):
warnMsg = "you provided '%s' as the regular expression to " % conf.regexp
warnMsg += "match, but such a regular expression does not have any "
warnMsg += "match within the target URL raw response, sqlmap "
warnMsg += "will carry on anyway"
logger.warn(warnMsg)
return True
def checkWaf():
"""
Reference: http://seclists.org/nmap-dev/2011/q2/att-1005/http-waf-detect.nse
"""
if any((conf.string, conf.notString, conf.regexp, conf.dummy, conf.offline, conf.skipWaf)):
return None
_ = hashDBRetrieve(HASHDB_KEYS.CHECK_WAF_RESULT, True)
if _ is not None:
if _:
warnMsg = "previous heuristics detected that the target "
warnMsg += "is protected by some kind of WAF/IPS/IDS"
logger.critical(warnMsg)
return _
infoMsg = "checking if the target is protected by "
infoMsg += "some kind of WAF/IPS/IDS"
logger.info(infoMsg)
retVal = False
payload = "%d %s" % (randomInt(), IDS_WAF_CHECK_PAYLOAD)
value = "" if not conf.parameters.get(PLACE.GET) else conf.parameters[PLACE.GET] + DEFAULT_GET_POST_DELIMITER
value += agent.addPayloadDelimiters("%s=%s" % (randomStr(), payload))
pushValue(conf.timeout)
conf.timeout = IDS_WAF_CHECK_TIMEOUT
try:
retVal = Request.queryPage(place=PLACE.GET, value=value, getRatioValue=True, noteResponseTime=False, silent=True)[1] < IDS_WAF_CHECK_RATIO
except SqlmapConnectionException:
retVal = True
finally:
kb.matchRatio = None
conf.timeout = popValue()
if retVal:
warnMsg = "heuristics detected that the target "
warnMsg += "is protected by some kind of WAF/IPS/IDS"
logger.critical(warnMsg)
if not conf.identifyWaf:
message = "do you want sqlmap to try to detect backend "
message += "WAF/IPS/IDS? [y/N] "
output = readInput(message, default="N")
if output and output[0] in ("Y", "y"):
conf.identifyWaf = True
if conf.timeout == defaults.timeout:
logger.warning("dropping timeout to %d seconds (i.e. '--timeout=%d')" % (IDS_WAF_CHECK_TIMEOUT, IDS_WAF_CHECK_TIMEOUT))
conf.timeout = IDS_WAF_CHECK_TIMEOUT
hashDBWrite(HASHDB_KEYS.CHECK_WAF_RESULT, retVal, True)
return retVal
def identifyWaf():
if not conf.identifyWaf:
return None
kb.testMode = True
infoMsg = "using WAF scripts to detect "
infoMsg += "backend WAF/IPS/IDS protection"
logger.info(infoMsg)
@cachedmethod
def _(*args, **kwargs):
page, headers, code = None, None, None
try:
pushValue(kb.redirectChoice)
kb.redirectChoice = REDIRECTION.NO
if kwargs.get("get"):
kwargs["get"] = urlencode(kwargs["get"])
kwargs["raise404"] = False
kwargs["silent"] = True
page, headers, code = Request.getPage(*args, **kwargs)
except Exception:
pass
finally:
kb.redirectChoice = popValue()
return page or "", headers or {}, code
retVal = False
for function, product in kb.wafFunctions:
try:
logger.debug("checking for WAF/IDS/IPS product '%s'" % product)
found = function(_)
except Exception, ex:
errMsg = "exception occurred while running "
errMsg += "WAF script for '%s' ('%s')" % (product, getSafeExString(ex))
logger.critical(errMsg)
found = False
if found:
retVal = product
break
if retVal:
errMsg = "WAF/IDS/IPS identified '%s'. Please " % retVal
errMsg += "consider usage of tamper scripts (option '--tamper')"
logger.critical(errMsg)
message = "are you sure that you want to "
message += "continue with further target testing? [y/N] "
output = readInput(message, default="N")
if output and output[0] not in ("Y", "y"):
raise SqlmapUserQuitException
else:
warnMsg = "no WAF/IDS/IPS product has been identified (this doesn't mean that there is none)"
logger.warn(warnMsg)
kb.testType = None
kb.testMode = False
return retVal
def checkNullConnection():
"""
Reference: http://www.wisec.it/sectou.php?id=472f952d79293
"""
if conf.data:
return False
infoMsg = "testing NULL connection to the target URL"
logger.info(infoMsg)
try:
pushValue(kb.pageCompress)
kb.pageCompress = False
page, headers, _ = Request.getPage(method=HTTPMETHOD.HEAD)
if not page and HTTP_HEADER.CONTENT_LENGTH in (headers or {}):
kb.nullConnection = NULLCONNECTION.HEAD
infoMsg = "NULL connection is supported with HEAD header"
logger.info(infoMsg)
else:
page, headers, _ = Request.getPage(auxHeaders={HTTP_HEADER.RANGE: "bytes=-1"})
if page and len(page) == 1 and HTTP_HEADER.CONTENT_RANGE in (headers or {}):
kb.nullConnection = NULLCONNECTION.RANGE
infoMsg = "NULL connection is supported with GET header "
infoMsg += "'%s'" % kb.nullConnection
logger.info(infoMsg)
else:
_, headers, _ = Request.getPage(skipRead = True)
if HTTP_HEADER.CONTENT_LENGTH in (headers or {}):
kb.nullConnection = NULLCONNECTION.SKIP_READ
infoMsg = "NULL connection is supported with 'skip-read' method"
logger.info(infoMsg)
except SqlmapConnectionException, ex:
errMsg = getSafeExString(ex)
raise SqlmapConnectionException(errMsg)
finally:
kb.pageCompress = popValue()
return kb.nullConnection is not None
def checkConnection(suppressOutput=False):
if not any((conf.proxy, conf.tor, conf.dummy, conf.offline)):
try:
debugMsg = "resolving hostname '%s'" % conf.hostname
logger.debug(debugMsg)
socket.getaddrinfo(conf.hostname, None)
except socket.gaierror:
errMsg = "host '%s' does not exist" % conf.hostname
raise SqlmapConnectionException(errMsg)
except socket.error, ex:
errMsg = "problem occurred while "
errMsg += "resolving a host name '%s' ('%s')" % (conf.hostname, getSafeExString(ex))
raise SqlmapConnectionException(errMsg)
if not suppressOutput and not conf.dummy and not conf.offline:
infoMsg = "testing connection to the target URL"
logger.info(infoMsg)
try:
kb.originalPageTime = time.time()
page, headers = Request.queryPage(content=True, noteResponseTime=False)
kb.originalPage = kb.pageTemplate = page
kb.errorIsNone = False
if not kb.originalPage and wasLastResponseHTTPError():
errMsg = "unable to retrieve page content"
raise SqlmapConnectionException(errMsg)
elif wasLastResponseDBMSError():
warnMsg = "there is a DBMS error found in the HTTP response body "
warnMsg += "which could interfere with the results of the tests"
logger.warn(warnMsg)
elif wasLastResponseHTTPError():
warnMsg = "the web server responded with an HTTP error code (%d) " % getLastRequestHTTPError()
warnMsg += "which could interfere with the results of the tests"
logger.warn(warnMsg)
else:
kb.errorIsNone = True
except SqlmapConnectionException, ex:
if conf.ipv6:
warnMsg = "check connection to a provided "
warnMsg += "IPv6 address with a tool like ping6 "
warnMsg += "(e.g. 'ping6 -I eth0 %s') " % conf.hostname
warnMsg += "prior to running sqlmap to avoid "
warnMsg += "any addressing issues"
singleTimeWarnMessage(warnMsg)
if any(code in kb.httpErrorCodes for code in (httplib.NOT_FOUND, )):
errMsg = getSafeExString(ex)
logger.critical(errMsg)
if conf.multipleTargets:
return False
msg = "it is not recommended to continue in this kind of cases. Do you want to quit and make sure that everything is set up properly? [Y/n] "
if readInput(msg, default="Y") not in ("n", "N"):
raise SqlmapSilentQuitException
else:
kb.ignoreNotFound = True
else:
raise
return True
def setVerbosity(): # Cross-linked function
raise NotImplementedError
| apache-2.0 |
lino-framework/welfare | lino_welfare/projects/gerd/tests/dumps/18.8.0/countries_place.py | 2 | 8075 | # -*- coding: UTF-8 -*-
logger.info("Loading 78 objects to table countries_place...")
# fields: id, parent, name, country, zip_code, type, show_type, inscode
loader.save(create_countries_place(1,None,['Eupen', '', ''],u'BE',u'4700',u'50',False,u'63023'))
loader.save(create_countries_place(2,1,['Nispert', '', ''],u'BE',u'',u'55',False,u''))
loader.save(create_countries_place(3,None,['Burg-Reuland', '', ''],u'BE',u'4790',u'50',False,u'63087'))
loader.save(create_countries_place(4,3,['Ouren', '', ''],u'BE',u'',u'55',False,u''))
loader.save(create_countries_place(5,None,['Kelmis', 'La Calamine', 'Kelmis'],u'BE',u'4720',u'50',False,u'63040'))
loader.save(create_countries_place(6,None,['Kettenis', '', ''],u'BE',u'4701',u'70',False,u'63023'))
loader.save(create_countries_place(7,None,['Raeren', '', ''],u'BE',u'4730',u'70',False,u'63061'))
loader.save(create_countries_place(8,None,['Angleur', '', ''],u'BE',u'4031',u'50',False,u'62063'))
loader.save(create_countries_place(9,None,['Ans', '', ''],u'BE',u'4430',u'50',False,u'62003'))
loader.save(create_countries_place(10,None,['Ottignies', '', ''],u'BE',u'1340',u'50',False,u'25121'))
loader.save(create_countries_place(11,None,['Thieusies', '', ''],u'BE',u'7061',u'50',False,u'55040'))
loader.save(create_countries_place(12,None,['Cuesmes', '', ''],u'BE',u'7033',u'50',False,u'53053'))
loader.save(create_countries_place(13,None,['La Reid', '', ''],u'BE',u'4910',u'50',False,u'63076'))
loader.save(create_countries_place(14,None,['Bl\xe9gny', '', ''],u'BE',u'4670',u'50',False,u'62119'))
loader.save(create_countries_place(15,None,['Trembleur', '', ''],u'BE',u'4670',u'50',False,u'62119'))
loader.save(create_countries_place(16,None,['Mortier', '', ''],u'BE',u'4670',u'50',False,u'62119'))
loader.save(create_countries_place(17,None,['Cerfontaine', '', ''],u'BE',u'5630',u'50',False,u'93010'))
loader.save(create_countries_place(18,None,['Burdinne', '', ''],u'BE',u'4210',u'50',False,u'61010'))
loader.save(create_countries_place(19,None,['Antwerpen', 'Anvers', 'Anvers'],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(20,None,['Luxemburg', 'Luxembourg', 'Luxembourg'],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(21,None,['Nam\xfcr', 'Namur', 'Namur'],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(22,None,['Limburg', 'Limbourg', 'Limbourg'],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(23,22,['Aalst-bij-Sint-Truiden', '', ''],u'BE',u'3800',u'70',False,u''))
loader.save(create_countries_place(24,None,['L\xfcttich', 'Li\xe8ge', 'Li\xe8ge'],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(25,24,['L\xfcttich', 'Li\xe8ge', 'Li\xe8ge'],u'BE',u'4000',u'50',False,u''))
loader.save(create_countries_place(26,24,['B\xfctgenbach', 'Butgenbach', 'Butgenbach'],u'BE',u'4750',u'50',False,u'63013'))
loader.save(create_countries_place(27,24,['B\xfcllingen', 'Bullange', 'B\xfcllingen'],u'BE',u'4760',u'50',False,u'63012'))
loader.save(create_countries_place(28,24,['Sankt Vith', 'Saint-Vith', 'Sankt Vith'],u'BE',u'4780',u'50',False,u'63067'))
loader.save(create_countries_place(29,24,['Recht', 'Recht', 'Recht'],u'BE',u'4780',u'50',False,u'63067'))
loader.save(create_countries_place(30,24,['Baelen', 'Baelen', 'Baelen'],u'BE',u'4837',u'50',False,u''))
loader.save(create_countries_place(31,None,['Hennegau', 'Hainaut', 'Hainaut'],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(32,None,['Wallonisch-Brabant', 'Brabant wallon', 'Brabant wallon'],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(33,None,['Fl\xe4misch-Brabant', 'Brabant flamant', 'Brabant flamant'],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(34,None,['Ostflandern', "Flandre de l'Est", "Flandre de l'Est"],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(35,34,['Aalst', 'Alost', 'Aalst'],u'BE',u'9300',u'50',False,u'41002'))
loader.save(create_countries_place(36,35,['Gijzegem', '', ''],u'BE',u'9308',u'70',False,u'41002'))
loader.save(create_countries_place(37,35,['Baardegem', '', ''],u'BE',u'9310',u'70',False,u'41002'))
loader.save(create_countries_place(38,35,['Erembodegem', '', ''],u'BE',u'9320',u'70',False,u''))
loader.save(create_countries_place(39,35,['Herdersem', '', ''],u'BE',u'9310',u'70',False,u'41002'))
loader.save(create_countries_place(40,35,['Hofstade', '', ''],u'BE',u'9308',u'70',False,u''))
loader.save(create_countries_place(41,35,['Meldert', '', ''],u'BE',u'9310',u'70',False,u''))
loader.save(create_countries_place(42,35,['Nieuwerkerken', '', ''],u'BE',u'9320',u'70',False,u''))
loader.save(create_countries_place(43,35,['Moorsel', '', ''],u'BE',u'9310',u'70',False,u'41002'))
loader.save(create_countries_place(44,None,['Westflandern', "Flandre de l'Ouest", "Flandre de l'Ouest"],u'BE',u'',u'21',False,u''))
loader.save(create_countries_place(45,None,['Br\xfcssel', 'Bruxelles', 'Brussels'],u'BE',u'1000',u'50',False,u''))
loader.save(create_countries_place(46,None,['Bergen', 'Mons', 'Mons'],u'BE',u'7000',u'50',False,u''))
loader.save(create_countries_place(47,None,['Ostende', 'Ostende', 'Ostende'],u'BE',u'8400',u'50',False,u''))
loader.save(create_countries_place(48,None,['Nam\xfcr', 'Namur', 'Namur'],u'BE',u'5000',u'50',False,u''))
loader.save(create_countries_place(49,None,['Harju', '', ''],u'EE',u'',u'20',False,u''))
loader.save(create_countries_place(50,None,['P\xe4rnu', '', ''],u'EE',u'',u'20',False,u''))
loader.save(create_countries_place(51,None,['Rapla', '', ''],u'EE',u'',u'20',False,u''))
loader.save(create_countries_place(52,51,['Vigala', '', ''],u'EE',u'',u'52',False,u''))
loader.save(create_countries_place(53,51,['Rapla', '', ''],u'EE',u'',u'51',False,u''))
loader.save(create_countries_place(54,49,['Tallinn', '', ''],u'EE',u'',u'51',False,u''))
loader.save(create_countries_place(55,54,['Kesklinn', '', ''],u'EE',u'',u'55',False,u''))
loader.save(create_countries_place(56,54,['P\xf5hja-Tallinn', '', ''],u'EE',u'',u'55',False,u''))
loader.save(create_countries_place(57,50,['P\xe4rnu', '', ''],u'EE',u'',u'51',False,u''))
loader.save(create_countries_place(58,None,['Tartu', '', ''],u'EE',u'',u'51',False,u''))
loader.save(create_countries_place(59,None,['Narva', '', ''],u'EE',u'',u'51',False,u''))
loader.save(create_countries_place(60,49,['\xc4\xe4sm\xe4e', '', ''],u'EE',u'',u'51',False,u''))
loader.save(create_countries_place(61,None,['Aachen', 'Aix-la-Chapelle', 'Aachen'],u'DE',u'',u'50',False,u''))
loader.save(create_countries_place(62,None,['K\xf6ln', 'Cologne', 'Cologne'],u'DE',u'',u'50',False,u''))
loader.save(create_countries_place(63,None,['Berlin', '', ''],u'DE',u'',u'50',False,u''))
loader.save(create_countries_place(64,None,['Hamburg', '', ''],u'DE',u'',u'50',False,u''))
loader.save(create_countries_place(65,None,['M\xfcnchen', 'Munich', 'Munich'],u'DE',u'',u'50',False,u''))
loader.save(create_countries_place(66,None,['Monschau', 'Montjoie', 'Monschau'],u'DE',u'',u'50',False,u''))
loader.save(create_countries_place(67,None,['Maastricht', '', ''],u'NL',u'',u'50',False,u''))
loader.save(create_countries_place(68,None,['Amsterdam', '', ''],u'NL',u'',u'50',False,u''))
loader.save(create_countries_place(69,None,['Den Haag', '', ''],u'NL',u'',u'50',False,u''))
loader.save(create_countries_place(70,None,['Rotterdam', '', ''],u'NL',u'',u'50',False,u''))
loader.save(create_countries_place(71,None,['Utrecht', '', ''],u'NL',u'',u'50',False,u''))
loader.save(create_countries_place(72,None,['Breda', '', ''],u'NL',u'',u'50',False,u''))
loader.save(create_countries_place(73,None,['Paris', 'Paris', 'Paris'],u'FR',u'',u'50',False,u''))
loader.save(create_countries_place(74,None,['Nizza', 'Nice', 'Nice'],u'FR',u'',u'50',False,u''))
loader.save(create_countries_place(75,None,['Metz', '', ''],u'FR',u'',u'50',False,u''))
loader.save(create_countries_place(76,None,['Strasbourg', '', ''],u'FR',u'',u'50',False,u''))
loader.save(create_countries_place(77,None,['Nancy', '', ''],u'FR',u'',u'50',False,u''))
loader.save(create_countries_place(78,None,['Marseille', '', ''],u'FR',u'',u'50',False,u''))
loader.flush_deferred_objects()
| agpl-3.0 |
rvraghav93/scikit-learn | sklearn/neighbors/approximate.py | 3 | 22554 | """Approximate nearest neighbor search"""
# Author: Maheshakya Wijewardena <[email protected]>
# Joel Nothman <[email protected]>
import numpy as np
import warnings
from scipy import sparse
from .base import KNeighborsMixin, RadiusNeighborsMixin
from ..base import BaseEstimator
from ..utils.validation import check_array
from ..utils import check_random_state
from ..metrics.pairwise import pairwise_distances
from ..random_projection import GaussianRandomProjection
__all__ = ["LSHForest"]
HASH_DTYPE = '>u4'
MAX_HASH_SIZE = np.dtype(HASH_DTYPE).itemsize * 8
def _find_matching_indices(tree, bin_X, left_mask, right_mask):
"""Finds indices in sorted array of integers.
Most significant h bits in the binary representations of the
integers are matched with the items' most significant h bits.
"""
left_index = np.searchsorted(tree, bin_X & left_mask)
right_index = np.searchsorted(tree, bin_X | right_mask,
side='right')
return left_index, right_index
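# Editorial note (illustrative sketch, not part of the original
# scikit-learn module): how the mask pair used by _find_matching_indices
# bounds the slice of sorted hashes sharing the most significant bits of
# a query hash. The mask values below are hypothetical stand-ins for the
# precomputed left/right mask arrays built by LSHForest.
def _demo_prefix_range():
    tree = np.sort(np.array([0x10000000, 0x12345678, 0x12ffffff,
                             0x20000000], dtype=HASH_DTYPE))
    query = np.uint32(0x12abcdef)
    left_mask = np.uint32(0xff000000)   # keeps the 8 most significant bits
    right_mask = np.uint32(0x00ffffff)  # sets the remaining bits to 1
    lo = np.searchsorted(tree, query & left_mask)
    hi = np.searchsorted(tree, query | right_mask, side='right')
    return tree[lo:hi]                  # every stored hash starting with 0x12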
def _find_longest_prefix_match(tree, bin_X, hash_size,
left_masks, right_masks):
"""Find the longest prefix match in tree for each query in bin_X
Most significant bits are considered as the prefix.
"""
hi = np.empty_like(bin_X, dtype=np.intp)
hi.fill(hash_size)
lo = np.zeros_like(bin_X, dtype=np.intp)
res = np.empty_like(bin_X, dtype=np.intp)
left_idx, right_idx = _find_matching_indices(tree, bin_X,
left_masks[hi],
right_masks[hi])
found = right_idx > left_idx
res[found] = lo[found] = hash_size
r = np.arange(bin_X.shape[0])
kept = r[lo < hi] # indices remaining in bin_X mask
while kept.shape[0]:
mid = (lo.take(kept) + hi.take(kept)) // 2
left_idx, right_idx = _find_matching_indices(tree,
bin_X.take(kept),
left_masks[mid],
right_masks[mid])
found = right_idx > left_idx
mid_found = mid[found]
lo[kept[found]] = mid_found + 1
res[kept[found]] = mid_found
hi[kept[~found]] = mid[~found]
kept = r[lo < hi]
return res
class ProjectionToHashMixin(object):
"""Turn a transformed real-valued array into a hash"""
@staticmethod
def _to_hash(projected):
if projected.shape[1] % 8 != 0:
raise ValueError('Require reduced dimensionality to be a multiple '
'of 8 for hashing')
# XXX: perhaps non-copying operation better
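        # Each block of 8 sign bits (projected > 0) is packed into a byte;
        # viewing the bytes as big-endian uint32 then yields one 32-bit
        # hash per group of 32 projected components.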
out = np.packbits((projected > 0).astype(int)).view(dtype=HASH_DTYPE)
return out.reshape(projected.shape[0], -1)
def fit_transform(self, X, y=None):
self.fit(X)
return self.transform(X)
def transform(self, X):
return self._to_hash(super(ProjectionToHashMixin, self).transform(X))
class GaussianRandomProjectionHash(ProjectionToHashMixin,
GaussianRandomProjection):
"""Use GaussianRandomProjection to produce a cosine LSH fingerprint"""
def __init__(self,
n_components=32,
random_state=None):
super(GaussianRandomProjectionHash, self).__init__(
n_components=n_components,
random_state=random_state)
def _array_of_arrays(list_of_arrays):
"""Creates an array of array from list of arrays."""
out = np.empty(len(list_of_arrays), dtype=object)
out[:] = list_of_arrays
return out
class LSHForest(BaseEstimator, KNeighborsMixin, RadiusNeighborsMixin):
"""Performs approximate nearest neighbor search using LSH forest.
    LSH Forest: Locality Sensitive Hashing forest [1] is an alternative
    to vanilla approximate nearest neighbor search methods.
    The LSH forest data structure is implemented using sorted
    arrays, binary search and 32-bit fixed-length hashes.
    Random projection is used as the hash family, which approximates
    cosine distance.
    The cosine distance is defined as ``1 - cosine_similarity``: the lowest
    value is 0 (identical points) and it is bounded above by 2 for the
    farthest points. Its value does not depend on the norm of the vectors
    but only on their relative angles.
Read more in the :ref:`User Guide <approximate_nearest_neighbors>`.
Parameters
----------
n_estimators : int (default = 10)
Number of trees in the LSH Forest.
min_hash_match : int (default = 4)
lowest hash length to be searched when candidate selection is
performed for nearest neighbors.
n_candidates : int (default = 10)
Minimum number of candidates evaluated per estimator, assuming enough
items meet the `min_hash_match` constraint.
n_neighbors : int (default = 5)
Number of neighbors to be returned from query function when
it is not provided to the :meth:`kneighbors` method.
    radius : float, optional (default = 1.0)
        Radius from the data point to its neighbors. This is the parameter
        space to use by default for the :meth:`radius_neighbors` queries.
radius_cutoff_ratio : float, optional (default = 0.9)
        A value ranging from 0 to 1. Radius neighbors will be searched until
the ratio between total neighbors within the radius and the total
candidates becomes less than this value unless it is terminated by
hash length reaching `min_hash_match`.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
hash_functions_ : list of GaussianRandomProjectionHash objects
Hash function g(p,x) for a tree is an array of 32 randomly generated
float arrays with the same dimension as the data set. This array is
        stored in the GaussianRandomProjectionHash object and can be obtained
        from the ``components_`` attribute.
trees_ : array, shape (n_estimators, n_samples)
Each tree (corresponding to a hash function) contains an array of
sorted hashed values. The array representation may change in future
versions.
original_indices_ : array, shape (n_estimators, n_samples)
Original indices of sorted hashed values in the fitted index.
References
----------
.. [1] M. Bawa, T. Condie and P. Ganesan, "LSH Forest: Self-Tuning
Indexes for Similarity Search", WWW '05 Proceedings of the
14th international conference on World Wide Web, 651-660,
2005.
Examples
--------
>>> from sklearn.neighbors import LSHForest
>>> X_train = [[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1], [6, 10, 2]]
>>> X_test = [[9, 1, 6], [3, 1, 10], [7, 10, 3]]
>>> lshf = LSHForest(random_state=42)
>>> lshf.fit(X_train) # doctest: +NORMALIZE_WHITESPACE
LSHForest(min_hash_match=4, n_candidates=50, n_estimators=10,
n_neighbors=5, radius=1.0, radius_cutoff_ratio=0.9,
random_state=42)
>>> distances, indices = lshf.kneighbors(X_test, n_neighbors=2)
>>> distances # doctest: +ELLIPSIS
array([[ 0.069..., 0.149...],
[ 0.229..., 0.481...],
[ 0.004..., 0.014...]])
>>> indices
array([[1, 2],
[2, 0],
[4, 0]])
"""
def __init__(self, n_estimators=10, radius=1.0, n_candidates=50,
n_neighbors=5, min_hash_match=4, radius_cutoff_ratio=.9,
random_state=None):
self.n_estimators = n_estimators
self.radius = radius
self.random_state = random_state
self.n_candidates = n_candidates
self.n_neighbors = n_neighbors
self.min_hash_match = min_hash_match
self.radius_cutoff_ratio = radius_cutoff_ratio
warnings.warn("LSHForest has poor performance and has been deprecated "
"in 0.19. It will be removed in version 0.21.",
DeprecationWarning)
def _compute_distances(self, query, candidates):
"""Computes the cosine distance.
Distance is from the query to points in the candidates array.
Returns argsort of distances in the candidates
array and sorted distances.
"""
if candidates.shape == (0,):
# needed since _fit_X[np.array([])] doesn't work if _fit_X sparse
return np.empty(0, dtype=np.int), np.empty(0, dtype=float)
if sparse.issparse(self._fit_X):
candidate_X = self._fit_X[candidates]
else:
candidate_X = self._fit_X.take(candidates, axis=0, mode='clip')
distances = pairwise_distances(query, candidate_X,
metric='cosine')[0]
distance_positions = np.argsort(distances)
distances = distances.take(distance_positions, mode='clip', axis=0)
return distance_positions, distances
def _generate_masks(self):
"""Creates left and right masks for all hash lengths."""
tri_size = MAX_HASH_SIZE + 1
# Called once on fitting, output is independent of hashes
left_mask = np.tril(np.ones((tri_size, tri_size), dtype=int))[:, 1:]
right_mask = left_mask[::-1, ::-1]
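        # Row h of the left mask keeps the h most significant bits of a
        # hash and row h of the right mask sets the remaining low bits, so
        # `x & left_mask[h]` and `x | right_mask[h]` bound the range of
        # 32-bit hashes that share an h-bit prefix with x.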
self._left_mask = np.packbits(left_mask).view(dtype=HASH_DTYPE)
self._right_mask = np.packbits(right_mask).view(dtype=HASH_DTYPE)
def _get_candidates(self, query, max_depth, bin_queries, n_neighbors):
"""Performs the Synchronous ascending phase.
Returns an array of candidates, their distance ranks and
distances.
"""
index_size = self._fit_X.shape[0]
# Number of candidates considered including duplicates
# XXX: not sure whether this is being calculated correctly wrt
# duplicates from different iterations through a single tree
n_candidates = 0
candidate_set = set()
min_candidates = self.n_candidates * self.n_estimators
while (max_depth > self.min_hash_match and
(n_candidates < min_candidates or
len(candidate_set) < n_neighbors)):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
n_candidates += stop - start
candidate_set.update(
self.original_indices_[i][start:stop].tolist())
max_depth -= 1
candidates = np.fromiter(candidate_set, count=len(candidate_set),
dtype=np.intp)
        # If there are not enough candidates, fill them up uniformly
        # from the indices that were not selected.
if candidates.shape[0] < n_neighbors:
warnings.warn(
"Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (n_neighbors, self.min_hash_match))
remaining = np.setdiff1d(np.arange(0, index_size), candidates)
to_fill = n_neighbors - candidates.shape[0]
candidates = np.concatenate((candidates, remaining[:to_fill]))
ranks, distances = self._compute_distances(query,
candidates.astype(int))
return (candidates[ranks[:n_neighbors]],
distances[:n_neighbors])
def _get_radius_neighbors(self, query, max_depth, bin_queries, radius):
"""Finds radius neighbors from the candidates obtained.
Their distances from query are smaller than radius.
Returns radius neighbors and distances.
"""
ratio_within_radius = 1
threshold = 1 - self.radius_cutoff_ratio
total_candidates = np.array([], dtype=int)
total_neighbors = np.array([], dtype=int)
total_distances = np.array([], dtype=float)
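        # Keep shortening the matched prefix, adding new candidates from
        # every tree, until the fraction of candidates lying inside the
        # radius drops to (1 - radius_cutoff_ratio) or below, or until the
        # prefix length reaches min_hash_match.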
while (max_depth > self.min_hash_match and
ratio_within_radius > threshold):
left_mask = self._left_mask[max_depth]
right_mask = self._right_mask[max_depth]
candidates = []
for i in range(self.n_estimators):
start, stop = _find_matching_indices(self.trees_[i],
bin_queries[i],
left_mask, right_mask)
candidates.extend(
self.original_indices_[i][start:stop].tolist())
candidates = np.setdiff1d(candidates, total_candidates)
total_candidates = np.append(total_candidates, candidates)
ranks, distances = self._compute_distances(query, candidates)
m = np.searchsorted(distances, radius, side='right')
positions = np.searchsorted(total_distances, distances[:m])
total_neighbors = np.insert(total_neighbors, positions,
candidates[ranks[:m]])
total_distances = np.insert(total_distances, positions,
distances[:m])
ratio_within_radius = (total_neighbors.shape[0] /
float(total_candidates.shape[0]))
max_depth = max_depth - 1
return total_neighbors, total_distances
def fit(self, X, y=None):
"""Fit the LSH forest on the data.
        This creates binary hashes of the input data points by taking the
        dot product of the input points and the hash functions, then
        transforming the projection into a binary string array based
        on the sign (positive/negative) of the projection.
A sorted array of binary hashes is created.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self : object
Returns self.
"""
self._fit_X = check_array(X, accept_sparse='csr')
# Creates a g(p,x) for each tree
self.hash_functions_ = []
self.trees_ = []
self.original_indices_ = []
rng = check_random_state(self.random_state)
int_max = np.iinfo(np.int32).max
for i in range(self.n_estimators):
# This is g(p,x) for a particular tree.
# Builds a single tree. Hashing is done on an array of data points.
# `GaussianRandomProjection` is used for hashing.
            # `n_components` = hash size and `n_features` = n_dim.
hasher = GaussianRandomProjectionHash(MAX_HASH_SIZE,
rng.randint(0, int_max))
hashes = hasher.fit_transform(self._fit_X)[:, 0]
original_index = np.argsort(hashes)
bin_hashes = hashes[original_index]
self.original_indices_.append(original_index)
self.trees_.append(bin_hashes)
self.hash_functions_.append(hasher)
self._generate_masks()
return self
def _query(self, X):
"""Performs descending phase to find maximum depth."""
# Calculate hashes of shape (n_samples, n_estimators, [hash_size])
bin_queries = np.asarray([hasher.transform(X)[:, 0]
for hasher in self.hash_functions_])
bin_queries = np.rollaxis(bin_queries, 1)
# descend phase
depths = [_find_longest_prefix_match(tree, tree_queries, MAX_HASH_SIZE,
self._left_mask, self._right_mask)
for tree, tree_queries in zip(self.trees_,
np.rollaxis(bin_queries, 1))]
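        # For each query, the ascending phase starts from the deepest
        # prefix matched in any of the trees.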
return bin_queries, np.max(depths, axis=0)
def kneighbors(self, X, n_neighbors=None, return_distance=True):
"""Returns n_neighbors of approximate nearest neighbors.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
n_neighbors : int, optional (default = None)
Number of neighbors required. If not provided, this will
return the number specified at the initialization.
return_distance : boolean, optional (default = True)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples, n_neighbors)
Array representing the cosine distances to each point,
only present if return_distance=True.
ind : array, shape (n_samples, n_neighbors)
Indices of the approximate nearest points in the population
matrix.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_candidates(X[[i]], max_depth[i],
bin_queries[i],
n_neighbors)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return np.array(distances), np.array(neighbors)
else:
return np.array(neighbors)
def radius_neighbors(self, X, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of some points from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
LSH Forest being an approximate method, some true neighbors from the
indexed dataset might be missing from the results.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single query.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional (default = False)
Returns the distances of neighbors if set to True.
Returns
-------
dist : array, shape (n_samples,) of arrays
Each element is an array representing the cosine distances
to some points found within ``radius`` of the respective query.
Only present if ``return_distance=True``.
ind : array, shape (n_samples,) of arrays
Each element is an array of indices for neighbors within ``radius``
of the respective query.
"""
if not hasattr(self, 'hash_functions_'):
raise ValueError("estimator should be fitted.")
if radius is None:
radius = self.radius
X = check_array(X, accept_sparse='csr')
neighbors, distances = [], []
bin_queries, max_depth = self._query(X)
for i in range(X.shape[0]):
neighs, dists = self._get_radius_neighbors(X[[i]], max_depth[i],
bin_queries[i], radius)
neighbors.append(neighs)
distances.append(dists)
if return_distance:
return _array_of_arrays(distances), _array_of_arrays(neighbors)
else:
return _array_of_arrays(neighbors)
def partial_fit(self, X, y=None):
"""
Inserts new data into the already fitted LSH Forest.
Cost is proportional to new total size, so additions
should be batched.
Parameters
----------
X : array_like or sparse (CSR) matrix, shape (n_samples, n_features)
New data point to be inserted into the LSH Forest.
"""
X = check_array(X, accept_sparse='csr')
if not hasattr(self, 'hash_functions_'):
return self.fit(X)
if X.shape[1] != self._fit_X.shape[1]:
raise ValueError("Number of features in X and"
" fitted array does not match.")
n_samples = X.shape[0]
n_indexed = self._fit_X.shape[0]
for i in range(self.n_estimators):
bin_X = self.hash_functions_[i].transform(X)[:, 0]
# gets the position to be added in the tree.
positions = self.trees_[i].searchsorted(bin_X)
# adds the hashed value into the tree.
self.trees_[i] = np.insert(self.trees_[i],
positions, bin_X)
# add the entry into the original_indices_.
self.original_indices_[i] = np.insert(self.original_indices_[i],
positions,
np.arange(n_indexed,
n_indexed +
n_samples))
# adds the entry into the input_array.
if sparse.issparse(X) or sparse.issparse(self._fit_X):
self._fit_X = sparse.vstack((self._fit_X, X))
else:
self._fit_X = np.row_stack((self._fit_X, X))
return self
| bsd-3-clause |
stdweird/aquilon | lib/python2.6/aquilon/worker/commands/show_organization_all.py | 2 | 1252 | # -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains the logic for `aq show organization`."""
from aquilon.worker.broker import BrokerCommand # pylint: disable=W0611
from aquilon.worker.commands.show_location_type import CommandShowLocationType
class CommandShowOrganizationAll(CommandShowLocationType):
required_parameters = []
def render(self, session, **arguments):
return CommandShowLocationType.render(self, session=session,
type='company', name=None,
**arguments)
| apache-2.0 |
wangxianliang/facenet | tmp/mnist_noise_labels.py | 1 | 15390 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple, end-to-end, LeNet-5-like convolutional MNIST model example.
This should achieve a test error of 0.7%. Please keep this model as simple and
linear as possible; it is meant as a tutorial for simple convolutional models.
Run with --self_test on the command line to execute a short self-test.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import sys
import time
from six.moves import urllib  # @UnresolvedImport
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf
import numpy as np
SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
WORK_DIRECTORY = 'data'
IMAGE_SIZE = 28
NUM_CHANNELS = 1
PIXEL_DEPTH = 255
NUM_LABELS = 10
VALIDATION_SIZE = 5000 # Size of the validation set.
SEED = 66478 # Set to None for random seed.
BATCH_SIZE = 64
NUM_EPOCHS = 10
EVAL_BATCH_SIZE = 64
EVAL_FREQUENCY = 100 # Number of steps between evaluations.
NOISE_FACTOR = 0.2
BETA = 0.8
tf.app.flags.DEFINE_boolean("self_test", False, "True if running a self test.")
tf.app.flags.DEFINE_boolean('use_fp16', False,
"Use half floats instead of full floats if True.")
FLAGS = tf.app.flags.FLAGS
def data_type():
"""Return the type of the activations, weights, and placeholder variables."""
if FLAGS.use_fp16:
return tf.float16
else:
return tf.float32
def maybe_download(filename):
"""Download the data from Yann's website, unless it's already here."""
if not tf.gfile.Exists(WORK_DIRECTORY):
tf.gfile.MakeDirs(WORK_DIRECTORY)
filepath = os.path.join(WORK_DIRECTORY, filename)
if not tf.gfile.Exists(filepath):
filepath, _ = urllib.request.urlretrieve(SOURCE_URL + filename, filepath)
with tf.gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
def extract_data(filename, num_images):
"""Extract the images into a 4D tensor [image index, y, x, channels].
Values are rescaled from [0, 255] down to [-0.5, 0.5].
"""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(IMAGE_SIZE * IMAGE_SIZE * num_images * NUM_CHANNELS)
data = np.frombuffer(buf, dtype=np.uint8).astype(np.float32)
data = (data - (PIXEL_DEPTH / 2.0)) / PIXEL_DEPTH
data = data.reshape(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS)
return data
def extract_labels(filename, num_images):
"""Extract the labels into a vector of int64 label IDs."""
print('Extracting', filename)
with gzip.open(filename) as bytestream:
bytestream.read(8)
buf = bytestream.read(1 * num_images)
labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
return labels
def fake_data(num_images):
"""Generate a fake dataset that matches the dimensions of MNIST."""
data = np.ndarray(
shape=(num_images, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS),
dtype=np.float32)
labels = np.zeros(shape=(num_images,), dtype=np.int64)
for image in range(num_images):
label = image % 2
data[image, :, :, 0] = label - 0.5
labels[image] = label
return data, labels
def error_rate(predictions, labels):
"""Return the error rate based on dense predictions and sparse labels."""
return 100.0 - (
100.0 *
np.sum(np.argmax(predictions, 1) == labels) /
predictions.shape[0])
def main(argv=None): # pylint: disable=unused-argument
if FLAGS.self_test:
print('Running self-test.')
train_data, train_labels = fake_data(256)
validation_data, validation_labels = fake_data(EVAL_BATCH_SIZE)
test_data, test_labels = fake_data(EVAL_BATCH_SIZE)
num_epochs = 1
else:
# Get the data.
train_data_filename = maybe_download('train-images-idx3-ubyte.gz')
train_labels_filename = maybe_download('train-labels-idx1-ubyte.gz')
test_data_filename = maybe_download('t10k-images-idx3-ubyte.gz')
test_labels_filename = maybe_download('t10k-labels-idx1-ubyte.gz')
# Extract it into numpy arrays.
train_data = extract_data(train_data_filename, 60000)
train_labels = extract_labels(train_labels_filename, 60000)
test_data = extract_data(test_data_filename, 10000)
test_labels = extract_labels(test_labels_filename, 10000)
# Generate a validation set.
validation_data = train_data[:VALIDATION_SIZE, ...]
validation_labels = train_labels[:VALIDATION_SIZE]
train_data = train_data[VALIDATION_SIZE:, ...]
train_labels = train_labels[VALIDATION_SIZE:]
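    # Corrupt a NOISE_FACTOR fraction of the training labels: each selected
    # label is shifted by a random offset in [1, 8] modulo NUM_LABELS, so
    # it always lands in a different (wrong) class.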
nrof_training_examples = train_labels.shape[0]
nrof_changed_labels = int(nrof_training_examples*NOISE_FACTOR)
shuf = np.arange(0,nrof_training_examples)
np.random.shuffle(shuf)
change_idx = shuf[0:nrof_changed_labels]
train_labels[change_idx] = (train_labels[change_idx] + np.random.randint(1,9,size=(nrof_changed_labels,))) % NUM_LABELS
num_epochs = NUM_EPOCHS
train_size = train_labels.shape[0]
# This is where training samples and labels are fed to the graph.
# These placeholder nodes will be fed a batch of training data at each
# training step using the {feed_dict} argument to the Run() call below.
train_data_node = tf.placeholder(
data_type(),
shape=(BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
train_labels_node = tf.placeholder(tf.int64, shape=(BATCH_SIZE,))
eval_data = tf.placeholder(
data_type(),
shape=(EVAL_BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, NUM_CHANNELS))
# The variables below hold all the trainable weights. They are passed an
# initial value which will be assigned when we call:
# {tf.global_variables_initializer().run()}
conv1_weights = tf.Variable(
tf.truncated_normal([5, 5, NUM_CHANNELS, 32], # 5x5 filter, depth 32.
stddev=0.1,
seed=SEED, dtype=data_type()))
conv1_biases = tf.Variable(tf.zeros([32], dtype=data_type()))
conv2_weights = tf.Variable(tf.truncated_normal(
[5, 5, 32, 64], stddev=0.1,
seed=SEED, dtype=data_type()))
conv2_biases = tf.Variable(tf.constant(0.1, shape=[64], dtype=data_type()))
fc1_weights = tf.Variable( # fully connected, depth 512.
tf.truncated_normal([IMAGE_SIZE // 4 * IMAGE_SIZE // 4 * 64, 512],
stddev=0.1,
seed=SEED,
dtype=data_type()))
fc1_biases = tf.Variable(tf.constant(0.1, shape=[512], dtype=data_type()))
fc2_weights = tf.Variable(tf.truncated_normal([512, NUM_LABELS],
stddev=0.1,
seed=SEED,
dtype=data_type()))
fc2_biases = tf.Variable(tf.constant(
0.1, shape=[NUM_LABELS], dtype=data_type()))
# We will replicate the model structure for the training subgraph, as well
# as the evaluation subgraphs, while sharing the trainable parameters.
def model(data, train=False):
"""The Model definition."""
# 2D convolution, with 'SAME' padding (i.e. the output feature map has
# the same size as the input). Note that {strides} is a 4D array whose
# shape matches the data layout: [image index, y, x, depth].
conv = tf.nn.conv2d(data,
conv1_weights,
strides=[1, 1, 1, 1],
padding='SAME')
# Bias and rectified linear non-linearity.
relu = tf.nn.relu(tf.nn.bias_add(conv, conv1_biases))
# Max pooling. The kernel size spec {ksize} also follows the layout of
# the data. Here we have a pooling window of 2, and a stride of 2.
pool = tf.nn.max_pool(relu,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
conv = tf.nn.conv2d(pool,
conv2_weights,
strides=[1, 1, 1, 1],
padding='SAME')
relu = tf.nn.relu(tf.nn.bias_add(conv, conv2_biases))
pool = tf.nn.max_pool(relu,
ksize=[1, 2, 2, 1],
strides=[1, 2, 2, 1],
padding='SAME')
# Reshape the feature map cuboid into a 2D matrix to feed it to the
# fully connected layers.
pool_shape = pool.get_shape().as_list() #pylint: disable=no-member
reshape = tf.reshape(
pool,
[pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3]])
# Fully connected layer. Note that the '+' operation automatically
# broadcasts the biases.
hidden = tf.nn.relu(tf.matmul(reshape, fc1_weights) + fc1_biases)
# Add a 50% dropout during training only. Dropout also scales
# activations such that no rescaling is needed at evaluation time.
if train:
hidden = tf.nn.dropout(hidden, 0.5, seed=SEED)
return tf.matmul(hidden, fc2_weights) + fc2_biases
# Training computation: logits + cross-entropy loss.
logits = model(train_data_node, True)
# t: observed noisy labels
# q: estimated class probabilities (output from softmax)
# z: argmax of q
t = tf.one_hot(train_labels_node, NUM_LABELS)
q = tf.nn.softmax(logits)
qqq = tf.arg_max(q, dimension=1)
z = tf.one_hot(qqq, NUM_LABELS)
#cross_entropy = -tf.reduce_sum(t*tf.log(q),reduction_indices=1)
cross_entropy = -tf.reduce_sum((BETA*t+(1-BETA)*z)*tf.log(q),axis=1)
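  # The target here is a convex combination of the observed (noisy) label t
  # and the model's own current hard prediction z, weighted by BETA; this
  # resembles the "hard bootstrapping" loss of Reed et al. for training
  # with noisy labels.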
loss = tf.reduce_mean(cross_entropy)
# loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
# logits, train_labels_node))
# L2 regularization for the fully connected parameters.
regularizers = (tf.nn.l2_loss(fc1_weights) + tf.nn.l2_loss(fc1_biases) +
tf.nn.l2_loss(fc2_weights) + tf.nn.l2_loss(fc2_biases))
# Add the regularization term to the loss.
loss += 5e-4 * regularizers
# Optimizer: set up a variable that's incremented once per batch and
# controls the learning rate decay.
batch = tf.Variable(0, dtype=data_type())
# Decay once per epoch, using an exponential schedule starting at 0.01.
learning_rate = tf.train.exponential_decay(
0.01, # Base learning rate.
batch * BATCH_SIZE, # Current index into the dataset.
train_size, # Decay step.
0.95, # Decay rate.
staircase=True)
# Use simple momentum for the optimization.
optimizer = tf.train.MomentumOptimizer(learning_rate,
0.9).minimize(loss,
global_step=batch)
# Predictions for the current training minibatch.
train_prediction = tf.nn.softmax(logits)
# Predictions for the test and validation, which we'll compute less often.
eval_prediction = tf.nn.softmax(model(eval_data))
# Small utility function to evaluate a dataset by feeding batches of data to
# {eval_data} and pulling the results from {eval_predictions}.
# Saves memory and enables this to run on smaller GPUs.
def eval_in_batches(data, sess):
"""Get all predictions for a dataset by running it in small batches."""
size = data.shape[0]
if size < EVAL_BATCH_SIZE:
raise ValueError("batch size for evals larger than dataset: %d" % size)
predictions = np.ndarray(shape=(size, NUM_LABELS), dtype=np.float32)
for begin in xrange(0, size, EVAL_BATCH_SIZE):
end = begin + EVAL_BATCH_SIZE
if end <= size:
predictions[begin:end, :] = sess.run(
eval_prediction,
feed_dict={eval_data: data[begin:end, ...]})
else:
batch_predictions = sess.run(
eval_prediction,
feed_dict={eval_data: data[-EVAL_BATCH_SIZE:, ...]})
predictions[begin:, :] = batch_predictions[begin - size:, :]
return predictions
# Create a local session to run the training.
start_time = time.time()
with tf.Session() as sess:
# Run all the initializers to prepare the trainable parameters.
tf.global_variables_initializer().run() #pylint: disable=no-member
print('Initialized!')
# Loop through training steps.
for step in xrange(int(num_epochs * train_size) // BATCH_SIZE):
# Compute the offset of the current minibatch in the data.
# Note that we could use better randomization across epochs.
offset = (step * BATCH_SIZE) % (train_size - BATCH_SIZE)
batch_data = train_data[offset:(offset + BATCH_SIZE), ...]
batch_labels = train_labels[offset:(offset + BATCH_SIZE)]
# This dictionary maps the batch data (as a numpy array) to the
# node in the graph it should be fed to.
feed_dict = {train_data_node: batch_data,
train_labels_node: batch_labels}
# Run the graph and fetch some of the nodes.
_, l, lr, predictions = sess.run(
[optimizer, loss, learning_rate, train_prediction],
feed_dict=feed_dict)
if step % EVAL_FREQUENCY == 0:
elapsed_time = time.time() - start_time
start_time = time.time()
print('Step %d (epoch %.2f), %.1f ms' %
(step, float(step) * BATCH_SIZE / train_size,
1000 * elapsed_time / EVAL_FREQUENCY))
print('Minibatch loss: %.3f, learning rate: %.6f' % (l, lr))
print('Minibatch error: %.1f%%' % error_rate(predictions, batch_labels))
print('Validation error: %.1f%%' % error_rate(
eval_in_batches(validation_data, sess), validation_labels))
sys.stdout.flush()
# Finally print the result!
test_error = error_rate(eval_in_batches(test_data, sess), test_labels)
print('Test error: %.1f%%' % test_error)
if FLAGS.self_test:
print('test_error', test_error)
assert test_error == 0.0, 'expected 0.0 test_error, got %.2f' % (
test_error,)
if __name__ == '__main__':
tf.app.run()
| mit |
Weihonghao/ECM | Vpy34/lib/python3.5/site-packages/tensorflow/python/training/adagrad_da.py | 34 | 5998 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adagrad Dual Averaging for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.training import optimizer
from tensorflow.python.training import training_ops
class AdagradDAOptimizer(optimizer.Optimizer):
"""Adagrad Dual Averaging algorithm for sparse linear models.
See this [paper](http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).
This optimizer takes care of regularization of unseen features in a mini batch
by updating them when they are seen with a closed form update rule that is
equivalent to having updated them on every mini-batch.
AdagradDA is typically used when there is a need for large sparsity in the
trained model. This optimizer only guarantees sparsity for linear models. Be
careful when using AdagradDA for deep networks as it will require careful
initialization of the gradient accumulators for it to train.
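  A minimal usage sketch (variable names are illustrative; `loss` comes from
  the surrounding training code):
    global_step = tf.Variable(0, trainable=False, dtype=tf.int64)
    opt = tf.train.AdagradDAOptimizer(learning_rate=0.1,
                                      global_step=global_step)
    train_op = opt.minimize(loss, global_step=global_step)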
@@__init__
"""
def __init__(self,
learning_rate,
global_step,
initial_gradient_squared_accumulator_value=0.1,
l1_regularization_strength=0.0,
l2_regularization_strength=0.0,
use_locking=False,
name="AdagradDA"):
"""Construct a new AdagradDA optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
global_step: A `Tensor` containing the current training step number.
initial_gradient_squared_accumulator_value: A floating point value.
Starting value for the accumulators, must be positive.
l1_regularization_strength: A float value, must be greater than or
equal to zero.
l2_regularization_strength: A float value, must be greater than or
equal to zero.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "AdagradDA".
Raises:
ValueError: If the `initial_gradient_squared_accumulator_value` is
invalid.
"""
if initial_gradient_squared_accumulator_value <= 0.0:
raise ValueError("initial_gradient_squared_accumulator_value must be"
"positive: %s" %
initial_gradient_squared_accumulator_value)
super(AdagradDAOptimizer, self).__init__(use_locking, name)
self._learning_rate = learning_rate
self._initial_gradient_squared_accumulator_value = (
initial_gradient_squared_accumulator_value)
# Created in Initialize.
self._learning_rate_tensor = None
self._l1_regularization_strength = l1_regularization_strength
self._l2_regularization_strength = l2_regularization_strength
self._global_step = global_step
def _create_slots(self, var_list):
for v in var_list:
with ops.colocate_with(v):
g_val = constant_op.constant(
0.0, shape=v.get_shape(), dtype=v.dtype.base_dtype)
gg_val = constant_op.constant(
self._initial_gradient_squared_accumulator_value,
shape=v.get_shape(),
dtype=v.dtype.base_dtype)
self._get_or_make_slot(v, g_val, "gradient_accumulator", self._name)
self._get_or_make_slot(v, gg_val, "gradient_squared_accumulator",
self._name)
def _prepare(self):
self._learning_rate_tensor = ops.convert_to_tensor(
self._learning_rate, name="learning_rate")
def _apply_dense(self, grad, var):
g_acc = self.get_slot(var, "gradient_accumulator")
gg_acc = self.get_slot(var, "gradient_squared_accumulator")
# Performance optimization so that worker creates a copy of the global step
# to avoid overloading the parameter server holding the global step.
with ops.device(grad[0].device):
global_step = array_ops.identity(self._global_step) + 1
return training_ops.apply_adagrad_da(
var,
g_acc,
gg_acc,
grad,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength, var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength, var.dtype.base_dtype),
global_step,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
g_acc = self.get_slot(var, "gradient_accumulator")
gg_acc = self.get_slot(var, "gradient_squared_accumulator")
# Performance optimization so that worker creates a copy of the global step
# to avoid overloading the parameter server holding the global step.
with ops.device(grad[0].device):
global_step = array_ops.identity(self._global_step) + 1
return training_ops.sparse_apply_adagrad_da(
var,
g_acc,
gg_acc,
grad.values,
grad.indices,
math_ops.cast(self._learning_rate_tensor, var.dtype.base_dtype),
math_ops.cast(self._l1_regularization_strength, var.dtype.base_dtype),
math_ops.cast(self._l2_regularization_strength, var.dtype.base_dtype),
global_step,
use_locking=self._use_locking)
| agpl-3.0 |
bregman-arie/ansible | lib/ansible/modules/cloud/azure/azure_rm_availabilityset_facts.py | 15 | 4569 | #!/usr/bin/python
#
# Copyright (c) 2016 Julien Stroheker, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_availabilityset_facts
version_added: "2.4"
short_description: Get availability set facts.
description:
- Get facts for a specific availability set or all availability sets.
options:
name:
description:
- Limit results to a specific availability set
resource_group:
description:
- The resource group to search for the desired availability set
extends_documentation_fragment:
- azure
author:
- "Julien Stroheker (@julienstroheker)"
'''
EXAMPLES = '''
- name: Get facts for one availability set
azure_rm_availabilityset_facts:
name: Testing
resource_group: TestRG
- name: Get facts for all availability sets in a specific resource group
azure_rm_availabilityset_facts:
resource_group: TestRG
'''
RETURN = '''
azure_availabilityset:
description: List of availability sets dicts.
returned: always
type: list
example: [{
"location": "eastus2",
"name": "myavailabilityset",
"properties": {
"platformFaultDomainCount": 3,
"platformUpdateDomainCount": 2,
"virtualMachines": []
},
"sku": "Aligned",
"type": "Microsoft.Compute/availabilitySets"
}]
'''
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
try:
from msrestazure.azure_exceptions import CloudError
except:
# handled in azure_rm_common
pass
AZURE_OBJECT_CLASS = 'AvailabilitySet'
class AzureRMAvailabilitySetFacts(AzureRMModuleBase):
"""Utility class to get availability set facts"""
def __init__(self):
self.module_args = dict(
name=dict(type='str'),
resource_group=dict(type='str'),
tags=dict(type='list')
)
self.results = dict(
changed=False,
ansible_facts=dict(
azure_availabilitysets=[]
)
)
self.name = None
self.resource_group = None
self.tags = None
super(AzureRMAvailabilitySetFacts, self).__init__(
derived_arg_spec=self.module_args,
supports_tags=False,
facts_module=True
)
def exec_module(self, **kwargs):
for key in self.module_args:
setattr(self, key, kwargs[key])
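        # With a name, look up that single availability set (a resource
        # group is then required); otherwise list the availability sets in
        # the given resource group, filtering by tags either way.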
if self.name and not self.resource_group:
self.fail("Parameter error: resource group required when filtering by name.")
if self.name:
self.results['ansible_facts']['azure_availabilitysets'] = self.get_item()
else:
self.results['ansible_facts']['azure_availabilitysets'] = self.list_items()
return self.results
def get_item(self):
"""Get a single availability set"""
self.log('Get properties for {}'.format(self.name))
item = None
result = []
try:
item = self.compute_client.availability_sets.get(self.resource_group, self.name)
except CloudError:
pass
if item and self.has_tags(item.tags, self.tags):
avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
avase['name'] = item.name
avase['type'] = item.type
avase['sku'] = item.sku.name
result = [avase]
return result
def list_items(self):
"""Get all availability sets"""
self.log('List all availability sets')
try:
response = self.compute_client.availability_sets.list(self.resource_group)
except CloudError as exc:
self.fail('Failed to list all items - {}'.format(str(exc)))
results = []
for item in response:
if self.has_tags(item.tags, self.tags):
avase = self.serialize_obj(item, AZURE_OBJECT_CLASS)
avase['name'] = item.name
avase['type'] = item.type
avase['sku'] = item.sku.name
results.append(avase)
return results
def main():
"""Main module execution code path"""
AzureRMAvailabilitySetFacts()
if __name__ == '__main__':
main()
| gpl-3.0 |
jagguli/intellij-community | python/lib/Lib/distutils/file_util.py | 81 | 8341 | """distutils.file_util
Utility functions for operating on single files.
"""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: file_util.py 37828 2004-11-10 22:23:15Z loewis $"
import os
from distutils.errors import DistutilsFileError
from distutils import log
# for generating verbose output in 'copy_file()'
_copy_action = { None: 'copying',
'hard': 'hard linking',
'sym': 'symbolically linking' }
def _copy_file_contents (src, dst, buffer_size=16*1024):
"""Copy the file 'src' to 'dst'; both must be filenames. Any error
opening either file, reading from 'src', or writing to 'dst', raises
DistutilsFileError. Data is read/written in chunks of 'buffer_size'
bytes (default 16k). No attempt is made to handle anything apart from
regular files.
"""
# Stolen from shutil module in the standard library, but with
# custom error-handling added.
fsrc = None
fdst = None
try:
try:
fsrc = open(src, 'rb')
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not open '%s': %s" % (src, errstr)
if os.path.exists(dst):
try:
os.unlink(dst)
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not delete '%s': %s" % (dst, errstr)
try:
fdst = open(dst, 'wb')
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not create '%s': %s" % (dst, errstr)
while 1:
try:
buf = fsrc.read(buffer_size)
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not read from '%s': %s" % (src, errstr)
if not buf:
break
try:
fdst.write(buf)
except os.error, (errno, errstr):
raise DistutilsFileError, \
"could not write to '%s': %s" % (dst, errstr)
finally:
if fdst:
fdst.close()
if fsrc:
fsrc.close()
# _copy_file_contents()
def copy_file (src, dst,
preserve_mode=1,
preserve_times=1,
update=0,
link=None,
verbose=0,
dry_run=0):
"""Copy a file 'src' to 'dst'. If 'dst' is a directory, then 'src' is
copied there with the same name; otherwise, it must be a filename. (If
the file exists, it will be ruthlessly clobbered.) If 'preserve_mode'
is true (the default), the file's mode (type and permission bits, or
whatever is analogous on the current platform) is copied. If
'preserve_times' is true (the default), the last-modified and
last-access times are copied as well. If 'update' is true, 'src' will
only be copied if 'dst' does not exist, or if 'dst' does exist but is
older than 'src'.
'link' allows you to make hard links (os.link) or symbolic links
(os.symlink) instead of copying: set it to "hard" or "sym"; if it is
None (the default), files are copied. Don't set 'link' on systems that
don't support it: 'copy_file()' doesn't check if hard or symbolic
linking is available.
Under Mac OS, uses the native file copy function in macostools; on
other systems, uses '_copy_file_contents()' to copy file contents.
Return a tuple (dest_name, copied): 'dest_name' is the actual name of
the output file, and 'copied' is true if the file was copied (or would
have been copied, if 'dry_run' true).
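    For example (illustrative paths), copy_file('README.txt', 'build/')
    copies the file to 'build/README.txt' and returns
    ('build/README.txt', 1).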
"""
# XXX if the destination file already exists, we clobber it if
# copying, but blow up if linking. Hmmm. And I don't know what
# macostools.copyfile() does. Should definitely be consistent, and
# should probably blow up if destination exists and we would be
# changing it (ie. it's not already a hard/soft link to src OR
# (not update) and (src newer than dst).
from distutils.dep_util import newer
from stat import ST_ATIME, ST_MTIME, ST_MODE, S_IMODE
if not os.path.isfile(src):
raise DistutilsFileError, \
"can't copy '%s': doesn't exist or not a regular file" % src
if os.path.isdir(dst):
dir = dst
dst = os.path.join(dst, os.path.basename(src))
else:
dir = os.path.dirname(dst)
if update and not newer(src, dst):
log.debug("not copying %s (output up-to-date)", src)
return dst, 0
try:
action = _copy_action[link]
except KeyError:
raise ValueError, \
"invalid value '%s' for 'link' argument" % link
if os.path.basename(dst) == os.path.basename(src):
log.info("%s %s -> %s", action, src, dir)
else:
log.info("%s %s -> %s", action, src, dst)
if dry_run:
return (dst, 1)
# On Mac OS, use the native file copy routine
if os.name == 'mac':
import macostools
try:
macostools.copy(src, dst, 0, preserve_times)
except os.error, exc:
raise DistutilsFileError, \
"could not copy '%s' to '%s': %s" % (src, dst, exc[-1])
# If linking (hard or symbolic), use the appropriate system call
# (Unix only, of course, but that's the caller's responsibility)
elif link == 'hard':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.link(src, dst)
elif link == 'sym':
if not (os.path.exists(dst) and os.path.samefile(src, dst)):
os.symlink(src, dst)
# Otherwise (non-Mac, not linking), copy the file contents and
# (optionally) copy the times and mode.
else:
_copy_file_contents(src, dst)
if preserve_mode or preserve_times:
st = os.stat(src)
# According to David Ascher <[email protected]>, utime() should be done
# before chmod() (at least under NT).
if preserve_times:
os.utime(dst, (st[ST_ATIME], st[ST_MTIME]))
if preserve_mode and hasattr(os, 'chmod'):
os.chmod(dst, S_IMODE(st[ST_MODE]))
return (dst, 1)
# copy_file ()
# XXX I suspect this is Unix-specific -- need porting help!
def move_file (src, dst,
verbose=0,
dry_run=0):
"""Move a file 'src' to 'dst'. If 'dst' is a directory, the file will
be moved into it with the same name; otherwise, 'src' is just renamed
to 'dst'. Return the new full name of the file.
Handles cross-device moves on Unix using 'copy_file()'. What about
other systems???
"""
from os.path import exists, isfile, isdir, basename, dirname
import errno
log.info("moving %s -> %s", src, dst)
if dry_run:
return dst
if not isfile(src):
raise DistutilsFileError, \
"can't move '%s': not a regular file" % src
if isdir(dst):
dst = os.path.join(dst, basename(src))
elif exists(dst):
raise DistutilsFileError, \
"can't move '%s': destination '%s' already exists" % \
(src, dst)
if not isdir(dirname(dst)):
raise DistutilsFileError, \
"can't move '%s': destination '%s' not a valid path" % \
(src, dst)
copy_it = 0
try:
os.rename(src, dst)
except os.error, (num, msg):
if num == errno.EXDEV:
copy_it = 1
else:
raise DistutilsFileError, \
"couldn't move '%s' to '%s': %s" % (src, dst, msg)
if copy_it:
copy_file(src, dst)
try:
os.unlink(src)
except os.error, (num, msg):
try:
os.unlink(dst)
except os.error:
pass
raise DistutilsFileError, \
("couldn't move '%s' to '%s' by copy/delete: " +
"delete '%s' failed: %s") % \
(src, dst, src, msg)
return dst
# move_file ()
def write_file (filename, contents):
"""Create a file with the specified name and write 'contents' (a
sequence of strings without line terminators) to it.
"""
f = open(filename, "w")
for line in contents:
f.write(line + "\n")
f.close()
| apache-2.0 |
dya2/python-for-android | python3-alpha/extra_modules/bs4/testing.py | 46 | 18683 | """Helper classes for tests."""
import copy
import functools
import unittest
from unittest import TestCase
from bs4 import BeautifulSoup
from bs4.element import (
Comment,
Doctype,
SoupStrainer,
)
from bs4.builder import HTMLParserTreeBuilder
default_builder = HTMLParserTreeBuilder
class SoupTest(unittest.TestCase):
@property
def default_builder(self):
return default_builder()
def soup(self, markup, **kwargs):
"""Build a Beautiful Soup object from markup."""
builder = kwargs.pop('builder', self.default_builder)
return BeautifulSoup(markup, builder=builder, **kwargs)
def document_for(self, markup):
"""Turn an HTML fragment into a document.
The details depend on the builder.
"""
return self.default_builder.test_fragment_to_document(markup)
def assertSoupEquals(self, to_parse, compare_parsed_to=None):
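        # Parse 'to_parse' with the default builder and check that the
        # decoded tree matches the document the builder would produce for
        # 'compare_parsed_to' (which defaults to 'to_parse' itself).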
builder = self.default_builder
obj = BeautifulSoup(to_parse, builder=builder)
if compare_parsed_to is None:
compare_parsed_to = to_parse
self.assertEqual(obj.decode(), self.document_for(compare_parsed_to))
class HTMLTreeBuilderSmokeTest(object):
"""A basic test of a treebuilder's competence.
Any HTML treebuilder, present or future, should be able to pass
these tests. With invalid markup, there's room for interpretation,
and different parsers can handle it differently. But with the
markup in these tests, there's not much room for interpretation.
"""
def assertDoctypeHandled(self, doctype_fragment):
"""Assert that a given doctype string is handled correctly."""
doctype_str, soup = self._document_with_doctype(doctype_fragment)
# Make sure a Doctype object was created.
doctype = soup.contents[0]
self.assertEqual(doctype.__class__, Doctype)
self.assertEqual(doctype, doctype_fragment)
self.assertEqual(str(soup)[:len(doctype_str)], doctype_str)
# Make sure that the doctype was correctly associated with the
# parse tree and that the rest of the document parsed.
self.assertEqual(soup.p.contents[0], 'foo')
def _document_with_doctype(self, doctype_fragment):
"""Generate and parse a document with the given doctype."""
doctype = '<!DOCTYPE %s>' % doctype_fragment
markup = doctype + '\n<p>foo</p>'
soup = self.soup(markup)
return doctype, soup
def test_normal_doctypes(self):
"""Make sure normal, everyday HTML doctypes are handled correctly."""
self.assertDoctypeHandled("html")
self.assertDoctypeHandled(
'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"')
def test_public_doctype_with_url(self):
doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"'
self.assertDoctypeHandled(doctype)
def test_system_doctype(self):
self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"')
def test_namespaced_system_doctype(self):
# We can handle a namespaced doctype with a system ID.
self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"')
def test_namespaced_public_doctype(self):
# Test a namespaced doctype with a public id.
self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"')
def test_deepcopy(self):
"""Make sure you can copy the tree builder.
This is important because the builder is part of a
BeautifulSoup object, and we want to be able to copy that.
"""
copy.deepcopy(self.default_builder)
def test_p_tag_is_never_empty_element(self):
"""A <p> tag is never designated as an empty-element tag.
Even if the markup shows it as an empty-element tag, it
shouldn't be presented that way.
"""
soup = self.soup("<p/>")
self.assertFalse(soup.p.is_empty_element)
self.assertEqual(str(soup.p), "<p></p>")
def test_unclosed_tags_get_closed(self):
"""A tag that's not closed by the end of the document should be closed.
This applies to all tags except empty-element tags.
"""
self.assertSoupEquals("<p>", "<p></p>")
self.assertSoupEquals("<b>", "<b></b>")
self.assertSoupEquals("<br>", "<br/>")
def test_br_is_always_empty_element_tag(self):
"""A <br> tag is designated as an empty-element tag.
Some parsers treat <br></br> as one <br/> tag, some parsers as
two tags, but it should always be an empty-element tag.
"""
soup = self.soup("<br></br>")
self.assertTrue(soup.br.is_empty_element)
self.assertEqual(str(soup.br), "<br/>")
def test_nested_formatting_elements(self):
self.assertSoupEquals("<em><em></em></em>")
def test_comment(self):
# Comments are represented as Comment objects.
markup = "<p>foo<!--foobar-->baz</p>"
self.assertSoupEquals(markup)
soup = self.soup(markup)
comment = soup.find(text="foobar")
self.assertEqual(comment.__class__, Comment)
def test_preserved_whitespace_in_pre_and_textarea(self):
"""Whitespace must be preserved in <pre> and <textarea> tags."""
self.assertSoupEquals("<pre> </pre>")
self.assertSoupEquals("<textarea> woo </textarea>")
def test_nested_inline_elements(self):
"""Inline elements can be nested indefinitely."""
b_tag = "<b>Inside a B tag</b>"
self.assertSoupEquals(b_tag)
nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>"
self.assertSoupEquals(nested_b_tag)
double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>"
        self.assertSoupEquals(double_nested_b_tag)
def test_nested_block_level_elements(self):
"""Block elements can be nested."""
soup = self.soup('<blockquote><p><b>Foo</b></p></blockquote>')
blockquote = soup.blockquote
self.assertEqual(blockquote.p.b.string, 'Foo')
self.assertEqual(blockquote.b.string, 'Foo')
def test_correctly_nested_tables(self):
"""One table can go inside another one."""
markup = ('<table id="1">'
'<tr>'
"<td>Here's another table:"
'<table id="2">'
'<tr><td>foo</td></tr>'
'</table></td>')
self.assertSoupEquals(
markup,
'<table id="1"><tr><td>Here\'s another table:'
'<table id="2"><tr><td>foo</td></tr></table>'
'</td></tr></table>')
self.assertSoupEquals(
"<table><thead><tr><td>Foo</td></tr></thead>"
"<tbody><tr><td>Bar</td></tr></tbody>"
"<tfoot><tr><td>Baz</td></tr></tfoot></table>")
def test_angle_brackets_in_attribute_values_are_escaped(self):
self.assertSoupEquals('<a b="<a>"></a>', '<a b="<a>"></a>')
def test_entities_in_attributes_converted_to_unicode(self):
expect = '<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>'
self.assertSoupEquals('<p id="piñata"></p>', expect)
self.assertSoupEquals('<p id="piñata"></p>', expect)
self.assertSoupEquals('<p id="piñata"></p>', expect)
def test_entities_in_text_converted_to_unicode(self):
expect = '<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>'
self.assertSoupEquals("<p>piñata</p>", expect)
self.assertSoupEquals("<p>piñata</p>", expect)
self.assertSoupEquals("<p>piñata</p>", expect)
def test_out_of_range_entity(self):
expect = "\N{REPLACEMENT CHARACTER}"
self.assertSoupEquals("�", expect)
self.assertSoupEquals("�", expect)
self.assertSoupEquals("�", expect)
def test_basic_namespaces(self):
"""Parsers don't need to *understand* namespaces, but at the
very least they should not choke on namespaces or lose
data."""
markup = b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:mathml="http://www.w3.org/1998/Math/MathML" xmlns:svg="http://www.w3.org/2000/svg"><head></head><body><mathml:msqrt>4</mathml:msqrt><b svg:fill="red"></b></body></html>'
soup = self.soup(markup)
self.assertEqual(markup, soup.encode())
html = soup.html
self.assertEqual('http://www.w3.org/1999/xhtml', soup.html['xmlns'])
self.assertEqual(
'http://www.w3.org/1998/Math/MathML', soup.html['xmlns:mathml'])
self.assertEqual(
'http://www.w3.org/2000/svg', soup.html['xmlns:svg'])
#
# Generally speaking, tests below this point are more tests of
# Beautiful Soup than tests of the tree builders. But parsers are
# weird, so we run these tests separately for every tree builder
# to detect any differences between them.
#
def test_soupstrainer(self):
"""Parsers should be able to work with SoupStrainers."""
strainer = SoupStrainer("b")
soup = self.soup("A <b>bold</b> <meta/> <i>statement</i>",
parse_only=strainer)
self.assertEqual(soup.decode(), "<b>bold</b>")
def test_single_quote_attribute_values_become_double_quotes(self):
self.assertSoupEquals("<foo attr='bar'></foo>",
'<foo attr="bar"></foo>')
def test_attribute_values_with_nested_quotes_are_left_alone(self):
text = """<foo attr='bar "brawls" happen'>a</foo>"""
self.assertSoupEquals(text)
def test_attribute_values_with_double_nested_quotes_get_quoted(self):
text = """<foo attr='bar "brawls" happen'>a</foo>"""
soup = self.soup(text)
soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"'
self.assertSoupEquals(
soup.foo.decode(),
"""<foo attr="Brawls happen at "Bob\'s Bar"">a</foo>""")
def test_ampersand_in_attribute_value_gets_escaped(self):
self.assertSoupEquals('<this is="really messed up & stuff"></this>',
'<this is="really messed up & stuff"></this>')
self.assertSoupEquals(
'<a href="http://example.org?a=1&b=2;3">foo</a>',
'<a href="http://example.org?a=1&b=2;3">foo</a>')
def test_escaped_ampersand_in_attribute_value_is_left_alone(self):
self.assertSoupEquals('<a href="http://example.org?a=1&b=2;3"></a>')
def test_entities_in_strings_converted_during_parsing(self):
# Both XML and HTML entities are converted to Unicode characters
# during parsing.
text = "<p><<sacré bleu!>></p>"
expected = "<p><<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></p>"
self.assertSoupEquals(text, expected)
def test_smart_quotes_converted_on_the_way_in(self):
# Microsoft smart quotes are converted to Unicode characters during
# parsing.
quote = b"<p>\x91Foo\x92</p>"
soup = self.soup(quote)
self.assertEqual(
soup.p.string,
"\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}")
def test_non_breaking_spaces_converted_on_the_way_in(self):
soup = self.soup("<a> </a>")
self.assertEqual(soup.a.string, "\N{NO-BREAK SPACE}" * 2)
def test_entities_converted_on_the_way_out(self):
text = "<p><<sacré bleu!>></p>"
expected = "<p><<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></p>".encode("utf-8")
soup = self.soup(text)
self.assertEqual(soup.p.encode("utf-8"), expected)
def test_real_iso_latin_document(self):
# Smoke test of interrelated functionality, using an
# easy-to-understand document.
# Here it is in Unicode. Note that it claims to be in ISO-Latin-1.
unicode_html = '<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>'
# That's because we're going to encode it into ISO-Latin-1, and use
# that to test.
iso_latin_html = unicode_html.encode("iso-8859-1")
# Parse the ISO-Latin-1 HTML.
soup = self.soup(iso_latin_html)
# Encode it to UTF-8.
result = soup.encode("utf-8")
# What do we expect the result to look like? Well, it would
# look like unicode_html, except that the META tag would say
# UTF-8 instead of ISO-Latin-1.
expected = unicode_html.replace("ISO-Latin-1", "utf-8")
# And, of course, it would be in UTF-8, not Unicode.
expected = expected.encode("utf-8")
# Ta-da!
self.assertEqual(result, expected)
def test_real_shift_jis_document(self):
# Smoke test to make sure the parser can handle a document in
# Shift-JIS encoding, without choking.
shift_jis_html = (
b'<html><head></head><body><pre>'
b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B'
b'</pre></body></html>')
unicode_html = shift_jis_html.decode("shift-jis")
soup = self.soup(unicode_html)
# Make sure the parse tree is correctly encoded to various
# encodings.
self.assertEqual(soup.encode("utf-8"), unicode_html.encode("utf-8"))
self.assertEqual(soup.encode("euc_jp"), unicode_html.encode("euc_jp"))
def test_real_hebrew_document(self):
        # A real-world test to make sure we can convert ISO-8859-8 (a
        # Hebrew encoding) to UTF-8.
hebrew_document = b'<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>'
soup = self.soup(
hebrew_document, from_encoding="iso8859-8")
self.assertEqual(soup.original_encoding, 'iso8859-8')
self.assertEqual(
soup.encode('utf-8'),
hebrew_document.decode("iso8859-8").encode("utf-8"))
def test_meta_tag_reflects_current_encoding(self):
# Here's the <meta> tag saying that a document is
# encoded in Shift-JIS.
meta_tag = ('<meta content="text/html; charset=x-sjis" '
'http-equiv="Content-type"/>')
# Here's a document incorporating that meta tag.
shift_jis_html = (
'<html><head>\n%s\n'
'<meta http-equiv="Content-language" content="ja"/>'
'</head><body>Shift-JIS markup goes here.') % meta_tag
soup = self.soup(shift_jis_html)
# Parse the document, and the charset is replaced with a
# generic value.
parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'})
self.assertEqual(parsed_meta['content'],
'text/html; charset=%SOUP-ENCODING%')
self.assertEqual(parsed_meta.contains_substitutions, True)
# For the rest of the story, see TestSubstitutions in
# test_tree.py.
def test_html5_style_meta_tag_reflects_current_encoding(self):
# Here's the <meta> tag saying that a document is
# encoded in Shift-JIS.
meta_tag = ('<meta id="encoding" charset="x-sjis" />')
# Here's a document incorporating that meta tag.
shift_jis_html = (
'<html><head>\n%s\n'
'<meta http-equiv="Content-language" content="ja"/>'
'</head><body>Shift-JIS markup goes here.') % meta_tag
soup = self.soup(shift_jis_html)
# Parse the document, and the charset is replaced with a
# generic value.
parsed_meta = soup.find('meta', id="encoding")
self.assertEqual('%SOUP-ENCODING%', parsed_meta['charset'])
self.assertEqual(True, parsed_meta.contains_substitutions)
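def _soupstrainer_sketch():
    # Hedged, standalone sketch (not part of the smoke-test mix-ins above): it
    # re-creates the SoupStrainer behaviour exercised by the first test using
    # the public bs4 API with the stdlib "html.parser" backend; the function
    # name and the backend choice are illustrative assumptions.
    from bs4 import BeautifulSoup, SoupStrainer
    only_b = SoupStrainer("b")
    soup = BeautifulSoup("A <b>bold</b> <meta/> <i>statement</i>",
                         "html.parser", parse_only=only_b)
    return soup.decode()  # expected to equal "<b>bold</b>"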
class XMLTreeBuilderSmokeTest(object):
def test_docstring_generated(self):
soup = self.soup("<root/>")
self.assertEqual(
soup.encode(), b'<?xml version="1.0" encoding="utf-8"?>\n<root/>')
def test_docstring_includes_correct_encoding(self):
soup = self.soup("<root/>")
self.assertEqual(
soup.encode("latin1"),
b'<?xml version="1.0" encoding="latin1"?>\n<root/>')
def test_real_xhtml_document(self):
"""A real XHTML document should come out the same as it went in."""
markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
soup = self.soup(markup)
self.assertEqual(soup.encode("utf-8"), markup)
def test_tags_are_empty_element_if_and_only_if_they_are_empty(self):
self.assertSoupEquals("<p>", "<p/>")
self.assertSoupEquals("<p>foo</p>")
def test_namespaces_are_preserved(self):
markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:foo>This tag is in the a namespace</a:foo><b:foo>This tag is in the b namespace</b:foo></root>'
soup = self.soup(markup)
root = soup.root
self.assertEqual("http://example.com/", root['xmlns:a'])
self.assertEqual("http://example.net/", root['xmlns:b'])
class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
"""Smoke test for a tree builder that supports HTML5."""
def test_html_tags_have_namespace(self):
markup = "<a>"
soup = self.soup(markup)
self.assertEqual("http://www.w3.org/1999/xhtml", soup.a.namespace)
def test_svg_tags_have_namespace(self):
markup = '<svg><circle/></svg>'
soup = self.soup(markup)
namespace = "http://www.w3.org/2000/svg"
self.assertEqual(namespace, soup.svg.namespace)
self.assertEqual(namespace, soup.circle.namespace)
def test_mathml_tags_have_namespace(self):
markup = '<math><msqrt>5</msqrt></math>'
soup = self.soup(markup)
namespace = 'http://www.w3.org/1998/Math/MathML'
self.assertEqual(namespace, soup.math.namespace)
self.assertEqual(namespace, soup.msqrt.namespace)
def skipIf(condition, reason):
def nothing(test, *args, **kwargs):
return None
def decorator(test_item):
if condition:
return nothing
else:
return test_item
return decorator
| apache-2.0 |
partofthething/home-assistant | homeassistant/components/google_assistant/__init__.py | 2 | 3785 | """Support for Actions on Google Assistant Smart Home Control."""
import logging
from typing import Any, Dict
import voluptuous as vol
# Typing imports
from homeassistant.const import CONF_API_KEY, CONF_NAME
from homeassistant.core import HomeAssistant, ServiceCall
from homeassistant.helpers import config_validation as cv
from .const import (
CONF_ALIASES,
CONF_CLIENT_EMAIL,
CONF_ENTITY_CONFIG,
CONF_EXPOSE,
CONF_EXPOSE_BY_DEFAULT,
CONF_EXPOSED_DOMAINS,
CONF_PRIVATE_KEY,
CONF_PROJECT_ID,
CONF_REPORT_STATE,
CONF_ROOM_HINT,
CONF_SECURE_DEVICES_PIN,
CONF_SERVICE_ACCOUNT,
DEFAULT_EXPOSE_BY_DEFAULT,
DEFAULT_EXPOSED_DOMAINS,
DOMAIN,
SERVICE_REQUEST_SYNC,
)
from .const import EVENT_QUERY_RECEIVED # noqa: F401
from .http import GoogleAssistantView, GoogleConfig
from .const import EVENT_COMMAND_RECEIVED, EVENT_SYNC_RECEIVED # noqa: F401, isort:skip
_LOGGER = logging.getLogger(__name__)
CONF_ALLOW_UNLOCK = "allow_unlock"
ENTITY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_EXPOSE, default=True): cv.boolean,
vol.Optional(CONF_ALIASES): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_ROOM_HINT): cv.string,
}
)
GOOGLE_SERVICE_ACCOUNT = vol.Schema(
{
vol.Required(CONF_PRIVATE_KEY): cv.string,
vol.Required(CONF_CLIENT_EMAIL): cv.string,
},
extra=vol.ALLOW_EXTRA,
)
def _check_report_state(data):
if data[CONF_REPORT_STATE] and CONF_SERVICE_ACCOUNT not in data:
raise vol.Invalid("If report state is enabled, a service account must exist")
return data
GOOGLE_ASSISTANT_SCHEMA = vol.All(
vol.Schema(
{
vol.Required(CONF_PROJECT_ID): cv.string,
vol.Optional(
CONF_EXPOSE_BY_DEFAULT, default=DEFAULT_EXPOSE_BY_DEFAULT
): cv.boolean,
vol.Optional(
CONF_EXPOSED_DOMAINS, default=DEFAULT_EXPOSED_DOMAINS
): cv.ensure_list,
vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: ENTITY_SCHEMA},
# str on purpose, makes sure it is configured correctly.
vol.Optional(CONF_SECURE_DEVICES_PIN): str,
vol.Optional(CONF_REPORT_STATE, default=False): cv.boolean,
vol.Optional(CONF_SERVICE_ACCOUNT): GOOGLE_SERVICE_ACCOUNT,
# deprecated configuration options
vol.Remove(CONF_ALLOW_UNLOCK): cv.boolean,
vol.Remove(CONF_API_KEY): cv.string,
},
extra=vol.PREVENT_EXTRA,
),
_check_report_state,
)
CONFIG_SCHEMA = vol.Schema({DOMAIN: GOOGLE_ASSISTANT_SCHEMA}, extra=vol.ALLOW_EXTRA)
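# Hedged configuration example (illustrative only: the YAML key strings below
# are assumed from the CONF_* constants imported above and are not verified
# against const.py):
#
#   google_assistant:
#     project_id: my-gcp-project
#     service_account:
#       private_key: !secret google_private_key
#       client_email: !secret google_client_email
#     report_state: true
#     exposed_domains:
#       - light
#       - switch
#     entity_config:
#       light.kitchen:
#         expose: true
#         room_hint: Kitchen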
async def async_setup(hass: HomeAssistant, yaml_config: Dict[str, Any]):
"""Activate Google Actions component."""
config = yaml_config.get(DOMAIN, {})
google_config = GoogleConfig(hass, config)
await google_config.async_initialize()
hass.http.register_view(GoogleAssistantView(google_config))
if google_config.should_report_state:
google_config.async_enable_report_state()
async def request_sync_service_handler(call: ServiceCall):
"""Handle request sync service calls."""
agent_user_id = call.data.get("agent_user_id") or call.context.user_id
if agent_user_id is None:
_LOGGER.warning(
"No agent_user_id supplied for request_sync. Call as a user or pass in user id as agent_user_id"
)
return
await google_config.async_sync_entities(agent_user_id)
# Register service only if key is provided
if CONF_SERVICE_ACCOUNT in config:
hass.services.async_register(
DOMAIN, SERVICE_REQUEST_SYNC, request_sync_service_handler
)
return True
| mit |
jendap/tensorflow | tensorflow/contrib/boosted_trees/estimator_batch/dnn_tree_combined_estimator_test.py | 20 | 9393 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for combined DNN + GBDT estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
from tensorflow.contrib.boosted_trees.estimator_batch import dnn_tree_combined_estimator as estimator
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.python.estimator import exporter
from tensorflow.python.estimator.canned import head as head_lib
from tensorflow.python.estimator.export import export
from tensorflow.python.ops import parsing_ops
from tensorflow.python.feature_column import feature_column_lib as core_feature_column
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import googletest
from tensorflow.python.training import checkpoint_utils
def _train_input_fn():
features = {
"x": constant_op.constant([[2.], [1.], [1.]])
}
label = constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
return features, label
def _eval_input_fn():
features = {
"x": constant_op.constant([[1.], [2.], [2.]])
}
label = constant_op.constant([[0], [1], [1]], dtype=dtypes.int32)
return features, label
class DNNBoostedTreeCombinedTest(test_util.TensorFlowTestCase):
def testClassifierContract(self):
estimator_test_utils.assert_estimator_contract(
self, estimator.DNNBoostedTreeCombinedClassifier)
def testRegressorContract(self):
estimator_test_utils.assert_estimator_contract(
self, estimator.DNNBoostedTreeCombinedRegressor)
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, estimator.DNNBoostedTreeCombinedEstimator)
def testNoDNNFeatureColumns(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
with self.assertRaisesRegexp(
ValueError,
"dnn_feature_columns must be specified"):
classifier = estimator.DNNBoostedTreeCombinedClassifier(
dnn_hidden_units=[1],
dnn_feature_columns=[],
tree_learner_config=learner_config,
num_trees=1,
tree_examples_per_layer=3,
n_classes=2)
classifier.fit(input_fn=_train_input_fn, steps=5)
def testFitAndEvaluateDontThrowException(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.DNNBoostedTreeCombinedClassifier(
dnn_hidden_units=[1],
dnn_feature_columns=[feature_column.real_valued_column("x")],
tree_learner_config=learner_config,
num_trees=1,
tree_examples_per_layer=3,
n_classes=2,
model_dir=model_dir,
config=config,
dnn_steps_to_train=10,
dnn_input_layer_to_tree=False,
tree_feature_columns=[feature_column.real_valued_column("x")])
classifier.fit(input_fn=_train_input_fn, steps=15)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
def testFitAndEvaluateWithDistillation(self):
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 1
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
classifier = estimator.DNNBoostedTreeCombinedClassifier(
dnn_hidden_units=[1],
dnn_feature_columns=[feature_column.real_valued_column("x")],
tree_learner_config=learner_config,
num_trees=1,
tree_examples_per_layer=3,
n_classes=2,
model_dir=model_dir,
config=config,
dnn_steps_to_train=10,
dnn_input_layer_to_tree=False,
tree_feature_columns=[feature_column.real_valued_column("x")],
dnn_to_tree_distillation_param=(1, None))
classifier.fit(input_fn=_train_input_fn, steps=15)
classifier.evaluate(input_fn=_eval_input_fn, steps=1)
class CoreDNNBoostedTreeCombinedTest(test_util.TensorFlowTestCase):
def _assert_checkpoint(self, model_dir, global_step):
reader = checkpoint_utils.load_checkpoint(model_dir)
self.assertEqual(global_step, reader.get_tensor(ops.GraphKeys.GLOBAL_STEP))
def testTrainEvaluateInferDoesNotThrowErrorWithNoDnnInput(self):
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 3
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
est = estimator.CoreDNNBoostedTreeCombinedEstimator(
head=head_fn,
dnn_hidden_units=[1],
dnn_feature_columns=[core_feature_column.numeric_column("x")],
tree_learner_config=learner_config,
num_trees=1,
tree_examples_per_layer=3,
model_dir=model_dir,
config=config,
dnn_steps_to_train=10,
dnn_input_layer_to_tree=False,
tree_feature_columns=[core_feature_column.numeric_column("x")])
# Train for a few steps.
est.train(input_fn=_train_input_fn, steps=1000)
# 10 steps for dnn, 3 for 1 tree of depth 3 + 1 after the tree finished
self._assert_checkpoint(est.model_dir, global_step=14)
res = est.evaluate(input_fn=_eval_input_fn, steps=1)
self.assertLess(0.5, res["auc"])
est.predict(input_fn=_eval_input_fn)
def testTrainEvaluateInferDoesNotThrowErrorWithDnnInput(self):
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 3
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
est = estimator.CoreDNNBoostedTreeCombinedEstimator(
head=head_fn,
dnn_hidden_units=[1],
dnn_feature_columns=[core_feature_column.numeric_column("x")],
tree_learner_config=learner_config,
num_trees=1,
tree_examples_per_layer=3,
model_dir=model_dir,
config=config,
dnn_steps_to_train=10,
dnn_input_layer_to_tree=True,
tree_feature_columns=[])
# Train for a few steps.
est.train(input_fn=_train_input_fn, steps=1000)
res = est.evaluate(input_fn=_eval_input_fn, steps=1)
self.assertLess(0.5, res["auc"])
est.predict(input_fn=_eval_input_fn)
def testTrainEvaluateWithDnnForInputAndTreeForPredict(self):
head_fn = head_lib._binary_logistic_head_with_sigmoid_cross_entropy_loss(
loss_reduction=losses.Reduction.SUM_OVER_NONZERO_WEIGHTS)
learner_config = learner_pb2.LearnerConfig()
learner_config.num_classes = 2
learner_config.constraints.max_tree_depth = 3
model_dir = tempfile.mkdtemp()
config = run_config.RunConfig()
est = estimator.CoreDNNBoostedTreeCombinedEstimator(
head=head_fn,
dnn_hidden_units=[1],
dnn_feature_columns=[core_feature_column.numeric_column("x")],
tree_learner_config=learner_config,
num_trees=1,
tree_examples_per_layer=3,
model_dir=model_dir,
config=config,
dnn_steps_to_train=10,
dnn_input_layer_to_tree=True,
predict_with_tree_only=True,
dnn_to_tree_distillation_param=(0.5, None),
tree_feature_columns=[])
# Train for a few steps.
est.train(input_fn=_train_input_fn, steps=1000)
res = est.evaluate(input_fn=_eval_input_fn, steps=1)
self.assertLess(0.5, res["auc"])
est.predict(input_fn=_eval_input_fn)
serving_input_fn = (
export.build_parsing_serving_input_receiver_fn(
feature_spec={"x": parsing_ops.FixedLenFeature(
[1], dtype=dtypes.float32)}))
base_exporter = exporter.FinalExporter(
name="Servo",
serving_input_receiver_fn=serving_input_fn,
assets_extra=None)
export_path = os.path.join(model_dir, "export")
base_exporter.export(
est,
export_path=export_path,
checkpoint_path=None,
eval_result={},
is_the_final_export=True)
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
eurosata1/e2 | lib/python/Components/Converter/ClockToText.py | 42 | 3445 | from Converter import Converter
from time import localtime, strftime
from Components.Element import cached
class ClockToText(Converter, object):
DEFAULT = 0
WITH_SECONDS = 1
IN_MINUTES = 2
DATE = 3
FORMAT = 4
AS_LENGTH = 5
TIMESTAMP = 6
FULL = 7
SHORT_DATE = 8
LONG_DATE = 9
VFD = 10
AS_LENGTHHOURS = 11
AS_LENGTHSECONDS = 12
# add: date, date as string, weekday, ...
# (whatever you need!)
def __init__(self, type):
Converter.__init__(self, type)
self.fix = ""
if ';' in type:
type, self.fix = type.split(';')
if type == "WithSeconds":
self.type = self.WITH_SECONDS
elif type == "InMinutes":
self.type = self.IN_MINUTES
elif type == "Date":
self.type = self.DATE
elif type == "AsLength":
self.type = self.AS_LENGTH
elif type == "AsLengthHours":
self.type = self.AS_LENGTHHOURS
elif type == "AsLengthSeconds":
self.type = self.AS_LENGTHSECONDS
elif type == "Timestamp":
self.type = self.TIMESTAMP
elif type == "Full":
self.type = self.FULL
elif type == "ShortDate":
self.type = self.SHORT_DATE
elif type == "LongDate":
self.type = self.LONG_DATE
elif type == "VFD":
self.type = self.VFD
elif "Format" in type:
self.type = self.FORMAT
self.fmt_string = type[7:]
else:
self.type = self.DEFAULT
@cached
def getText(self):
time = self.source.time
if time is None:
return ""
# add/remove 1st space
def fix_space(string):
if "Proportional" in self.fix and t.tm_hour < 10:
return " " + string
if "NoSpace" in self.fix:
return string.lstrip(' ')
return string
# handle durations
if self.type == self.IN_MINUTES:
return _("%d min") % (time / 60)
elif self.type == self.AS_LENGTH:
if time < 0:
return ""
return "%d:%02d" % (time / 60, time % 60)
elif self.type == self.AS_LENGTHHOURS:
if time < 0:
return ""
return "%d:%02d" % (time / 3600, time / 60 % 60)
elif self.type == self.AS_LENGTHSECONDS:
if time < 0:
return ""
return "%d:%02d:%02d" % (time / 3600, time / 60 % 60, time % 60)
elif self.type == self.TIMESTAMP:
return str(time)
t = localtime(time)
if self.type == self.WITH_SECONDS:
# TRANSLATORS: full time representation hour:minute:seconds
return fix_space(_("%2d:%02d:%02d") % (t.tm_hour, t.tm_min, t.tm_sec))
elif self.type == self.DEFAULT:
# TRANSLATORS: short time representation hour:minute
return fix_space(_("%2d:%02d") % (t.tm_hour, t.tm_min))
elif self.type == self.DATE:
# TRANSLATORS: full date representation dayname daynum monthname year in strftime() format! See 'man strftime'
d = _("%A %e %B %Y")
elif self.type == self.FULL:
# TRANSLATORS: long date representation short dayname daynum short monthname hour:minute in strftime() format! See 'man strftime'
d = _("%a %e/%m %-H:%M")
elif self.type == self.SHORT_DATE:
# TRANSLATORS: short date representation short dayname daynum short monthname in strftime() format! See 'man strftime'
d = _("%a %e/%m")
elif self.type == self.LONG_DATE:
# TRANSLATORS: long date representations dayname daynum monthname in strftime() format! See 'man strftime'
d = _("%A %e %B")
elif self.type == self.VFD:
# TRANSLATORS: VFD hour:minute daynum short monthname in strftime() format! See 'man strftime'
d = _("%k:%M %e/%m")
elif self.type == self.FORMAT:
d = self.fmt_string
else:
return "???"
return strftime(d, t)
text = property(getText)
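# Hedged illustration of the type-string parsing above: a skin converter string
# such as "Format:%H:%M;NoSpace" is split on ';' into the type "Format:%H:%M"
# (fmt_string becomes "%H:%M" via type[7:]) and the fix flag "NoSpace", while a
# plain "WithSeconds" selects the WITH_SECONDS branch with no fix flag.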
| gpl-2.0 |
0todd0000/spm1d | spm1d/rft1d/examples/val_max_4_anova1_1d.py | 1 | 2121 |
import numpy as np
from matplotlib import pyplot
from spm1d import rft1d
eps = np.finfo(float).eps
def here_anova1(Y, X, X0, Xi, X0i, df):
Y = np.matrix(Y)
### estimate parameters:
b = Xi*Y
eij = Y - X*b
R = eij.T*eij
### reduced design:
b0 = X0i*Y
eij0 = Y - X0*b0
R0 = eij0.T*eij0
### compute F statistic:
F = ((np.diag(R0)-np.diag(R))/df[0]) / (np.diag(R+eps)/df[1])
return F
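# In matrix terms the statistic above is F = [(SSR0 - SSR) / df[0]] / [SSR / df[1]],
# where SSR0 and SSR are the per-node residual sums of squares of the reduced (X0)
# and full (X) models; the small eps term guards against division by zero.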
def here_design_matrices(nResponses, nGroups):
nTotal = sum(nResponses)
X = np.zeros((nTotal,nGroups))
i0 = 0
for i,n in enumerate(nResponses):
X[i0:i0+n,i] = 1
i0 += n
X = np.matrix(X)
X0 = np.matrix(np.ones(nTotal)).T #reduced design matrix
Xi,X0i = np.linalg.pinv(X), np.linalg.pinv(X0) #pseudo-inverses
return X,X0,Xi,X0i
#(0) Set parameters:
np.random.seed(123456789)
nResponses = 6,8,9 #number of responses in each group
nNodes = 101
FWHM = 12.0
nIterations = 5000
### derived parameters:
nGroups = len(nResponses)
nTotal = sum(nResponses)
df = nGroups-1, nTotal-nGroups
X,X0,Xi,X0i = here_design_matrices(nResponses, nGroups)
#(1) Generate Gaussian 1D fields, compute test stat, store field maximum:
F = []
generator = rft1d.random.Generator1D(nTotal, nNodes, FWHM)
for i in range(nIterations):
y = generator.generate_sample()
f = here_anova1(y, X, X0, Xi, X0i, df)
F.append( f.max() )
F = np.asarray(F)
#(2) Survival functions:
heights = np.linspace(6, 14, 21)
sf = np.array( [ (F>h).mean() for h in heights] )
sfE = rft1d.f.sf(heights, df, nNodes, FWHM) #theoretical
sf0D = rft1d.f.sf0d(heights, df) #theoretical (0D)
#(3) Plot results:
pyplot.close('all')
ax = pyplot.axes()
ax.plot(heights, sf, 'o', label='Simulated')
ax.plot(heights, sfE, '-', label='Theoretical')
ax.plot(heights, sf0D, 'r-', label='Theoretical (0D)')
ax.set_xlabel('$u$', size=20)
ax.set_ylabel('$P (F_\mathrm{max} > u)$', size=20)
ax.legend()
ax.set_title('ANOVA validation (1D)', size=20)
pyplot.show()
| gpl-3.0 |
hogarthj/ansible | lib/ansible/plugins/connection/network_cli.py | 5 | 19736 | # (c) 2016 Red Hat Inc.
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
author: Ansible Networking Team
connection: network_cli
short_description: Use network_cli to run commands on network appliances
description:
  - This connection plugin provides a connection to remote devices over
    SSH and implements a CLI shell. It is typically used by network devices
    for sending and receiving CLI commands.
version_added: "2.3"
options:
host:
description:
- Specifies the remote device FQDN or IP address to establish the SSH
connection to.
default: inventory_hostname
vars:
- name: ansible_host
port:
type: int
description:
      - Specifies the port on the remote device that listens for connections
        when establishing the SSH connection.
default: 22
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_port
network_os:
description:
- Configures the device platform network operating system. This value is
used to load the correct terminal and cliconf plugins to communicate
with the remote device
vars:
- name: ansible_network_os
remote_user:
description:
- The username used to authenticate to the remote device when the SSH
connection is first established. If the remote_user is not specified,
the connection will use the username of the logged in user.
      - Can be configured from the CLI via the C(--user) or C(-u) options
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
password:
description:
- Configures the user password used to authenticate to the remote device
when first establishing the SSH connection.
vars:
- name: ansible_password
- name: ansible_ssh_pass
private_key_file:
description:
      - The private SSH key or certificate file used to authenticate to the
remote device when first establishing the SSH connection.
ini:
section: defaults
key: private_key_file
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
vars:
- name: ansible_private_key_file
timeout:
type: int
description:
      - Sets the timeout, in seconds, for communicating with the
        remote device. This timeout is used as the default timeout value for
        commands issued to the network CLI. If the command does not return
        within the timeout, an error is generated.
default: 120
become:
type: boolean
description:
- The become option will instruct the CLI session to attempt privilege
escalation on platforms that support it. Normally this means
transitioning from user mode to C(enable) mode in the CLI session.
If become is set to True and the remote device does not support
privilege escalation or the privilege has already been elevated, then
this option is silently ignored
      - Can be configured from the CLI via the C(--become) or C(-b) options
default: False
ini:
section: privilege_escalation
key: become
env:
- name: ANSIBLE_BECOME
vars:
- name: ansible_become
become_method:
description:
      - This option allows the become method to be specified for handling
privilege escalation. Typically the become_method value is set to
C(enable) but could be defined as other values.
default: sudo
ini:
section: privilege_escalation
key: become_method
env:
- name: ANSIBLE_BECOME_METHOD
vars:
- name: ansible_become_method
host_key_auto_add:
type: boolean
description:
- By default, Ansible will prompt the user before adding SSH keys to the
known hosts file. Since persistent connections such as network_cli run
in background processes, the user will never be prompted. By enabling
this option, unknown host keys will automatically be added to the
known hosts file.
- Be sure to fully understand the security implications of enabling this
option on production systems as it could create a security vulnerability.
default: False
ini:
section: paramiko_connection
key: host_key_auto_add
env:
- name: ANSIBLE_HOST_KEY_AUTO_ADD
persistent_connect_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait when trying to
initially establish a persistent connection. If this value expires
before the connection to the remote device is completed, the connection
will fail
default: 30
ini:
section: persistent_connection
key: persistent_connect_timeout
env:
- name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT
persistent_command_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait for a command to
return from the remote device. If this timer is exceeded before the
command returns, the connection plugin will raise an exception and
close
default: 10
ini:
section: persistent_connection
key: persistent_command_timeout
env:
- name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
"""
import json
import logging
import re
import os
import socket
import traceback
from ansible import constants as C
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils.six import BytesIO, PY3
from ansible.module_utils.six.moves import cPickle
from ansible.module_utils._text import to_bytes, to_text
from ansible.playbook.play_context import PlayContext
from ansible.plugins.loader import cliconf_loader, terminal_loader, connection_loader
from ansible.plugins.connection import ConnectionBase
from ansible.utils.path import unfrackpath
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Connection(ConnectionBase):
''' CLI (shell) SSH connections on Paramiko '''
transport = 'network_cli'
has_pipelining = True
force_persistence = True
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
self._ssh_shell = None
self._matched_prompt = None
self._matched_pattern = None
self._last_response = None
self._history = list()
self._play_context = play_context
self._local = connection_loader.get('local', play_context, '/dev/null')
self._local.set_options()
self._terminal = None
self._cliconf = None
self._ansible_playbook_pid = kwargs.get('ansible_playbook_pid')
if self._play_context.verbosity > 3:
logging.getLogger('paramiko').setLevel(logging.DEBUG)
# reconstruct the socket_path and set instance values accordingly
self._update_connection_state()
def __getattr__(self, name):
try:
return self.__dict__[name]
except KeyError:
if name.startswith('_'):
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
return getattr(self._cliconf, name)
def get_prompt(self):
"""Returns the current prompt from the device"""
return self._matched_prompt
def exec_command(self, cmd, in_data=None, sudoable=True):
# this try..except block is just to handle the transition to supporting
# network_cli as a toplevel connection. Once connection=local is gone,
# this block can be removed as well and all calls passed directly to
# the local connection
if self._ssh_shell:
try:
cmd = json.loads(to_text(cmd, errors='surrogate_or_strict'))
kwargs = {'command': to_bytes(cmd['command'], errors='surrogate_or_strict')}
for key in ('prompt', 'answer', 'sendonly', 'newline', 'prompt_retry_check'):
if cmd.get(key) is True or cmd.get(key) is False:
kwargs[key] = cmd[key]
elif cmd.get(key) is not None:
kwargs[key] = to_bytes(cmd[key], errors='surrogate_or_strict')
return self.send(**kwargs)
except ValueError:
cmd = to_bytes(cmd, errors='surrogate_or_strict')
return self.send(command=cmd)
else:
return self._local.exec_command(cmd, in_data, sudoable)
def put_file(self, in_path, out_path):
return self._local.put_file(in_path, out_path)
def fetch_file(self, in_path, out_path):
return self._local.fetch_file(in_path, out_path)
def update_play_context(self, pc_data):
"""Updates the play context information for the connection"""
pc_data = to_bytes(pc_data)
if PY3:
pc_data = cPickle.loads(pc_data, encoding='bytes')
else:
pc_data = cPickle.loads(pc_data)
play_context = PlayContext()
play_context.deserialize(pc_data)
messages = ['updating play_context for connection']
if self._play_context.become is False and play_context.become is True:
auth_pass = play_context.become_pass
self._terminal.on_become(passwd=auth_pass)
messages.append('authorizing connection')
elif self._play_context.become is True and not play_context.become:
self._terminal.on_unbecome()
messages.append('deauthorizing connection')
self._play_context = play_context
return messages
def _connect(self):
'''
Connects to the remote device and starts the terminal
'''
if self.connected:
return
self.paramiko_conn = connection_loader.get('paramiko', self._play_context, '/dev/null')
self.paramiko_conn.set_options(direct={'look_for_keys': not bool(self._play_context.password and not self._play_context.private_key_file)})
self.paramiko_conn.force_persistence = self.force_persistence
ssh = self.paramiko_conn._connect()
display.vvvv('ssh connection done, setting terminal', host=self._play_context.remote_addr)
self._ssh_shell = ssh.ssh.invoke_shell()
self._ssh_shell.settimeout(self._play_context.timeout)
network_os = self._play_context.network_os
if not network_os:
raise AnsibleConnectionFailure(
'Unable to automatically determine host network os. Please '
'manually configure ansible_network_os value for this host'
)
self._terminal = terminal_loader.get(network_os, self)
if not self._terminal:
raise AnsibleConnectionFailure('network os %s is not supported' % network_os)
display.vvvv('loaded terminal plugin for network_os %s' % network_os, host=self._play_context.remote_addr)
self._cliconf = cliconf_loader.get(network_os, self)
if self._cliconf:
display.vvvv('loaded cliconf plugin for network_os %s' % network_os, host=self._play_context.remote_addr)
else:
display.vvvv('unable to load cliconf for network_os %s' % network_os)
self.receive()
display.vvvv('firing event: on_open_shell()', host=self._play_context.remote_addr)
self._terminal.on_open_shell()
if self._play_context.become and self._play_context.become_method == 'enable':
display.vvvv('firing event: on_become', host=self._play_context.remote_addr)
auth_pass = self._play_context.become_pass
self._terminal.on_become(passwd=auth_pass)
display.vvvv('ssh connection has completed successfully', host=self._play_context.remote_addr)
self._connected = True
return self
def _update_connection_state(self):
'''
Reconstruct the connection socket_path and check if it exists
If the socket path exists then the connection is active and set
both the _socket_path value to the path and the _connected value
to True. If the socket path doesn't exist, leave the socket path
value to None and the _connected value to False
'''
ssh = connection_loader.get('ssh', class_only=True)
cp = ssh._create_control_path(self._play_context.remote_addr, self._play_context.port, self._play_context.remote_user, self._play_context.connection,
self._ansible_playbook_pid)
tmp_path = unfrackpath(C.PERSISTENT_CONTROL_PATH_DIR)
socket_path = unfrackpath(cp % dict(directory=tmp_path))
if os.path.exists(socket_path):
self._connected = True
self._socket_path = socket_path
def reset(self):
'''
Reset the connection
'''
if self._socket_path:
display.vvvv('resetting persistent connection for socket_path %s' % self._socket_path, host=self._play_context.remote_addr)
self.close()
display.vvvv('reset call on connection instance', host=self._play_context.remote_addr)
def close(self):
'''
Close the active connection to the device
'''
# only close the connection if its connected.
if self._connected:
display.debug("closing ssh connection to device")
if self._ssh_shell:
display.debug("firing event: on_close_shell()")
self._terminal.on_close_shell()
self._ssh_shell.close()
self._ssh_shell = None
display.debug("cli session is now closed")
self.paramiko_conn.close()
self.paramiko_conn = None
display.debug("ssh connection has been closed successfully")
self._connected = False
def receive(self, command=None, prompts=None, answer=None, newline=True, prompt_retry_check=False):
'''
Handles receiving of output from command
'''
recv = BytesIO()
handled = False
self._matched_prompt = None
self._matched_cmd_prompt = None
matched_prompt_window = window_count = 0
while True:
data = self._ssh_shell.recv(256)
# when a channel stream is closed, received data will be empty
if not data:
break
recv.write(data)
offset = recv.tell() - 256 if recv.tell() > 256 else 0
recv.seek(offset)
window = self._strip(recv.read())
window_count += 1
if prompts and not handled:
handled = self._handle_prompt(window, prompts, answer, newline)
matched_prompt_window = window_count
elif prompts and handled and prompt_retry_check and matched_prompt_window + 1 == window_count:
                # Check again even when handled: if the same prompt repeats in
                # the next window (as with a wrong enable password, etc.), the
                # answer value is wrong, so report this as an error.
if self._handle_prompt(window, prompts, answer, newline, prompt_retry_check):
raise AnsibleConnectionFailure("For matched prompt '%s', answer is not valid" % self._matched_cmd_prompt)
if self._find_prompt(window):
self._last_response = recv.getvalue()
resp = self._strip(self._last_response)
return self._sanitize(resp, command)
def send(self, command, prompt=None, answer=None, newline=True, sendonly=False, prompt_retry_check=False):
'''
Sends the command to the device in the opened shell
'''
try:
self._history.append(command)
self._ssh_shell.sendall(b'%s\r' % command)
if sendonly:
return
response = self.receive(command, prompt, answer, newline, prompt_retry_check)
return to_text(response, errors='surrogate_or_strict')
except (socket.timeout, AttributeError):
display.vvvv(traceback.format_exc(), host=self._play_context.remote_addr)
raise AnsibleConnectionFailure("timeout trying to send command: %s" % command.strip())
def _strip(self, data):
'''
Removes ANSI codes from device response
'''
for regex in self._terminal.ansi_re:
data = regex.sub(b'', data)
return data
def _handle_prompt(self, resp, prompts, answer, newline, prompt_retry_check=False):
'''
Matches the command prompt and responds
:arg resp: Byte string containing the raw response from the remote
:arg prompts: Sequence of byte strings that we consider prompts for input
:arg answer: Byte string to send back to the remote if we find a prompt.
A carriage return is automatically appended to this string.
:returns: True if a prompt was found in ``resp``. False otherwise
'''
if not isinstance(prompts, list):
prompts = [prompts]
prompts = [re.compile(r, re.I) for r in prompts]
for regex in prompts:
match = regex.search(resp)
if match:
# if prompt_retry_check is enabled to check if same prompt is
# repeated don't send answer again.
if not prompt_retry_check:
self._ssh_shell.sendall(b'%s' % answer)
if newline:
self._ssh_shell.sendall(b'\r')
self._matched_cmd_prompt = match.group()
return True
return False
def _sanitize(self, resp, command=None):
'''
Removes elements from the response before returning to the caller
'''
cleaned = []
for line in resp.splitlines():
if (command and line.strip() == command.strip()) or self._matched_prompt.strip() in line:
continue
cleaned.append(line)
return b'\n'.join(cleaned).strip()
def _find_prompt(self, response):
'''Searches the buffered response for a matching command prompt
'''
errored_response = None
is_error_message = False
for regex in self._terminal.terminal_stderr_re:
if regex.search(response):
is_error_message = True
                # Check whether the error response ends with a command prompt;
                # if it does not, keep receiving into the buffer until the prompt arrives.
for regex in self._terminal.terminal_stdout_re:
match = regex.search(response)
if match:
errored_response = response
self._matched_pattern = regex.pattern
self._matched_prompt = match.group()
break
if not is_error_message:
for regex in self._terminal.terminal_stdout_re:
match = regex.search(response)
if match:
self._matched_pattern = regex.pattern
self._matched_prompt = match.group()
if not errored_response:
return True
if errored_response:
raise AnsibleConnectionFailure(errored_response)
return False
| gpl-3.0 |
weightedEights/runDBcheck | RADAR_DATA/20170713.001/Source/Shell/proxies/txcProxy.py | 5 | 2285 | """
Txc proxy.
This proxy handles all communication between the Txc and the
shell programs
"""
from shell.proxies.baseProxy import baseProxy
import os,re
import time
from xmlrpclib import ServerProxy
import siteconfig
class txcProxy(baseProxy):
def __init__(self,experiment):
baseProxy.__init__(self,'txc',experiment)
txcurl = siteconfig.url('txc')
self.remote = ServerProxy(txcurl)
h5buffer = self.exp.h5Buffer
try:
h5buffer.setDynamic('/Tx/Power',[0.0,0.0])
h5buffer.setAttribute('/Tx/Power/Unit','W')
h5buffer.setAttribute('/Tx/Power/Description','Transmitted power')
except Exception,inst:
self.log.exception(inst)
self.log.info('Initialized')
def setup(self,n):
baseProxy.setup(self,n)
section = '%s mode:%d' % (self.exp.dtc,n)
txf = self.exp.experimentConfig.get(section,'txfrequency',0)
try:
# sondrestrom legacy
f = int(txf)
self.powerName = 'txcfrequency%dpower' % (f)
except:
# amisr and new sondrestrom way. The freq is named directly in the exp file
mo = re.match('tx([c0-9]*)frequency([0-9]*)',txf,re.I)
txid = mo.group(1)
txlo = mo.group(2)
self.powerName = 'tx%sfrequency%spower' % (txid,txlo)
def storeData(self,h5buffer,starttime,endtime,vars):
tsub = time.clock()
state = self.remote.getState([starttime,endtime])
esub = time.clock()-tsub
self.log.info('Gathering txc info: %3.2f [secs]' % (esub))
# set boi info
if state[0]['_timediff'] < 60:
h5buffer.h5Dynamic.setOutBoi('/Tx/Power',state[0][self.powerName])
else:
            self.log.error('Txc boi info off in time by: %f secs' % (state[0]['_timediff']))
# set eoi info
if state[1]['_timediff'] < 60:
h5buffer.h5Dynamic.setOutEoi('/Tx/Power',state[1][self.powerName])
else:
self.log.error('Txc eoi info of in time by: %f secs' % (state[1]['_timediff']))
proxy = txcProxy | gpl-3.0 |
jorgealmerio/QEsg | core/ezdxf/modern/solid3d.py | 1 | 2503 | # Purpose: support for ACIS based 3D entities - BODY, REGION, 3DSOLID
# Created: 24.05.2015
# Copyright (C) 2015, Manfred Moitzi
# License: MIT License
from __future__ import unicode_literals
__author__ = "mozman <[email protected]>"
from contextlib import contextmanager
from .graphics import none_subclass, entity_subclass, ModernGraphicEntity
from ..lldxf.types import convert_tags_to_text_lines, convert_text_lines_to_tags
from ..lldxf.classifiedtags import ClassifiedTags
from ..lldxf.attributes import DXFAttr, DXFAttributes, DefSubclass
from ..tools import crypt
_BODY_TPL = """ 0
BODY
5
0
330
0
100
AcDbEntity
8
0
100
AcDbModelerGeometry
70
1
"""
modeler_geometry_subclass = DefSubclass('AcDbModelerGeometry', {
'version': DXFAttr(70, default=1),
})
class Body(ModernGraphicEntity):
TEMPLATE = ClassifiedTags.from_text(_BODY_TPL)
DXFATTRIBS = DXFAttributes(none_subclass, entity_subclass, modeler_geometry_subclass)
def get_acis_data(self):
modeler_geometry = self.tags.subclasses[2]
text_lines = convert_tags_to_text_lines(tag for tag in modeler_geometry if tag.code in (1, 3))
return crypt.decode(text_lines)
def set_acis_data(self, text_lines):
def cleanup(lines):
for line in lines:
yield line.rstrip().replace('\n', '')
modeler_geometry = self.tags.subclasses[2]
# remove existing text
modeler_geometry[:] = (tag for tag in modeler_geometry if tag.code not in (1, 3))
modeler_geometry.extend(convert_text_lines_to_tags(crypt.encode(cleanup(text_lines))))
@contextmanager
def edit_data(self):
data = ModelerGeometryData(self)
yield data
self.set_acis_data(data.text_lines)
class ModelerGeometryData(object):
def __init__(self, body):
self.text_lines = list(body.get_acis_data())
def __str__(self):
return "\n".join(self.text_lines)
def set_text(self, text, sep='\n'):
self.text_lines = text.split(sep)
class Region(Body):
TEMPLATE = ClassifiedTags.from_text(_BODY_TPL.replace('BODY', 'REGION'))
_3DSOLID_TPL = """ 0
3DSOLID
5
0
330
0
100
AcDbEntity
8
0
100
AcDbModelerGeometry
70
1
100
AcDb3dSolid
350
0
"""
class Solid3d(Body):
TEMPLATE = ClassifiedTags.from_text(_3DSOLID_TPL)
DXFATTRIBS = DXFAttributes(
none_subclass,
entity_subclass,
modeler_geometry_subclass,
DefSubclass('AcDb3dSolid', {'history': DXFAttr(350, default=0)})
)
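# Hedged usage sketch for the entities above (assumes a drawing that already
# contains a BODY/3DSOLID entity bound to the name `solid`; names are
# illustrative, not part of this module):
#
#     with solid.edit_data() as data:        # yields ModelerGeometryData
#         acis_text = str(data)              # decoded ACIS text lines
#         data.set_text(new_acis_text)       # replace the ACIS payload
#     # leaving the context calls set_acis_data(), re-encoding the lines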
| gpl-3.0 |
tudorbarascu/QGIS | python/plugins/processing/algs/qgis/BasicStatistics.py | 30 | 13335 | # -*- coding: utf-8 -*-
"""
***************************************************************************
BasicStatistics.py
---------------------
Date : November 2016
Copyright : (C) 2016 by Nyall Dawson
Email : nyall dot dawson at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nyall Dawson'
__date__ = 'November 2016'
__copyright__ = '(C) 2016, Nyall Dawson'
import os
import codecs
from qgis.PyQt.QtCore import QVariant
from qgis.PyQt.QtGui import QIcon
from qgis.core import (QgsApplication,
QgsStatisticalSummary,
QgsStringStatisticalSummary,
QgsDateTimeStatisticalSummary,
QgsFeatureRequest,
QgsProcessing,
QgsProcessingException,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterField,
QgsProcessingParameterFileDestination,
QgsProcessingOutputNumber,
QgsProcessingFeatureSource)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0]
class BasicStatisticsForField(QgisAlgorithm):
INPUT_LAYER = 'INPUT_LAYER'
FIELD_NAME = 'FIELD_NAME'
OUTPUT_HTML_FILE = 'OUTPUT_HTML_FILE'
MIN = 'MIN'
MAX = 'MAX'
COUNT = 'COUNT'
UNIQUE = 'UNIQUE'
EMPTY = 'EMPTY'
FILLED = 'FILLED'
MIN_LENGTH = 'MIN_LENGTH'
MAX_LENGTH = 'MAX_LENGTH'
MEAN_LENGTH = 'MEAN_LENGTH'
CV = 'CV'
SUM = 'SUM'
MEAN = 'MEAN'
STD_DEV = 'STD_DEV'
RANGE = 'RANGE'
MEDIAN = 'MEDIAN'
MINORITY = 'MINORITY'
MAJORITY = 'MAJORITY'
FIRSTQUARTILE = 'FIRSTQUARTILE'
THIRDQUARTILE = 'THIRDQUARTILE'
IQR = 'IQR'
def icon(self):
return QgsApplication.getThemeIcon("/algorithms/mAlgorithmBasicStatistics.svg")
def svgIconPath(self):
return QgsApplication.iconPath("/algorithms/mAlgorithmBasicStatistics.svg")
def tags(self):
return self.tr(
'stats,statistics,date,time,datetime,string,number,text,table,layer,sum,maximum,minimum,mean,average,standard,deviation,'
'count,distinct,unique,variance,median,quartile,range,majority,minority,summary').split(',')
def group(self):
return self.tr('Vector analysis')
def groupId(self):
return 'vectoranalysis'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT_LAYER,
self.tr('Input layer'),
types=[QgsProcessing.TypeVector]))
self.addParameter(QgsProcessingParameterField(self.FIELD_NAME,
self.tr('Field to calculate statistics on'),
None, self.INPUT_LAYER, QgsProcessingParameterField.Any))
self.addParameter(QgsProcessingParameterFileDestination(self.OUTPUT_HTML_FILE, self.tr('Statistics'),
self.tr('HTML files (*.html)'), None, True))
self.addOutput(QgsProcessingOutputNumber(self.COUNT, self.tr('Count')))
self.addOutput(QgsProcessingOutputNumber(self.UNIQUE, self.tr('Number of unique values')))
self.addOutput(QgsProcessingOutputNumber(self.EMPTY, self.tr('Number of empty (null) values')))
self.addOutput(QgsProcessingOutputNumber(self.FILLED, self.tr('Number of non-empty values')))
self.addOutput(QgsProcessingOutputNumber(self.MIN, self.tr('Minimum value')))
self.addOutput(QgsProcessingOutputNumber(self.MAX, self.tr('Maximum value')))
self.addOutput(QgsProcessingOutputNumber(self.MIN_LENGTH, self.tr('Minimum length')))
self.addOutput(QgsProcessingOutputNumber(self.MAX_LENGTH, self.tr('Maximum length')))
self.addOutput(QgsProcessingOutputNumber(self.MEAN_LENGTH, self.tr('Mean length')))
self.addOutput(QgsProcessingOutputNumber(self.CV, self.tr('Coefficient of Variation')))
self.addOutput(QgsProcessingOutputNumber(self.SUM, self.tr('Sum')))
self.addOutput(QgsProcessingOutputNumber(self.MEAN, self.tr('Mean value')))
self.addOutput(QgsProcessingOutputNumber(self.STD_DEV, self.tr('Standard deviation')))
self.addOutput(QgsProcessingOutputNumber(self.RANGE, self.tr('Range')))
self.addOutput(QgsProcessingOutputNumber(self.MEDIAN, self.tr('Median')))
self.addOutput(QgsProcessingOutputNumber(self.MINORITY, self.tr('Minority (rarest occurring value)')))
self.addOutput(QgsProcessingOutputNumber(self.MAJORITY, self.tr('Majority (most frequently occurring value)')))
self.addOutput(QgsProcessingOutputNumber(self.FIRSTQUARTILE, self.tr('First quartile')))
self.addOutput(QgsProcessingOutputNumber(self.THIRDQUARTILE, self.tr('Third quartile')))
self.addOutput(QgsProcessingOutputNumber(self.IQR, self.tr('Interquartile Range (IQR)')))
def name(self):
return 'basicstatisticsforfields'
def displayName(self):
return self.tr('Basic statistics for fields')
def processAlgorithm(self, parameters, context, feedback):
source = self.parameterAsSource(parameters, self.INPUT_LAYER, context)
if source is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT_LAYER))
field_name = self.parameterAsString(parameters, self.FIELD_NAME, context)
field = source.fields().at(source.fields().lookupField(field_name))
output_file = self.parameterAsFileOutput(parameters, self.OUTPUT_HTML_FILE, context)
request = QgsFeatureRequest().setFlags(QgsFeatureRequest.NoGeometry).setSubsetOfAttributes([field_name],
source.fields())
features = source.getFeatures(request, QgsProcessingFeatureSource.FlagSkipGeometryValidityChecks)
count = source.featureCount()
data = []
data.append(self.tr('Analyzed field: {}').format(field_name))
results = {}
if field.isNumeric():
d, results = self.calcNumericStats(features, feedback, field, count)
data.extend(d)
elif field.type() in (QVariant.Date, QVariant.Time, QVariant.DateTime):
d, results = self.calcDateTimeStats(features, feedback, field, count)
data.extend(d)
else:
d, results = self.calcStringStats(features, feedback, field, count)
data.extend(d)
if output_file:
self.createHTML(output_file, data)
results[self.OUTPUT_HTML_FILE] = output_file
return results
def calcNumericStats(self, features, feedback, field, count):
total = 100.0 / count if count else 0
stat = QgsStatisticalSummary()
for current, ft in enumerate(features):
if feedback.isCanceled():
break
stat.addVariant(ft[field.name()])
feedback.setProgress(int(current * total))
stat.finalize()
cv = stat.stDev() / stat.mean() if stat.mean() != 0 else 0
results = {self.COUNT: stat.count(),
self.UNIQUE: stat.variety(),
self.EMPTY: stat.countMissing(),
self.FILLED: count - stat.countMissing(),
self.MIN: stat.min(),
self.MAX: stat.max(),
self.RANGE: stat.range(),
self.SUM: stat.sum(),
self.MEAN: stat.mean(),
self.MEDIAN: stat.median(),
self.STD_DEV: stat.stDev(),
self.CV: cv,
self.MINORITY: stat.minority(),
self.MAJORITY: stat.majority(),
self.FIRSTQUARTILE: stat.firstQuartile(),
self.THIRDQUARTILE: stat.thirdQuartile(),
self.IQR: stat.interQuartileRange()}
data = []
data.append(self.tr('Count: {}').format(stat.count()))
data.append(self.tr('Unique values: {}').format(stat.variety()))
data.append(self.tr('NULL (missing) values: {}').format(stat.countMissing()))
data.append(self.tr('Minimum value: {}').format(stat.min()))
data.append(self.tr('Maximum value: {}').format(stat.max()))
data.append(self.tr('Range: {}').format(stat.range()))
data.append(self.tr('Sum: {}').format(stat.sum()))
data.append(self.tr('Mean value: {}').format(stat.mean()))
data.append(self.tr('Median value: {}').format(stat.median()))
data.append(self.tr('Standard deviation: {}').format(stat.stDev()))
data.append(self.tr('Coefficient of Variation: {}').format(cv))
data.append(self.tr('Minority (rarest occurring value): {}').format(stat.minority()))
data.append(self.tr('Majority (most frequently occurring value): {}').format(stat.majority()))
data.append(self.tr('First quartile: {}').format(stat.firstQuartile()))
data.append(self.tr('Third quartile: {}').format(stat.thirdQuartile()))
data.append(self.tr('Interquartile Range (IQR): {}').format(stat.interQuartileRange()))
return data, results
def calcStringStats(self, features, feedback, field, count):
total = 100.0 / count if count else 1
stat = QgsStringStatisticalSummary()
for current, ft in enumerate(features):
if feedback.isCanceled():
break
stat.addValue(ft[field.name()])
feedback.setProgress(int(current * total))
stat.finalize()
results = {self.COUNT: stat.count(),
self.UNIQUE: stat.countDistinct(),
self.EMPTY: stat.countMissing(),
self.FILLED: stat.count() - stat.countMissing(),
self.MIN: stat.min(),
self.MAX: stat.max(),
self.MIN_LENGTH: stat.minLength(),
self.MAX_LENGTH: stat.maxLength(),
self.MEAN_LENGTH: stat.meanLength()}
data = []
data.append(self.tr('Count: {}').format(count))
data.append(self.tr('Unique values: {}').format(stat.countDistinct()))
data.append(self.tr('NULL (missing) values: {}').format(stat.countMissing()))
data.append(self.tr('Minimum value: {}').format(stat.min()))
data.append(self.tr('Maximum value: {}').format(stat.max()))
data.append(self.tr('Minimum length: {}').format(stat.minLength()))
data.append(self.tr('Maximum length: {}').format(stat.maxLength()))
data.append(self.tr('Mean length: {}').format(stat.meanLength()))
return data, results
def calcDateTimeStats(self, features, feedback, field, count):
total = 100.0 / count if count else 1
stat = QgsDateTimeStatisticalSummary()
for current, ft in enumerate(features):
if feedback.isCanceled():
break
stat.addValue(ft[field.name()])
feedback.setProgress(int(current * total))
stat.finalize()
results = {self.COUNT: stat.count(),
self.UNIQUE: stat.countDistinct(),
self.EMPTY: stat.countMissing(),
self.FILLED: stat.count() - stat.countMissing(),
self.MIN: stat.statistic(QgsDateTimeStatisticalSummary.Min),
self.MAX: stat.statistic(QgsDateTimeStatisticalSummary.Max)}
data = []
data.append(self.tr('Count: {}').format(count))
data.append(self.tr('Unique values: {}').format(stat.countDistinct()))
data.append(self.tr('NULL (missing) values: {}').format(stat.countMissing()))
data.append(
self.tr('Minimum value: {}').format(field.displayString(stat.statistic(QgsDateTimeStatisticalSummary.Min))))
data.append(
self.tr('Maximum value: {}').format(field.displayString(stat.statistic(QgsDateTimeStatisticalSummary.Max))))
return data, results
def createHTML(self, outputFile, algData):
with codecs.open(outputFile, 'w', encoding='utf-8') as f:
f.write('<html><head>\n')
f.write('<meta http-equiv="Content-Type" content="text/html; \
charset=utf-8" /></head><body>\n')
for s in algData:
f.write('<p>' + str(s) + '</p>\n')
f.write('</body></html>\n')
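# Hedged usage sketch from the QGIS Python console (the "qgis:" provider prefix
# and the field/file values are assumptions; the parameter keys match the ones
# declared in initAlgorithm above):
#
#     import processing
#     processing.run("qgis:basicstatisticsforfields",
#                    {"INPUT_LAYER": layer,
#                     "FIELD_NAME": "population",
#                     "OUTPUT_HTML_FILE": "/tmp/stats.html"})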
| gpl-2.0 |
manashmndl/kivy | kivy/gesture.py | 50 | 14632 | '''
Gesture recognition
===================
This class allows you to easily create new
gestures and compare them::
from kivy.gesture import Gesture, GestureDatabase
# Create a gesture
g = Gesture()
g.add_stroke(point_list=[(1,1), (3,4), (2,1)])
g.normalize()
# Add it to the database
gdb = GestureDatabase()
gdb.add_gesture(g)
# And for the next gesture, try to find it!
g2 = Gesture()
# ...
gdb.find(g2)
.. warning::
You don't really want to do this: it's more of an example of how
to construct gestures dynamically. Typically, you would
need a lot more points, so it's better to record gestures in a file and
reload them to compare later. Look in the examples/gestures directory for
an example of how to do that.
'''
__all__ = ('Gesture', 'GestureDatabase', 'GesturePoint', 'GestureStroke')
import pickle
import base64
import zlib
import math
from kivy.vector import Vector
from io import BytesIO
class GestureDatabase(object):
'''Class to handle a gesture database.'''
def __init__(self):
self.db = []
def add_gesture(self, gesture):
'''Add a new gesture to the database.'''
self.db.append(gesture)
def find(self, gesture, minscore=0.9, rotation_invariant=True):
'''Find a matching gesture in the database.'''
if not gesture:
return
best = None
bestscore = minscore
for g in self.db:
score = g.get_score(gesture, rotation_invariant)
if score < bestscore:
continue
bestscore = score
best = g
if not best:
return
return (bestscore, best)
def gesture_to_str(self, gesture):
'''Convert a gesture into a unique string.'''
io = BytesIO()
p = pickle.Pickler(io)
p.dump(gesture)
data = base64.b64encode(zlib.compress(io.getvalue(), 9))
return data
def str_to_gesture(self, data):
'''Convert a unique string to a gesture.'''
io = BytesIO(zlib.decompress(base64.b64decode(data)))
p = pickle.Unpickler(io)
gesture = p.load()
return gesture
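# Hedged round-trip sketch for the two helpers above: the string form is a
# zlib-compressed, base64-encoded pickle of the Gesture instance.
#
#     data = gdb.gesture_to_str(g)     # bytes safe to store in a text file
#     g2 = gdb.str_to_gesture(data)    # reconstructs an equivalent Gesture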
class GesturePoint:
def __init__(self, x, y):
'''Stores the x,y coordinates of a point in the gesture.'''
self.x = float(x)
self.y = float(y)
def scale(self, factor):
''' Scales the point by the given factor.'''
self.x *= factor
self.y *= factor
return self
def __repr__(self):
return 'Mouse_point: %f,%f' % (self.x, self.y)
class GestureStroke:
''' Gestures can be made up of multiple strokes.'''
def __init__(self):
''' A stroke in the gesture.'''
self.points = list()
self.screenpoints = list()
# These return the min and max coordinates of the stroke
@property
def max_x(self):
if len(self.points) == 0:
return 0
return max(self.points, key=lambda pt: pt.x).x
@property
def min_x(self):
if len(self.points) == 0:
return 0
return min(self.points, key=lambda pt: pt.x).x
@property
def max_y(self):
if len(self.points) == 0:
return 0
return max(self.points, key=lambda pt: pt.y).y
@property
def min_y(self):
if len(self.points) == 0:
return 0
return min(self.points, key=lambda pt: pt.y).y
def add_point(self, x, y):
'''
add_point(x=x_pos, y=y_pos)
Adds a point to the stroke.
'''
self.points.append(GesturePoint(x, y))
self.screenpoints.append((x, y))
def scale_stroke(self, scale_factor):
'''
scale_stroke(scale_factor=float)
Scales the stroke down by scale_factor.
'''
self.points = [pt.scale(scale_factor) for pt in self.points]
def points_distance(self, point1, point2):
'''
points_distance(point1=GesturePoint, point2=GesturePoint)
Returns the distance between two GesturePoints.
'''
x = point1.x - point2.x
y = point1.y - point2.y
return math.sqrt(x * x + y * y)
def stroke_length(self, point_list=None):
'''Finds the length of the stroke. If a point list is given,
finds the length of that list.
'''
if point_list is None:
point_list = self.points
gesture_length = 0.0
if len(point_list) <= 1: # If there is only one point -> no length
return gesture_length
for i in range(len(point_list) - 1):
gesture_length += self.points_distance(
point_list[i], point_list[i + 1])
return gesture_length
def normalize_stroke(self, sample_points=32):
'''Normalizes strokes so that every stroke has a standard number of
points. Returns True if stroke is normalized, False if it can't be
normalized. sample_points controls the resolution of the stroke.
'''
# If there is only one point or the length is 0, don't normalize
if len(self.points) <= 1 or self.stroke_length(self.points) == 0.0:
return False
        # Calculate the target spacing between consecutive resampled points
target_stroke_size = \
self.stroke_length(self.points) / float(sample_points)
new_points = list()
new_points.append(self.points[0])
# We loop on the points
prev = self.points[0]
src_distance = 0.0
dst_distance = target_stroke_size
for curr in self.points[1:]:
d = self.points_distance(prev, curr)
if d > 0:
prev = curr
src_distance = src_distance + d
                # The new point needs to be inserted into the
                # segment [prev, curr]
while dst_distance < src_distance:
x_dir = curr.x - prev.x
y_dir = curr.y - prev.y
ratio = (src_distance - dst_distance) / d
to_x = x_dir * ratio + prev.x
to_y = y_dir * ratio + prev.y
new_points.append(GesturePoint(to_x, to_y))
dst_distance = self.stroke_length(self.points) / \
float(sample_points) * len(new_points)
        # If this happens, the resampling above has gone wrong
        if not len(new_points) == sample_points:
            raise ValueError('Invalid number of stroke points; got '
                             '%d but expected %d' %
                             (len(new_points), sample_points))
self.points = new_points
return True
def center_stroke(self, offset_x, offset_y):
        '''Centers the stroke by offsetting the points.'''
for point in self.points:
point.x -= offset_x
point.y -= offset_y
class Gesture:
'''A python implementation of a gesture recognition algorithm by
Oleg Dopertchouk: http://www.gamedev.net/reference/articles/article2039.asp
Implemented by Jeiel Aranal ([email protected]),
released into the public domain.
'''
# Tolerance for evaluation using the '==' operator
DEFAULT_TOLERANCE = 0.1
def __init__(self, tolerance=None):
'''
Gesture([tolerance=float])
Creates a new gesture with an optional matching tolerance value.
'''
self.width = 0.
self.height = 0.
self.gesture_product = 0.
self.strokes = list()
if tolerance is None:
self.tolerance = Gesture.DEFAULT_TOLERANCE
else:
self.tolerance = tolerance
def _scale_gesture(self):
''' Scales down the gesture to a unit of 1.'''
        # The list comprehensions create the min/max coordinates of the
        # strokes in the gesture, and min()/max() pull the lowest/highest value
min_x = min([stroke.min_x for stroke in self.strokes])
max_x = max([stroke.max_x for stroke in self.strokes])
min_y = min([stroke.min_y for stroke in self.strokes])
max_y = max([stroke.max_y for stroke in self.strokes])
x_len = max_x - min_x
self.width = x_len
y_len = max_y - min_y
self.height = y_len
scale_factor = max(x_len, y_len)
if scale_factor <= 0.0:
return False
scale_factor = 1.0 / scale_factor
for stroke in self.strokes:
stroke.scale_stroke(scale_factor)
return True
def _center_gesture(self):
        ''' Centers the points of the gesture around their average.'''
total_x = 0.0
total_y = 0.0
total_points = 0
for stroke in self.strokes:
# adds up all the points inside the stroke
stroke_y = sum([pt.y for pt in stroke.points])
stroke_x = sum([pt.x for pt in stroke.points])
total_y += stroke_y
total_x += stroke_x
total_points += len(stroke.points)
if total_points == 0:
return False
# Average to get the offset
total_x /= total_points
total_y /= total_points
# Apply the offset to the strokes
for stroke in self.strokes:
stroke.center_stroke(total_x, total_y)
return True
def add_stroke(self, point_list=None):
'''Adds a stroke to the gesture and returns the Stroke instance.
Optional point_list argument is a list of the mouse points for
the stroke.
'''
self.strokes.append(GestureStroke())
if isinstance(point_list, list) or isinstance(point_list, tuple):
for point in point_list:
if isinstance(point, GesturePoint):
self.strokes[-1].points.append(point)
elif isinstance(point, list) or isinstance(point, tuple):
if len(point) != 2:
                        raise ValueError("Stroke entry must have exactly 2 values")
self.strokes[-1].add_point(point[0], point[1])
else:
raise TypeError("The point list should either be "
"tuples of x and y or a list of "
"GesturePoint objects")
elif point_list is not None:
raise ValueError("point_list should be a tuple/list")
return self.strokes[-1]
def normalize(self, stroke_samples=32):
'''Runs the gesture normalization algorithm and calculates the dot
product with self.
'''
if not self._scale_gesture() or not self._center_gesture():
self.gesture_product = False
return False
for stroke in self.strokes:
stroke.normalize_stroke(stroke_samples)
self.gesture_product = self.dot_product(self)
def get_rigid_rotation(self, dstpts):
'''
Extract the rotation to apply to a group of points to minimize the
distance to a second group of points. The two groups of points are
assumed to be centered. This is a simple version that just picks
an angle based on the first point of the gesture.
'''
if len(self.strokes) < 1 or len(self.strokes[0].points) < 1:
return 0
if len(dstpts.strokes) < 1 or len(dstpts.strokes[0].points) < 1:
return 0
        # Compare the first point of the target gesture against the first
        # point of this gesture (using the same point for both would always
        # yield an angle of 0).
        p = dstpts.strokes[0].points[0]
        target = Vector([p.x, p.y])
        p = self.strokes[0].points[0]
        source = Vector([p.x, p.y])
return source.angle(target)
def dot_product(self, comparison_gesture):
''' Calculates the dot product of the gesture with another gesture.'''
if len(comparison_gesture.strokes) != len(self.strokes):
return -1
if getattr(comparison_gesture, 'gesture_product', True) is False or \
getattr(self, 'gesture_product', True) is False:
return -1
dot_product = 0.0
for stroke_index, (my_stroke, cmp_stroke) in enumerate(
list(zip(self.strokes, comparison_gesture.strokes))):
for pt_index, (my_point, cmp_point) in enumerate(
list(zip(my_stroke.points, cmp_stroke.points))):
dot_product += (my_point.x * cmp_point.x +
my_point.y * cmp_point.y)
return dot_product
def rotate(self, angle):
g = Gesture()
for stroke in self.strokes:
tmp = []
for j in stroke.points:
v = Vector([j.x, j.y]).rotate(angle)
tmp.append(v)
g.add_stroke(tmp)
g.gesture_product = g.dot_product(g)
return g
def get_score(self, comparison_gesture, rotation_invariant=True):
''' Returns the matching score of the gesture against another gesture.
'''
if isinstance(comparison_gesture, Gesture):
if rotation_invariant:
# get orientation
angle = self.get_rigid_rotation(comparison_gesture)
# rotate the gesture to be in the same frame.
comparison_gesture = comparison_gesture.rotate(angle)
# this is the normal "orientation" code.
score = self.dot_product(comparison_gesture)
if score <= 0:
return score
score /= math.sqrt(
self.gesture_product * comparison_gesture.gesture_product)
return score
def __eq__(self, comparison_gesture):
''' Allows easy comparisons between gesture instances.'''
if isinstance(comparison_gesture, Gesture):
            # If the gestures don't have the same number of strokes, it's
            # definitely not the same gesture
score = self.get_score(comparison_gesture)
if (score > (1.0 - self.tolerance) and
score < (1.0 + self.tolerance)):
return True
else:
return False
else:
return NotImplemented
def __ne__(self, comparison_gesture):
result = self.__eq__(comparison_gesture)
if result is NotImplemented:
return result
else:
return not result
def __lt__(self, comparison_gesture):
raise TypeError("Gesture cannot be evaluated with <")
def __gt__(self, comparison_gesture):
raise TypeError("Gesture cannot be evaluated with >")
def __le__(self, comparison_gesture):
raise TypeError("Gesture cannot be evaluated with <=")
def __ge__(self, comparison_gesture):
raise TypeError("Gesture cannot be evaluated with >=")
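# A minimal usage sketch of the classes above; the touch coordinates are made
# up, everything else follows the APIs defined in this module.
def _gesture_example():
    # Build and normalize a template gesture from one stroke of raw points.
    template = Gesture()
    template.add_stroke(point_list=[(0, 0), (10, 10), (20, 0)])
    template.normalize()
    # Store it in a database; gestures can also be round-tripped as strings.
    gdb = GestureDatabase()
    gdb.add_gesture(template)
    restored = gdb.str_to_gesture(gdb.gesture_to_str(template))
    # Match a freshly drawn stroke: find() returns (score, gesture) for the
    # best match above minscore, or None if nothing is close enough.
    drawn = Gesture()
    drawn.add_stroke(point_list=[(0, 1), (11, 9), (19, 0)])
    drawn.normalize()
    return restored, gdb.find(drawn, minscore=0.9)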
| mit |
chenc10/Spark-PAF | ec2/lib/boto-2.34.0/boto/s3/user.py | 171 | 1968 | # Copyright (c) 2006,2007 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class User(object):
def __init__(self, parent=None, id='', display_name=''):
if parent:
parent.owner = self
self.type = None
self.id = id
self.display_name = display_name
def startElement(self, name, attrs, connection):
return None
def endElement(self, name, value, connection):
if name == 'DisplayName':
self.display_name = value
elif name == 'ID':
self.id = value
else:
setattr(self, name, value)
def to_xml(self, element_name='Owner'):
if self.type:
s = '<%s xsi:type="%s">' % (element_name, self.type)
else:
s = '<%s>' % element_name
s += '<ID>%s</ID>' % self.id
s += '<DisplayName>%s</DisplayName>' % self.display_name
s += '</%s>' % element_name
return s
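# A small illustrative sketch (the id and display name are made-up values):
# to_xml() renders the owner as the S3-style Owner element built above.
def _example_owner_xml():
    owner = User(id='1234abcd', display_name='example-user')
    # Returns '<Owner><ID>1234abcd</ID>'
    #         '<DisplayName>example-user</DisplayName></Owner>'
    return owner.to_xml()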
| apache-2.0 |
pulinagrawal/nupic | scripts/run_swarm.py | 9 | 7871 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
""" @file run_swarm.py
This script is the command-line interface for running swarms in nupic."""
import sys
import os
import optparse
from nupic.swarming import permutations_runner
from nupic.swarming.permutations_runner import DEFAULT_OPTIONS
def runPermutations(args):
"""
The main function of the RunPermutations utility.
This utility will automatically generate and run multiple prediction framework
experiments that are permutations of a base experiment via the Grok engine.
For example, if you have an experiment that you want to test with 3 possible
values of variable A and 2 possible values of variable B, this utility will
automatically generate the experiment directories and description files for
each of the 6 different experiments.
Here is an example permutations file which is read by this script below. The
permutations file must be in the same directory as the description.py for the
base experiment that you want to permute. It contains a permutations dict, an
optional list of the result items to report on for each experiment, and an
optional result item to optimize for.
When an 'optimize' entry is provided, this tool will attempt to prioritize the
order in which the various permutations are run in order to improve the odds
of running the best permutations sooner. It does this by watching the results
for various parameter values and putting parameter values that give generally
better results at the head of the queue.
In addition, when the optimize key is provided, we periodically update the UI
with the best results obtained so far on that metric.
---------------------------------------------------------------------------
permutations = dict(
iterationCount = [1000, 5000],
coincCount = [50, 100],
trainTP = [False],
)
report = ['.*reconstructErrAvg',
'.*inputPredScore.*',
]
optimize = 'postProc_gym1_baseline:inputPredScore'
Parameters:
----------------------------------------------------------------------
args: Command-line args; the equivalent of sys.argv[1:]
retval: for the actions 'run', 'pickup', and 'dryRun', returns the
                        Hypersearch job ID (in ClientJobs table); otherwise returns
None
"""
helpString = (
"\n\n%prog [options] permutationsScript\n"
"%prog [options] expDescription.json\n\n"
"This script runs permutations of an experiment via Grok engine, as "
"defined in a\npermutations.py script or an expGenerator experiment "
"description json file.\nIn the expDescription.json form, the json file "
"MUST have the file extension\n'.json' and MUST conform to "
"expGenerator/experimentDescriptionSchema.json.")
parser = optparse.OptionParser(usage=helpString)
parser.add_option(
"--replaceReport", dest="replaceReport", action="store_true",
default=DEFAULT_OPTIONS["replaceReport"],
help="Replace existing csv report file if it exists. Default is to "
"append to the existing file. [default: %default].")
parser.add_option(
"--action", dest="action", default=DEFAULT_OPTIONS["action"],
choices=["run", "pickup", "report", "dryRun"],
    help="Which action to perform. Possible actions are run, pickup, "
         "dryRun and report. "
"run: run a new HyperSearch via Grok. "
"pickup: pick up the latest run of a HyperSearch job. "
"dryRun: run a single HypersearchWorker inline within the application "
"process without the Grok infrastructure to flush out bugs in "
"description and permutations scripts; defaults to "
"maxPermutations=1: use --maxPermutations to change this; "
"report: just print results from the last or current run. "
"[default: %default].")
parser.add_option(
"--maxPermutations", dest="maxPermutations",
default=DEFAULT_OPTIONS["maxPermutations"], type="int",
help="Maximum number of models to search. Applies only to the 'run' and "
"'dryRun' actions. [default: %default].")
parser.add_option(
"--exports", dest="exports", default=DEFAULT_OPTIONS["exports"],
type="string",
    help="json dump of environment variable settings that should be applied "
"for the job before running. [default: %default].")
parser.add_option(
"--useTerminators", dest="useTerminators", action="store_true",
    default=DEFAULT_OPTIONS["useTerminators"], help="Use early model terminators in HyperSearch "
"[default: %default].")
parser.add_option(
"--maxWorkers", dest="maxWorkers", default=DEFAULT_OPTIONS["maxWorkers"],
type="int",
help="Maximum number of concurrent workers to launch. Applies only to "
"the 'run' action. [default: %default].")
parser.add_option(
"-v", dest="verbosityCount", action="count", default=0,
help="Increase verbosity of the output. Specify multiple times for "
"increased verbosity. e.g., -vv is more verbose than -v.")
parser.add_option(
"--timeout", dest="timeout", default=DEFAULT_OPTIONS["timeout"], type="int",
    help="Time out for this search in minutes "
"[default: %default].")
parser.add_option(
"--overwrite", default=DEFAULT_OPTIONS["overwrite"], action="store_true",
    help="If specified, overwrite existing description.py and permutations.py"
" (in the same directory as the <expDescription.json> file) if they"
" already exist. [default: %default].")
parser.add_option(
"--genTopNDescriptions", dest="genTopNDescriptions",
default=DEFAULT_OPTIONS["genTopNDescriptions"], type="int",
    help="Generate description files for the top N models. Each one will be"
         " placed into its own subdirectory under the base description file. "
"[default: %default].")
(options, positionalArgs) = parser.parse_args(args)
# Get the permutations script's filepath
if len(positionalArgs) != 1:
parser.error("You must supply the name of exactly one permutations script "
"or JSON description file.")
fileArgPath = os.path.expanduser(positionalArgs[0])
fileArgPath = os.path.expandvars(fileArgPath)
fileArgPath = os.path.abspath(fileArgPath)
permWorkDir = os.path.dirname(fileArgPath)
outputLabel = os.path.splitext(os.path.basename(fileArgPath))[0]
basename = os.path.basename(fileArgPath)
fileExtension = os.path.splitext(basename)[1]
optionsDict = vars(options)
if fileExtension == ".json":
returnValue = permutations_runner.runWithJsonFile(
fileArgPath, optionsDict, outputLabel, permWorkDir)
else:
returnValue = permutations_runner.runWithPermutationsScript(
fileArgPath, optionsDict, outputLabel, permWorkDir)
return returnValue
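# A minimal sketch of how runPermutations() is typically driven; the path to
# the permutations script is an assumption, while the option names come from
# the parser above. The equivalent shell command would be:
#   python run_swarm.py path/to/permutations.py --maxWorkers=4 --overwrite
def _exampleRun(permutationsScript="path/to/permutations.py"):
  return runPermutations([permutationsScript, "--maxWorkers", "4",
                          "--overwrite"])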
if __name__ == "__main__":
runPermutations(sys.argv[1:])
| agpl-3.0 |
alqfahad/odoo | addons/auth_openid/utils.py | 428 | 1589 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-2012 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
KEY_LENGTH = 16
SREG2AX = { # from http://www.axschema.org/types/#sreg
'nickname': 'http://axschema.org/namePerson/friendly',
'email': 'http://axschema.org/contact/email',
'fullname': 'http://axschema.org/namePerson',
'dob': 'http://axschema.org/birthDate',
'gender': 'http://axschema.org/person/gender',
'postcode': 'http://axschema.org/contact/postalCode/home',
'country': 'http://axschema.org/contact/country/home',
'language': 'http://axschema.org/pref/language',
'timezone': 'http://axschema.org/pref/timezone',
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
TwinkleChawla/nova | nova/db/sqlalchemy/migrate_repo/versions/300_migration_context.py | 44 | 1117 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Column
from sqlalchemy import MetaData
from sqlalchemy import Table
from sqlalchemy import Text
BASE_TABLE_NAME = 'instance_extra'
NEW_COLUMN_NAME = 'migration_context'
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
for prefix in ('', 'shadow_'):
table = Table(prefix + BASE_TABLE_NAME, meta, autoload=True)
new_column = Column(NEW_COLUMN_NAME, Text, nullable=True)
if not hasattr(table.c, NEW_COLUMN_NAME):
table.create_column(new_column)
| apache-2.0 |
msingh172/youtube-dl | youtube_dl/downloader/common.py | 95 | 13848 | from __future__ import division, unicode_literals
import os
import re
import sys
import time
from ..compat import compat_str
from ..utils import (
encodeFilename,
decodeArgument,
format_bytes,
timeconvert,
)
class FileDownloader(object):
"""File Downloader class.
File downloader objects are the ones responsible of downloading the
actual video file and writing it to disk.
File downloaders accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of
options instead.
Available options:
verbose: Print additional info to stdout.
quiet: Do not print messages to stdout.
ratelimit: Download speed limit, in bytes/sec.
retries: Number of times to retry for HTTP error 5xx
buffersize: Size of download buffer in bytes.
noresizebuffer: Do not automatically resize the download buffer.
continuedl: Try to continue downloads if possible.
noprogress: Do not print the progress bar.
logtostderr: Log messages to stderr instead of stdout.
consoletitle: Display progress in console window's titlebar.
nopart: Do not use temporary .part files.
updatetime: Use the Last-modified header to set output file timestamps.
test: Download only first bytes to test the downloader.
min_filesize: Skip files smaller than this size
max_filesize: Skip files larger than this size
xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
                        (experimental)
external_downloader_args: A list of additional command-line arguments for the
external downloader.
Subclasses of this one must re-define the real_download method.
"""
_TEST_FILE_SIZE = 10241
params = None
def __init__(self, ydl, params):
"""Create a FileDownloader object with the given options."""
self.ydl = ydl
self._progress_hooks = []
self.params = params
self.add_progress_hook(self.report_progress)
@staticmethod
def format_seconds(seconds):
(mins, secs) = divmod(seconds, 60)
(hours, mins) = divmod(mins, 60)
if hours > 99:
return '--:--:--'
if hours == 0:
return '%02d:%02d' % (mins, secs)
else:
return '%02d:%02d:%02d' % (hours, mins, secs)
@staticmethod
def calc_percent(byte_counter, data_len):
if data_len is None:
return None
return float(byte_counter) / float(data_len) * 100.0
@staticmethod
def format_percent(percent):
if percent is None:
return '---.-%'
return '%6s' % ('%3.1f%%' % percent)
@staticmethod
def calc_eta(start, now, total, current):
if total is None:
return None
if now is None:
now = time.time()
dif = now - start
if current == 0 or dif < 0.001: # One millisecond
return None
rate = float(current) / dif
return int((float(total) - float(current)) / rate)
@staticmethod
def format_eta(eta):
if eta is None:
return '--:--'
return FileDownloader.format_seconds(eta)
@staticmethod
def calc_speed(start, now, bytes):
dif = now - start
if bytes == 0 or dif < 0.001: # One millisecond
return None
return float(bytes) / dif
@staticmethod
def format_speed(speed):
if speed is None:
return '%10s' % '---b/s'
return '%10s' % ('%s/s' % format_bytes(speed))
@staticmethod
def best_block_size(elapsed_time, bytes):
new_min = max(bytes / 2.0, 1.0)
new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
if elapsed_time < 0.001:
return int(new_max)
rate = bytes / elapsed_time
if rate > new_max:
return int(new_max)
if rate < new_min:
return int(new_min)
return int(rate)
@staticmethod
def parse_bytes(bytestr):
"""Parse a string indicating a byte quantity into an integer."""
matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
if matchobj is None:
return None
number = float(matchobj.group(1))
multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
return int(round(number * multiplier))
def to_screen(self, *args, **kargs):
self.ydl.to_screen(*args, **kargs)
def to_stderr(self, message):
self.ydl.to_screen(message)
def to_console_title(self, message):
self.ydl.to_console_title(message)
def trouble(self, *args, **kargs):
self.ydl.trouble(*args, **kargs)
def report_warning(self, *args, **kargs):
self.ydl.report_warning(*args, **kargs)
def report_error(self, *args, **kargs):
self.ydl.report_error(*args, **kargs)
def slow_down(self, start_time, now, byte_counter):
"""Sleep if the download speed is over the rate limit."""
rate_limit = self.params.get('ratelimit', None)
if rate_limit is None or byte_counter == 0:
return
if now is None:
now = time.time()
elapsed = now - start_time
if elapsed <= 0.0:
return
speed = float(byte_counter) / elapsed
if speed > rate_limit:
time.sleep(max((byte_counter // rate_limit) - elapsed, 0))
def temp_name(self, filename):
"""Returns a temporary filename for the given filename."""
if self.params.get('nopart', False) or filename == '-' or \
(os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
return filename
return filename + '.part'
def undo_temp_name(self, filename):
if filename.endswith('.part'):
return filename[:-len('.part')]
return filename
def try_rename(self, old_filename, new_filename):
try:
if old_filename == new_filename:
return
os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
except (IOError, OSError) as err:
self.report_error('unable to rename file: %s' % compat_str(err))
def try_utime(self, filename, last_modified_hdr):
"""Try to set the last-modified time of the given file."""
if last_modified_hdr is None:
return
if not os.path.isfile(encodeFilename(filename)):
return
timestr = last_modified_hdr
if timestr is None:
return
filetime = timeconvert(timestr)
if filetime is None:
return filetime
# Ignore obviously invalid dates
if filetime == 0:
return
try:
os.utime(filename, (time.time(), filetime))
except Exception:
pass
return filetime
def report_destination(self, filename):
"""Report destination filename."""
self.to_screen('[download] Destination: ' + filename)
def _report_progress_status(self, msg, is_last_line=False):
fullmsg = '[download] ' + msg
if self.params.get('progress_with_newline', False):
self.to_screen(fullmsg)
else:
if os.name == 'nt':
prev_len = getattr(self, '_report_progress_prev_line_length',
0)
if prev_len > len(fullmsg):
fullmsg += ' ' * (prev_len - len(fullmsg))
self._report_progress_prev_line_length = len(fullmsg)
clear_line = '\r'
else:
clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
self.to_console_title('youtube-dl ' + msg)
def report_progress(self, s):
if s['status'] == 'finished':
if self.params.get('noprogress', False):
self.to_screen('[download] Download completed')
else:
s['_total_bytes_str'] = format_bytes(s['total_bytes'])
if s.get('elapsed') is not None:
s['_elapsed_str'] = self.format_seconds(s['elapsed'])
msg_template = '100%% of %(_total_bytes_str)s in %(_elapsed_str)s'
else:
msg_template = '100%% of %(_total_bytes_str)s'
self._report_progress_status(
msg_template % s, is_last_line=True)
if self.params.get('noprogress'):
return
if s['status'] != 'downloading':
return
if s.get('eta') is not None:
s['_eta_str'] = self.format_eta(s['eta'])
else:
s['_eta_str'] = 'Unknown ETA'
if s.get('total_bytes') and s.get('downloaded_bytes') is not None:
s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes'])
elif s.get('total_bytes_estimate') and s.get('downloaded_bytes') is not None:
s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes_estimate'])
else:
if s.get('downloaded_bytes') == 0:
s['_percent_str'] = self.format_percent(0)
else:
s['_percent_str'] = 'Unknown %'
if s.get('speed') is not None:
s['_speed_str'] = self.format_speed(s['speed'])
else:
s['_speed_str'] = 'Unknown speed'
if s.get('total_bytes') is not None:
s['_total_bytes_str'] = format_bytes(s['total_bytes'])
msg_template = '%(_percent_str)s of %(_total_bytes_str)s at %(_speed_str)s ETA %(_eta_str)s'
elif s.get('total_bytes_estimate') is not None:
s['_total_bytes_estimate_str'] = format_bytes(s['total_bytes_estimate'])
msg_template = '%(_percent_str)s of ~%(_total_bytes_estimate_str)s at %(_speed_str)s ETA %(_eta_str)s'
else:
if s.get('downloaded_bytes') is not None:
s['_downloaded_bytes_str'] = format_bytes(s['downloaded_bytes'])
if s.get('elapsed'):
s['_elapsed_str'] = self.format_seconds(s['elapsed'])
msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s (%(_elapsed_str)s)'
else:
msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s'
else:
                msg_template = '%(_percent_str)s at %(_speed_str)s ETA %(_eta_str)s'
self._report_progress_status(msg_template % s)
def report_resuming_byte(self, resume_len):
"""Report attempt to resume at given byte."""
self.to_screen('[download] Resuming download at byte %s' % resume_len)
def report_retry(self, count, retries):
"""Report retry in case of HTTP error 5xx"""
self.to_screen('[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded."""
try:
self.to_screen('[download] %s has already been downloaded' % file_name)
except UnicodeEncodeError:
self.to_screen('[download] The file has already been downloaded')
def report_unable_to_resume(self):
"""Report it was impossible to resume download."""
self.to_screen('[download] Unable to resume')
def download(self, filename, info_dict):
"""Download to a filename using the info from info_dict
Return True on success and False otherwise
"""
nooverwrites_and_exists = (
self.params.get('nooverwrites', False) and
os.path.exists(encodeFilename(filename))
)
continuedl_and_exists = (
self.params.get('continuedl', True) and
os.path.isfile(encodeFilename(filename)) and
not self.params.get('nopart', False)
)
# Check file already present
if filename != '-' and nooverwrites_and_exists or continuedl_and_exists:
self.report_file_already_downloaded(filename)
self._hook_progress({
'filename': filename,
'status': 'finished',
'total_bytes': os.path.getsize(encodeFilename(filename)),
})
return True
sleep_interval = self.params.get('sleep_interval')
if sleep_interval:
self.to_screen('[download] Sleeping %s seconds...' % sleep_interval)
time.sleep(sleep_interval)
return self.real_download(filename, info_dict)
def real_download(self, filename, info_dict):
"""Real download process. Redefine in subclasses."""
raise NotImplementedError('This method must be implemented by subclasses')
def _hook_progress(self, status):
for ph in self._progress_hooks:
ph(status)
def add_progress_hook(self, ph):
# See YoutubeDl.py (search for progress_hooks) for a description of
# this interface
self._progress_hooks.append(ph)
def _debug_cmd(self, args, exe=None):
if not self.params.get('verbose', False):
return
str_args = [decodeArgument(a) for a in args]
if exe is None:
exe = os.path.basename(str_args[0])
try:
import pipes
shell_quote = lambda args: ' '.join(map(pipes.quote, str_args))
except ImportError:
shell_quote = repr
self.to_screen('[debug] %s command line: %s' % (
exe, shell_quote(str_args)))
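# A small sketch exercising the pure helpers above; the byte string and the
# timing numbers are arbitrary, and the expected values follow directly from
# parse_bytes(), format_seconds() and calc_eta() as implemented here.
def _example_helpers():
    assert FileDownloader.parse_bytes('10.5M') == 11010048
    assert FileDownloader.format_seconds(3661) == '01:01:01'
    eta = FileDownloader.calc_eta(start=0, now=10, total=100, current=25)
    return FileDownloader.format_eta(eta)  # '00:30'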
| unlicense |
armink/rt-thread | bsp/k210/rtconfig.py | 6 | 1560 | import os
# toolchains options
ARCH ='risc-v'
CPU ='k210'
CROSS_TOOL ='gcc'
if os.getenv('RTT_ROOT'):
RTT_ROOT = os.getenv('RTT_ROOT')
else:
RTT_ROOT = r'../..'
if os.getenv('RTT_CC'):
CROSS_TOOL = os.getenv('RTT_CC')
if CROSS_TOOL == 'gcc':
PLATFORM = 'gcc'
EXEC_PATH = r'/opt/gnu-mcu-eclipse/riscv-none-gcc/8.2.0-2.1-20190425-1021/bin'
else:
    print('Please make sure your toolchain is GNU GCC!')
exit(0)
if os.getenv('RTT_EXEC_PATH'):
EXEC_PATH = os.getenv('RTT_EXEC_PATH')
BUILD = 'release'
if PLATFORM == 'gcc':
# toolchains
PREFIX = 'riscv-none-embed-'
CC = PREFIX + 'gcc'
CXX = PREFIX + 'g++'
AS = PREFIX + 'gcc'
AR = PREFIX + 'ar'
LINK = PREFIX + 'gcc'
TARGET_EXT = 'elf'
SIZE = PREFIX + 'size'
OBJDUMP = PREFIX + 'objdump'
OBJCPY = PREFIX + 'objcopy'
DEVICE = ' -mcmodel=medany -march=rv64imafc -mabi=lp64f -fsingle-precision-constant'
CFLAGS = DEVICE + ' -fno-common -ffunction-sections -fdata-sections -fstrict-volatile-bitfields'
AFLAGS = ' -c' + DEVICE + ' -x assembler-with-cpp'
LFLAGS = DEVICE + ' -nostartfiles -Wl,--gc-sections,-Map=rtthread.map,-cref,-u,_start -T link.lds'
CPATH = ''
LPATH = ''
if BUILD == 'debug':
CFLAGS += ' -O0 -ggdb'
AFLAGS += ' -ggdb'
else:
CFLAGS += ' -O2 -Os'
CXXFLAGS = CFLAGS
DUMP_ACTION = OBJDUMP + ' -D -S $TARGET > rtt.asm\n'
POST_ACTION = OBJCPY + ' -O binary $TARGET rtthread.bin\n' + SIZE + ' $TARGET \n'
| apache-2.0 |
ChenJunor/hue | desktop/core/ext-py/Django-1.6.10/django/db/backends/oracle/creation.py | 104 | 12331 | import sys
import time
from django.conf import settings
from django.db.backends.creation import BaseDatabaseCreation
from django.utils.six.moves import input
TEST_DATABASE_PREFIX = 'test_'
PASSWORD = 'Im_a_lumberjack'
class DatabaseCreation(BaseDatabaseCreation):
# This dictionary maps Field objects to their associated Oracle column
# types, as strings. Column-type strings can contain format strings; they'll
# be interpolated against the values of Field.__dict__ before being output.
# If a column type is set to None, it won't be included in the output.
#
# Any format strings starting with "qn_" are quoted before being used in the
    # output (the "qn_" prefix is stripped before the lookup is performed).
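    # For example (illustrative values), a CharField with max_length=255
    # interpolates as:
    #   'NVARCHAR2(%(max_length)s)' % {'max_length': 255} -> 'NVARCHAR2(255)'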
data_types = {
'AutoField': 'NUMBER(11)',
'BinaryField': 'BLOB',
'BooleanField': 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))',
'CharField': 'NVARCHAR2(%(max_length)s)',
'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
'DateField': 'DATE',
'DateTimeField': 'TIMESTAMP',
'DecimalField': 'NUMBER(%(max_digits)s, %(decimal_places)s)',
'FileField': 'NVARCHAR2(%(max_length)s)',
'FilePathField': 'NVARCHAR2(%(max_length)s)',
'FloatField': 'DOUBLE PRECISION',
'IntegerField': 'NUMBER(11)',
'BigIntegerField': 'NUMBER(19)',
'IPAddressField': 'VARCHAR2(15)',
'GenericIPAddressField': 'VARCHAR2(39)',
'NullBooleanField': 'NUMBER(1) CHECK ((%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL))',
'OneToOneField': 'NUMBER(11)',
'PositiveIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
'PositiveSmallIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
'SlugField': 'NVARCHAR2(%(max_length)s)',
'SmallIntegerField': 'NUMBER(11)',
'TextField': 'NCLOB',
'TimeField': 'TIMESTAMP',
'URLField': 'VARCHAR2(%(max_length)s)',
}
def __init__(self, connection):
super(DatabaseCreation, self).__init__(connection)
def _create_test_db(self, verbosity=1, autoclobber=False):
TEST_NAME = self._test_database_name()
TEST_USER = self._test_database_user()
TEST_PASSWD = self._test_database_passwd()
TEST_TBLSPACE = self._test_database_tblspace()
TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
parameters = {
'dbname': TEST_NAME,
'user': TEST_USER,
'password': TEST_PASSWD,
'tblspace': TEST_TBLSPACE,
'tblspace_temp': TEST_TBLSPACE_TMP,
}
cursor = self.connection.cursor()
if self._test_database_create():
try:
self._execute_test_db_creation(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_NAME)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test database '%s'..." % self.connection.alias)
self._execute_test_db_destruction(cursor, parameters, verbosity)
self._execute_test_db_creation(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
if self._test_user_create():
if verbosity >= 1:
print("Creating test user...")
try:
self._create_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error creating the test user: %s\n" % e)
if not autoclobber:
confirm = input("It appears the test user, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_USER)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test user...")
self._destroy_test_user(cursor, parameters, verbosity)
if verbosity >= 1:
print("Creating test user...")
self._create_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error recreating the test user: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
real_settings = settings.DATABASES[self.connection.alias]
real_settings['SAVED_USER'] = self.connection.settings_dict['SAVED_USER'] = self.connection.settings_dict['USER']
real_settings['SAVED_PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD'] = self.connection.settings_dict['PASSWORD']
real_settings['TEST_USER'] = real_settings['USER'] = self.connection.settings_dict['TEST_USER'] = self.connection.settings_dict['USER'] = TEST_USER
real_settings['PASSWORD'] = self.connection.settings_dict['PASSWORD'] = TEST_PASSWD
return self.connection.settings_dict['NAME']
def _destroy_test_db(self, test_database_name, verbosity=1):
"""
Destroy a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
TEST_NAME = self._test_database_name()
TEST_USER = self._test_database_user()
TEST_PASSWD = self._test_database_passwd()
TEST_TBLSPACE = self._test_database_tblspace()
TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']
parameters = {
'dbname': TEST_NAME,
'user': TEST_USER,
'password': TEST_PASSWD,
'tblspace': TEST_TBLSPACE,
'tblspace_temp': TEST_TBLSPACE_TMP,
}
cursor = self.connection.cursor()
time.sleep(1) # To avoid "database is being accessed by other users" errors.
if self._test_user_create():
if verbosity >= 1:
print('Destroying test user...')
self._destroy_test_user(cursor, parameters, verbosity)
if self._test_database_create():
if verbosity >= 1:
print('Destroying test database tables...')
self._execute_test_db_destruction(cursor, parameters, verbosity)
self.connection.close()
def _execute_test_db_creation(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_create_test_db(): dbname = %s" % parameters['dbname'])
statements = [
"""CREATE TABLESPACE %(tblspace)s
DATAFILE '%(tblspace)s.dbf' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 200M
""",
"""CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
TEMPFILE '%(tblspace_temp)s.dbf' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 100M
""",
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _create_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_create_test_user(): username = %s" % parameters['user'])
statements = [
"""CREATE USER %(user)s
IDENTIFIED BY %(password)s
DEFAULT TABLESPACE %(tblspace)s
TEMPORARY TABLESPACE %(tblspace_temp)s
QUOTA UNLIMITED ON %(tblspace)s
""",
"""GRANT CONNECT, RESOURCE TO %(user)s""",
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_test_db_destruction(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_execute_test_db_destruction(): dbname=%s" % parameters['dbname'])
statements = [
'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _destroy_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_destroy_test_user(): user=%s" % parameters['user'])
print("Be patient. This can take some time...")
statements = [
'DROP USER %(user)s CASCADE',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_statements(self, cursor, statements, parameters, verbosity):
for template in statements:
stmt = template % parameters
if verbosity >= 2:
print(stmt)
try:
cursor.execute(stmt)
except Exception as err:
sys.stderr.write("Failed (%s)\n" % (err))
raise
def _test_database_name(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
try:
if self.connection.settings_dict['TEST_NAME']:
name = self.connection.settings_dict['TEST_NAME']
except AttributeError:
pass
return name
def _test_database_create(self):
return self.connection.settings_dict.get('TEST_CREATE', True)
def _test_user_create(self):
return self.connection.settings_dict.get('TEST_USER_CREATE', True)
def _test_database_user(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['USER']
try:
if self.connection.settings_dict['TEST_USER']:
name = self.connection.settings_dict['TEST_USER']
except KeyError:
pass
return name
def _test_database_passwd(self):
name = PASSWORD
try:
if self.connection.settings_dict['TEST_PASSWD']:
name = self.connection.settings_dict['TEST_PASSWD']
except KeyError:
pass
return name
def _test_database_tblspace(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
try:
if self.connection.settings_dict['TEST_TBLSPACE']:
name = self.connection.settings_dict['TEST_TBLSPACE']
except KeyError:
pass
return name
def _test_database_tblspace_tmp(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] + '_temp'
try:
if self.connection.settings_dict['TEST_TBLSPACE_TMP']:
name = self.connection.settings_dict['TEST_TBLSPACE_TMP']
except KeyError:
pass
return name
def _get_test_db_name(self):
"""
We need to return the 'production' DB name to get the test DB creation
        machinery to work. This isn't a big deal in this case because DB
        names as handled by Django don't have real counterparts in Oracle.
"""
return self.connection.settings_dict['NAME']
def test_db_signature(self):
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME'],
self._test_database_user(),
)
| apache-2.0 |
bjolivot/ansible | lib/ansible/plugins/action/iosxr_config.py | 79 | 4164 | #
# (c) 2017, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.iosxr import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
def run(self, tmp=None, task_vars=None):
if self._task.args.get('src'):
try:
self._handle_template()
except ValueError as exc:
return dict(failed=True, msg=exc.message)
result = super(ActionModule, self).run(tmp, task_vars)
if self._task.args.get('backup') and result.get('__backup__'):
# User requested backup and no error occurred in module.
            # NOTE: If there is a parameter error, the '__backup__' key may not be in results.
filepath = self._write_backup(task_vars['inventory_hostname'],
result['__backup__'])
result['backup_path'] = filepath
# strip out any keys that have two leading and two trailing
# underscore characters
        for key in list(result.keys()):
if PRIVATE_KEYS_RE.match(key):
del result[key]
return result
def _get_working_path(self):
cwd = self._loader.get_basedir()
if self._task._role is not None:
cwd = self._task._role._role_path
return cwd
def _write_backup(self, host, contents):
backup_path = self._get_working_path() + '/backup'
if not os.path.exists(backup_path):
os.mkdir(backup_path)
for fn in glob.glob('%s/%s*' % (backup_path, host)):
os.remove(fn)
tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
open(filename, 'w').write(contents)
return filename
def _handle_template(self):
src = self._task.args.get('src')
working_path = self._get_working_path()
if os.path.isabs(src) or urlsplit('src').scheme:
source = src
else:
source = self._loader.path_dwim_relative(working_path, 'templates', src)
if not source:
source = self._loader.path_dwim_relative(working_path, src)
if not os.path.exists(source):
raise ValueError('path specified in src not found')
try:
with open(source, 'r') as f:
template_data = to_text(f.read())
except IOError:
return dict(failed=True, msg='unable to load src file')
# Create a template search path in the following order:
# [working_path, self_role_path, dependent_role_paths, dirname(source)]
searchpath = [working_path]
if self._task._role is not None:
searchpath.append(self._task._role._role_path)
        if hasattr(self._task, "_block"):
dep_chain = self._task._block.get_dep_chain()
if dep_chain is not None:
for role in dep_chain:
searchpath.append(role._role_path)
searchpath.append(os.path.dirname(source))
self._templar.environment.loader.searchpath = searchpath
self._task.args['src'] = self._templar.template(template_data)
| gpl-3.0 |
MicronSW/linux-zynq | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
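#
# For example (illustrative values only), an input of the three little-endian
# 32-bit words 2, 0 and 1 produces the line:
#   0=2 1=0 2=1
# Indices are printed in hex, so the eleventh value would appear as "a=...".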
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
home-assistant/home-assistant | homeassistant/components/jewish_calendar/sensor.py | 5 | 6627 | """Platform to retrieve Jewish calendar information for Home Assistant."""
import logging
import hdate
from homeassistant.components.sensor import SensorEntity
from homeassistant.const import DEVICE_CLASS_TIMESTAMP, SUN_EVENT_SUNSET
from homeassistant.helpers.sun import get_astral_event_date
import homeassistant.util.dt as dt_util
from . import DOMAIN, SENSOR_TYPES
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Jewish calendar sensor platform."""
if discovery_info is None:
return
sensors = [
JewishCalendarSensor(hass.data[DOMAIN], sensor, sensor_info)
for sensor, sensor_info in SENSOR_TYPES["data"].items()
]
sensors.extend(
JewishCalendarTimeSensor(hass.data[DOMAIN], sensor, sensor_info)
for sensor, sensor_info in SENSOR_TYPES["time"].items()
)
async_add_entities(sensors)
class JewishCalendarSensor(SensorEntity):
"""Representation of an Jewish calendar sensor."""
def __init__(self, data, sensor, sensor_info):
"""Initialize the Jewish calendar sensor."""
self._location = data["location"]
self._type = sensor
self._name = f"{data['name']} {sensor_info[0]}"
self._icon = sensor_info[1]
self._hebrew = data["language"] == "hebrew"
self._candle_lighting_offset = data["candle_lighting_offset"]
self._havdalah_offset = data["havdalah_offset"]
self._diaspora = data["diaspora"]
self._state = None
self._prefix = data["prefix"]
self._holiday_attrs = {}
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self) -> str:
"""Generate a unique id."""
return f"{self._prefix}_{self._type}"
@property
def icon(self):
"""Icon to display in the front end."""
return self._icon
@property
def state(self):
"""Return the state of the sensor."""
return self._state
async def async_update(self):
"""Update the state of the sensor."""
now = dt_util.now()
_LOGGER.debug("Now: %s Location: %r", now, self._location)
today = now.date()
sunset = dt_util.as_local(
get_astral_event_date(self.hass, SUN_EVENT_SUNSET, today)
)
_LOGGER.debug("Now: %s Sunset: %s", now, sunset)
daytime_date = hdate.HDate(today, diaspora=self._diaspora, hebrew=self._hebrew)
# The Jewish day starts after darkness (called "tzais") and finishes at
# sunset ("shkia"). The time in between is a gray area (aka "Bein
# Hashmashot" - literally: "in between the sun and the moon").
# For some sensors, it is more interesting to consider the date to be
# tomorrow based on sunset ("shkia"), for others based on "tzais".
# Hence the following variables.
after_tzais_date = after_shkia_date = daytime_date
today_times = self.make_zmanim(today)
if now > sunset:
after_shkia_date = daytime_date.next_day
if today_times.havdalah and now > today_times.havdalah:
after_tzais_date = daytime_date.next_day
self._state = self.get_state(daytime_date, after_shkia_date, after_tzais_date)
_LOGGER.debug("New value for %s: %s", self._type, self._state)
def make_zmanim(self, date):
"""Create a Zmanim object."""
return hdate.Zmanim(
date=date,
location=self._location,
candle_lighting_offset=self._candle_lighting_offset,
havdalah_offset=self._havdalah_offset,
hebrew=self._hebrew,
)
@property
def extra_state_attributes(self):
"""Return the state attributes."""
if self._type != "holiday":
return {}
return self._holiday_attrs
def get_state(self, daytime_date, after_shkia_date, after_tzais_date):
"""For a given type of sensor, return the state."""
# Terminology note: by convention in py-libhdate library, "upcoming"
# refers to "current" or "upcoming" dates.
if self._type == "date":
return after_shkia_date.hebrew_date
if self._type == "weekly_portion":
# Compute the weekly portion based on the upcoming shabbat.
return after_tzais_date.upcoming_shabbat.parasha
if self._type == "holiday":
self._holiday_attrs["id"] = after_shkia_date.holiday_name
self._holiday_attrs["type"] = after_shkia_date.holiday_type.name
self._holiday_attrs["type_id"] = after_shkia_date.holiday_type.value
return after_shkia_date.holiday_description
if self._type == "omer_count":
return after_shkia_date.omer_day
if self._type == "daf_yomi":
return daytime_date.daf_yomi
return None
class JewishCalendarTimeSensor(JewishCalendarSensor):
    """Implement attributes for sensors returning times."""
@property
def state(self):
"""Return the state of the sensor."""
return dt_util.as_utc(self._state) if self._state is not None else None
@property
def device_class(self):
"""Return the class of this sensor."""
return DEVICE_CLASS_TIMESTAMP
@property
def extra_state_attributes(self):
"""Return the state attributes."""
attrs = {}
if self._state is None:
return attrs
return attrs
def get_state(self, daytime_date, after_shkia_date, after_tzais_date):
"""For a given type of sensor, return the state."""
if self._type == "upcoming_shabbat_candle_lighting":
times = self.make_zmanim(
after_tzais_date.upcoming_shabbat.previous_day.gdate
)
return times.candle_lighting
if self._type == "upcoming_candle_lighting":
times = self.make_zmanim(
after_tzais_date.upcoming_shabbat_or_yom_tov.first_day.previous_day.gdate
)
return times.candle_lighting
if self._type == "upcoming_shabbat_havdalah":
times = self.make_zmanim(after_tzais_date.upcoming_shabbat.gdate)
return times.havdalah
if self._type == "upcoming_havdalah":
times = self.make_zmanim(
after_tzais_date.upcoming_shabbat_or_yom_tov.last_day.gdate
)
return times.havdalah
times = self.make_zmanim(dt_util.now()).zmanim
return times[self._type]
| apache-2.0 |
sharkykh/SickRage | sickbeard/databases/cache_db.py | 5 | 5192 | # coding=utf-8
# Author: Nic Wolfe <[email protected]>
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from sickbeard import db
# Add new migrations at the bottom of the list; subclass the previous migration.
class InitialSchema(db.SchemaUpgrade):
def test(self):
return self.hasTable("db_version")
def execute(self):
queries = [
("CREATE TABLE lastUpdate (provider TEXT, time NUMERIC);",),
("CREATE TABLE lastSearch (provider TEXT, time NUMERIC);",),
("CREATE TABLE scene_exceptions (exception_id INTEGER PRIMARY KEY, indexer_id INTEGER, show_name TEXT, season NUMERIC DEFAULT -1, custom NUMERIC DEFAULT 0);",),
("CREATE TABLE scene_names (indexer_id INTEGER, name TEXT);",),
("CREATE TABLE network_timezones (network_name TEXT PRIMARY KEY, timezone TEXT);",),
("CREATE TABLE scene_exceptions_refresh (list TEXT PRIMARY KEY, last_refreshed INTEGER);",),
("CREATE TABLE db_version (db_version INTEGER);",),
("INSERT INTO db_version(db_version) VALUES (1);",),
]
for query in queries:
if len(query) == 1:
self.connection.action(query[0])
else:
self.connection.action(query[0], query[1:])
class AddSceneExceptions(InitialSchema):
def test(self):
return self.hasTable("scene_exceptions")
def execute(self):
self.connection.action(
"CREATE TABLE scene_exceptions (exception_id INTEGER PRIMARY KEY, indexer_id INTEGER, show_name TEXT);")
class AddSceneNameCache(AddSceneExceptions):
def test(self):
return self.hasTable("scene_names")
def execute(self):
self.connection.action("CREATE TABLE scene_names (indexer_id INTEGER, name TEXT);")
class AddNetworkTimezones(AddSceneNameCache):
def test(self):
return self.hasTable("network_timezones")
def execute(self):
self.connection.action("CREATE TABLE network_timezones (network_name TEXT PRIMARY KEY, timezone TEXT);")
class AddLastSearch(AddNetworkTimezones):
def test(self):
return self.hasTable("lastSearch")
def execute(self):
self.connection.action("CREATE TABLE lastSearch (provider TEXT, time NUMERIC);")
class AddSceneExceptionsSeasons(AddLastSearch):
def test(self):
return self.hasColumn("scene_exceptions", "season")
def execute(self):
self.addColumn("scene_exceptions", "season", "NUMERIC", -1)
class AddSceneExceptionsCustom(AddSceneExceptionsSeasons): # pylint:disable=too-many-ancestors
def test(self):
return self.hasColumn("scene_exceptions", "custom")
def execute(self):
self.addColumn("scene_exceptions", "custom", "NUMERIC", 0)
class AddSceneExceptionsRefresh(AddSceneExceptionsCustom): # pylint:disable=too-many-ancestors
def test(self):
return self.hasTable("scene_exceptions_refresh")
def execute(self):
self.connection.action(
"CREATE TABLE scene_exceptions_refresh (list TEXT PRIMARY KEY, last_refreshed INTEGER);")
class ConvertSceneExeptionsToIndexerScheme(AddSceneExceptionsRefresh): # pylint:disable=too-many-ancestors
def test(self):
return self.hasColumn("scene_exceptions", "indexer_id")
def execute(self):
self.connection.action("DROP TABLE IF EXISTS tmp_scene_exceptions;")
self.connection.action("ALTER TABLE scene_exceptions RENAME TO tmp_scene_exceptions;")
self.connection.action("CREATE TABLE scene_exceptions (exception_id INTEGER PRIMARY KEY, indexer_id INTEGER, show_name TEXT, season NUMERIC DEFAULT -1, custom NUMERIC DEFAULT 0);")
self.connection.action("INSERT INTO scene_exceptions SELECT exception_id, tvdb_id as indexer_id, show_name, season, custom FROM tmp_scene_exceptions;")
self.connection.action("DROP TABLE tmp_scene_exceptions;")
class ConvertSceneNamesToIndexerScheme(AddSceneExceptionsRefresh): # pylint:disable=too-many-ancestors
def test(self):
return self.hasColumn("scene_names", "indexer_id")
def execute(self):
self.connection.action("DROP TABLE IF EXISTS tmp_scene_names;")
self.connection.action("ALTER TABLE scene_names RENAME TO tmp_scene_names;")
self.connection.action("CREATE TABLE scene_names (indexer_id INTEGER, name TEXT);")
self.connection.action("INSERT INTO scene_names SELECT * FROM tmp_scene_names;")
self.connection.action("DROP TABLE tmp_scene_names;")
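# A sketch of how the next migration in this chain would be added, following
# the note at the top of the file; the column name used here is hypothetical
# and not part of SickRage's schema.
class AddExampleColumn(ConvertSceneNamesToIndexerScheme):  # pylint:disable=too-many-ancestors
    def test(self):
        return self.hasColumn("scene_names", "example_flag")
    def execute(self):
        self.addColumn("scene_names", "example_flag", "NUMERIC", 0)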
| gpl-3.0 |
daemonmaker/pylearn2 | pylearn2/space/tests/test_space.py | 32 | 58117 | """
Tests for space utilities.
"""
from __future__ import print_function
import itertools
import warnings
import numpy as np
from theano.compat.six.moves import xrange
import theano
from theano import tensor
# Can't use nose.tools.assert_raises, only introduced in python 2.7. Use
# numpy.testing.assert_raises instead
from pylearn2.space import (SimplyTypedSpace,
VectorSpace,
Conv2DSpace,
CompositeSpace,
VectorSequenceSpace,
IndexSequenceSpace,
IndexSpace,
NullSpace,
is_symbolic_batch)
from pylearn2.utils import function, safe_zip
def test_np_format_as_vector2vector():
vector_space_initial = VectorSpace(dim=8 * 8 * 3, sparse=False)
vector_space_final = VectorSpace(dim=8 * 8 * 3, sparse=False)
data = np.arange(5 * 8 * 8 * 3).reshape(5, 8 * 8 * 3)
rval = vector_space_initial.np_format_as(data, vector_space_final)
assert np.all(rval == data)
def test_np_format_as_index2index():
index_space_initial = IndexSpace(max_labels=10, dim=1)
index_space_final = IndexSpace(max_labels=10, dim=1)
data = np.array([[0], [2], [1], [3], [5], [8], [1]])
rval = index_space_initial.np_format_as(data, index_space_final)
assert index_space_initial == index_space_final
assert np.all(rval == data)
index_space_downcast = IndexSpace(max_labels=10, dim=1, dtype='int32')
rval = index_space_initial.np_format_as(data, index_space_downcast)
assert index_space_initial != index_space_downcast
assert np.all(rval == data)
assert rval.dtype == 'int32' and data.dtype == 'int64'
def test_np_format_as_conv2d2conv2d():
conv2d_space_initial = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
conv2d_space_final = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
data = np.arange(5 * 8 * 8 * 3).reshape(5, 3, 8, 8)
rval = conv2d_space_initial.np_format_as(data, conv2d_space_final)
assert np.all(rval == data)
conv2d_space1 = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('c', 'b', 1, 0))
conv2d_space0 = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
data = np.arange(5 * 8 * 8 * 3).reshape(5, 3, 8, 8)
rval = conv2d_space0.np_format_as(data, conv2d_space1)
nval = data.transpose(1, 0, 3, 2)
assert np.all(rval == nval)
def test_np_format_as_vector2conv2d():
vector_space = VectorSpace(dim=8 * 8 * 3, sparse=False)
conv2d_space = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
data = np.arange(5 * 8 * 8 * 3).reshape(5, 8 * 8 * 3)
rval = vector_space.np_format_as(data, conv2d_space)
# Get data in a Conv2DSpace with default axes
new_axes = conv2d_space.default_axes
axis_to_shape = {'b': 5, 'c': 3, 0: 8, 1: 8}
new_shape = tuple([axis_to_shape[ax] for ax in new_axes])
nval = data.reshape(new_shape)
# Then transpose
nval = nval.transpose(*[new_axes.index(ax)
for ax in conv2d_space.axes])
assert np.all(rval == nval)
def test_np_format_as_conv2d2vector():
vector_space = VectorSpace(dim=8 * 8 * 3, sparse=False)
conv2d_space = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
data = np.arange(5 * 8 * 8 * 3).reshape(5, 3, 8, 8)
rval = conv2d_space.np_format_as(data, vector_space)
nval = data.transpose(*[conv2d_space.axes.index(ax)
for ax in conv2d_space.default_axes])
nval = nval.reshape(5, 3 * 8 * 8)
assert np.all(rval == nval)
vector_space = VectorSpace(dim=8 * 8 * 3, sparse=False)
conv2d_space = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('c', 'b', 0, 1))
data = np.arange(5 * 8 * 8 * 3).reshape(3, 5, 8, 8)
rval = conv2d_space.np_format_as(data, vector_space)
nval = data.transpose(*[conv2d_space.axes.index(ax)
for ax in conv2d_space.default_axes])
nval = nval.reshape(5, 3 * 8 * 8)
assert np.all(rval == nval)
def test_np_format_as_conv2d_vector_conv2d():
conv2d_space1 = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('c', 'b', 1, 0))
vector_space = VectorSpace(dim=8 * 8 * 3, sparse=False)
conv2d_space0 = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
data = np.arange(5 * 8 * 8 * 3).reshape(5, 3, 8, 8)
vecval = conv2d_space0.np_format_as(data, vector_space)
rval1 = vector_space.np_format_as(vecval, conv2d_space1)
rval2 = conv2d_space0.np_format_as(data, conv2d_space1)
assert np.allclose(rval1, rval2)
nval = data.transpose(1, 0, 3, 2)
assert np.allclose(nval, rval1)
def test_np_format_as_vectorsequence2vectorsequence():
vector_sequence_space1 = VectorSequenceSpace(dim=3, dtype='float32')
vector_sequence_space2 = VectorSequenceSpace(dim=3, dtype='float64')
data = np.asarray(np.random.uniform(low=0.0,
high=1.0,
size=(10, 3)),
dtype=vector_sequence_space1.dtype)
rval = vector_sequence_space1.np_format_as(data, vector_sequence_space2)
assert np.all(rval == data)
def test_np_format_as_indexsequence2indexsequence():
index_sequence_space1 = IndexSequenceSpace(max_labels=6, dim=1,
dtype='int16')
index_sequence_space2 = IndexSequenceSpace(max_labels=6, dim=1,
dtype='int32')
data = np.asarray(np.random.randint(low=0,
high=5,
size=(10, 1)),
dtype=index_sequence_space1.dtype)
rval = index_sequence_space1.np_format_as(data, index_sequence_space2)
assert np.all(rval == data)
def test_np_format_as_indexsequence2vectorsequence():
index_sequence_space = IndexSequenceSpace(max_labels=6, dim=1)
vector_sequence_space = VectorSequenceSpace(dim=6)
data = np.array([[0], [1], [4], [3]],
dtype=index_sequence_space.dtype)
rval = index_sequence_space.np_format_as(data, vector_sequence_space)
true_val = np.array([[1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0]])
assert np.all(rval == true_val)
def test_np_format_as_sequence2other():
vector_sequence_space = VectorSequenceSpace(dim=3)
vector_space = VectorSpace(dim=3)
data = np.random.uniform(low=0.0, high=1.0, size=(10, 3))
np.testing.assert_raises(ValueError, vector_sequence_space.np_format_as,
data, vector_space)
index_sequence_space = IndexSequenceSpace(max_labels=6, dim=1)
index_space = IndexSpace(max_labels=6, dim=1)
data = np.random.randint(low=0, high=5, size=(10, 1))
np.testing.assert_raises(ValueError, index_sequence_space.np_format_as,
data, index_space)
def test_np_format_as_composite_composite():
"""
Test using CompositeSpace.np_format_as() to convert between
composite spaces that have the same tree structure, but different
leaf spaces.
"""
def make_composite_space(image_space):
"""
Returns a composite space with a particular tree structure.
"""
return CompositeSpace((CompositeSpace((image_space,) * 2),
VectorSpace(dim=1)))
shape = np.array([8, 11])
channels = 3
datum_size = channels * shape.prod()
composite_topo = make_composite_space(Conv2DSpace(shape=shape,
num_channels=channels))
composite_flat = make_composite_space(VectorSpace(dim=datum_size))
def make_vector_data(batch_size, space):
"""
Returns a batch of synthetic data appropriate to the provided space.
Supports VectorSpaces, and CompositeSpaces of VectorSpaces.
"""
if isinstance(space, CompositeSpace):
return tuple(make_vector_data(batch_size, subspace)
for subspace in space.components)
else:
assert isinstance(space, VectorSpace)
result = np.random.rand(batch_size, space.dim)
if space.dtype is not None:
return np.asarray(result, dtype=space.dtype)
else:
return result
batch_size = 5
flat_data = make_vector_data(batch_size, composite_flat)
composite_flat.np_validate(flat_data)
topo_data = composite_flat.np_format_as(flat_data, composite_topo)
composite_topo.np_validate(topo_data)
new_flat_data = composite_topo.np_format_as(topo_data,
composite_flat)
def get_shape(batch):
"""
Returns the (nested) shape(s) of a (nested) batch.
"""
if isinstance(batch, np.ndarray):
return batch.shape
else:
return tuple(get_shape(b) for b in batch)
def batch_equals(batch_0, batch_1):
"""
Returns true if all corresponding elements of two batches are
equal. Supports composite data (i.e. nested tuples of data).
"""
assert type(batch_0) == type(batch_1)
if isinstance(batch_0, tuple):
if len(batch_0) != len(batch_1):
return False
return np.all(tuple(batch_equals(b0, b1)
for b0, b1 in zip(batch_0, batch_1)))
else:
assert isinstance(batch_0, np.ndarray)
return np.all(batch_0 == batch_1)
assert batch_equals(new_flat_data, flat_data)
def test_vector_to_conv_c01b_invertible():
"""
Tests that the format_as methods between Conv2DSpace
and VectorSpace are invertible for the ('c', 0, 1, 'b')
axis format.
"""
rng = np.random.RandomState([2013, 5, 1])
batch_size = 3
rows = 4
cols = 5
channels = 2
conv = Conv2DSpace([rows, cols],
channels=channels,
axes=('c', 0, 1, 'b'))
vec = VectorSpace(conv.get_total_dimension())
X = conv.make_batch_theano()
Y = conv.format_as(X, vec)
Z = vec.format_as(Y, conv)
A = vec.make_batch_theano()
B = vec.format_as(A, conv)
C = conv.format_as(B, vec)
f = function([X, A], [Z, C])
X = rng.randn(*(conv.get_origin_batch(batch_size).shape)).astype(X.dtype)
A = rng.randn(*(vec.get_origin_batch(batch_size).shape)).astype(A.dtype)
Z, C = f(X, A)
np.testing.assert_allclose(Z, X)
np.testing.assert_allclose(C, A)
def test_broadcastable():
v = VectorSpace(5).make_theano_batch(batch_size=1)
np.testing.assert_(v.broadcastable[0])
c = Conv2DSpace((5, 5), channels=3,
axes=['c', 0, 1, 'b']).make_theano_batch(batch_size=1)
np.testing.assert_(c.broadcastable[-1])
d = Conv2DSpace((5, 5), channels=3,
axes=['b', 0, 1, 'c']).make_theano_batch(batch_size=1)
np.testing.assert_(d.broadcastable[0])
def test_compare_index():
dims = [5, 5, 5, 6]
max_labels = [10, 10, 9, 10]
index_spaces = [IndexSpace(dim=dim, max_labels=max_label)
for dim, max_label in zip(dims, max_labels)]
assert index_spaces[0] == index_spaces[1]
assert not any(index_spaces[i] == index_spaces[j]
for i, j in itertools.combinations([1, 2, 3], 2))
vector_space = VectorSpace(dim=5)
conv2d_space = Conv2DSpace(shape=(8, 8), num_channels=3,
axes=('b', 'c', 0, 1))
composite_space = CompositeSpace((index_spaces[0],))
assert not any(index_space == vector_space for index_space in index_spaces)
assert not any(index_space == composite_space
for index_space in index_spaces)
assert not any(index_space == conv2d_space for index_space in index_spaces)
def test_np_format_as_index2vector():
# Test 5 random batches for shape, number of non-zeros
for _ in xrange(5):
max_labels = np.random.randint(2, 10)
batch_size = np.random.randint(1, 10)
labels = np.random.randint(1, 10)
batch = np.random.random_integers(max_labels - 1,
size=(batch_size, labels))
index_space = IndexSpace(dim=labels, max_labels=max_labels)
vector_space_merge = VectorSpace(dim=max_labels)
vector_space_concatenate = VectorSpace(dim=max_labels * labels)
merged = index_space.np_format_as(batch, vector_space_merge)
concatenated = index_space.np_format_as(batch,
vector_space_concatenate)
assert merged.shape == (batch_size, max_labels)
assert concatenated.shape == (batch_size, max_labels * labels)
assert np.count_nonzero(merged) <= batch.size
assert np.count_nonzero(concatenated) == batch.size
assert np.all(np.unique(concatenated) == np.array([0, 1]))
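# Worked illustration of the two layouts checked above (numbers chosen here
# for clarity, not taken from the random batch): with max_labels=4 and
# labels=2, the index row [0, 2] formats to
#   merged (dim=max_labels):                [1, 0, 1, 0]
#   concatenated (dim=max_labels * labels): [1, 0, 0, 0, 0, 0, 1, 0]
# i.e. the per-label one-hot vectors are summed per row in the first case
# and laid side by side in the second.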
# Make sure Theano variables give the same result
batch = tensor.lmatrix('batch')
single = tensor.lvector('single')
batch_size = np.random.randint(1, 10)
np_batch = np.random.random_integers(max_labels - 1,
size=(batch_size, labels))
np_single = np.random.random_integers(max_labels - 1,
size=(labels))
f_batch_merge = theano.function(
[batch], index_space._format_as_impl(False, batch, vector_space_merge)
)
f_batch_concatenate = theano.function(
[batch], index_space._format_as_impl(False, batch,
vector_space_concatenate)
)
f_single_merge = theano.function(
[single], index_space._format_as_impl(False, single,
vector_space_merge)
)
f_single_concatenate = theano.function(
[single], index_space._format_as_impl(False, single,
vector_space_concatenate)
)
np.testing.assert_allclose(
f_batch_merge(np_batch),
index_space._format_as_impl(True, np_batch, vector_space_merge)
)
np.testing.assert_allclose(
f_batch_concatenate(np_batch),
index_space._format_as_impl(True, np_batch, vector_space_concatenate)
)
np.testing.assert_allclose(
f_single_merge(np_single),
index_space._format_as_impl(True, np_single, vector_space_merge)
)
np.testing.assert_allclose(
f_single_concatenate(np_single),
index_space._format_as_impl(True, np_single, vector_space_concatenate)
)
def test_dtypes():
batch_size = 2
dtype_is_none_msg = ("self.dtype is None, so you must provide a "
"non-None dtype argument to this method.")
all_scalar_dtypes = tuple(t.dtype
for t in theano.scalar.all_types)
def underspecifies_dtypes(from_space, to_dtype):
"""
Returns True iff the from_space's dtype and to_dtype are both None. If
from_space is a CompositeSpace, this recurses into its tree of
subspaces.
"""
if isinstance(from_space, CompositeSpace):
if not isinstance(to_dtype, tuple):
return any(underspecifies_dtypes(s, to_dtype)
for s in from_space.components)
else:
return any(underspecifies_dtypes(s, d)
for s, d
in safe_zip(from_space.components, to_dtype))
else:
assert not isinstance(to_dtype, tuple), ("Tree structure "
"mismatch between "
"from_space and "
"to_dtype.")
return from_space.dtype is None and to_dtype is None
def get_expected_batch_dtype(from_space, to_dtype):
"""
Returns the expected dtype of a batch returned from
from_space.f(batch, to_dtype), where f is one of the three batch
creation methods (get_origin_batch, make_theano_batch, and
make_shared_batch)
"""
if to_dtype == 'floatX':
to_dtype = theano.config.floatX
if isinstance(from_space, CompositeSpace):
if not isinstance(to_dtype, tuple):
to_dtype = (to_dtype, ) * len(from_space.components)
return tuple(get_expected_batch_dtype(subspace, subtype)
for subspace, subtype
in safe_zip(from_space.components, to_dtype))
else:
assert not (from_space.dtype is None and to_dtype is None)
return from_space.dtype if to_dtype is None else to_dtype
def get_batch_dtype(batch):
"""
Returns the dtype of a batch, as a string, or nested tuple of strings.
For simple batches such as ndarray, this returns str(batch.dtype).
For the None batches "used" by NullSpace, this returns a special string
"NullSpace dtype".
For composite batches, this returns (nested) tuples of dtypes.
"""
if isinstance(batch, tuple):
return tuple(get_batch_dtype(b) for b in batch)
elif batch is None:
return "NullSpace dtype"
else:
return batch.dtype
def test_get_origin_batch(from_space, to_type):
# Expect failure if neither we nor the from_space specifies a dtype
if underspecifies_dtypes(from_space, to_type):
try:
from_space.get_origin_batch(batch_size, dtype=to_type)
except TypeError as ex:
assert dtype_is_none_msg in str(ex)
except Exception as unexpected_ex:
print("Expected an exception of type TypeError with message "
"%s, got a %s instead with message %s." %
(dtype_is_none_msg,
type(unexpected_ex),
str(unexpected_ex)))
raise unexpected_ex
finally:
return
batch = from_space.get_origin_batch(batch_size, dtype=to_type)
assert get_batch_dtype(batch) == get_expected_batch_dtype(from_space,
to_type)
def test_make_shared_batch(from_space, to_type):
if underspecifies_dtypes(from_space, to_type):
try:
from_space.make_shared_batch(batch_size, dtype=to_type)
except TypeError as ex:
assert dtype_is_none_msg in str(ex)
except Exception as unexpected_ex:
print("Expected an exception of type TypeError with message "
"%s, got a %s instead with message %s." %
(dtype_is_none_msg,
type(unexpected_ex),
str(unexpected_ex)))
raise unexpected_ex
finally:
return
batch = from_space.make_shared_batch(batch_size=batch_size,
name='batch',
dtype=to_type)
assert (get_batch_dtype(batch) ==
get_expected_batch_dtype(from_space, to_type)), \
("\nget_batch_dtype(batch): %s\n"
"get_expected_batch_dtype(from_space, to_type): %s" %
(get_batch_dtype(batch),
get_expected_batch_dtype(from_space, to_type)))
def test_make_theano_batch(from_space, to_type):
kwargs = {'name': 'batch',
'dtype': to_type}
# Sparse VectorSpaces throw an exception if batch_size is specified.
if not (isinstance(from_space, VectorSpace) and from_space.sparse):
kwargs['batch_size'] = batch_size
if underspecifies_dtypes(from_space, to_type):
try:
from_space.make_theano_batch(**kwargs)
except TypeError as ex:
assert dtype_is_none_msg in str(ex)
except Exception as unexpected_ex:
print("Expected an exception of type TypeError with message "
"%s, got a %s instead with message %s." %
(dtype_is_none_msg,
type(unexpected_ex),
str(unexpected_ex)))
raise unexpected_ex
finally:
return
batch = from_space.make_theano_batch(**kwargs)
assert get_batch_dtype(batch) == get_expected_batch_dtype(from_space,
to_type)
def test_format(from_space, to_space, using_numeric_batch):
"""
Unit test for a call to from_space.np_format_as(batch, to_space)
"""
# Type-checks the arguments
for space, name in zip((from_space, to_space),
("from_space", "to_space")):
if not isinstance(space,
(VectorSpace, Conv2DSpace, CompositeSpace)):
raise TypeError("This test only supports spaces of type "
"VectorSpace, Conv2DSpace, and "
"CompositeSpace, not %s's type %s" %
(name, type(space)))
def get_batch(space, using_numeric_batch):
"""
Uses space.get_origin_batch() to return a numeric batch,
or space.make_theano_batch() to return a symbolic batch.
Uses a fallback dtype if the space itself doesn't have one.
"""
def specifies_all_dtypes(space):
"""
Returns True iff space has a completely specified dtype.
"""
if isinstance(space, CompositeSpace):
return all(specifies_all_dtypes(subspace)
for subspace in space.components)
else:
return space.dtype is not None
def replace_none_dtypes(dtype, fallback_dtype):
"""
Returns dtype, with any Nones replaced by fallback_dtype.
"""
if isinstance(dtype, tuple):
return tuple(replace_none_dtypes(d, fallback_dtype)
for d in dtype)
else:
return fallback_dtype if dtype is None else dtype
kwargs = {"batch_size": batch_size}
# Use this when space doesn't specify a dtype
fallback_dtype = theano.config.floatX
if not specifies_all_dtypes(space):
kwargs["dtype"] = replace_none_dtypes(space.dtype,
fallback_dtype)
if using_numeric_batch:
return space.get_origin_batch(**kwargs)
else:
# Sparse VectorSpaces throw an exception if batch_size is
# specified
if isinstance(space, VectorSpace) and space.sparse:
del kwargs["batch_size"]
kwargs["name"] = "space-generated batch"
return space.make_theano_batch(**kwargs)
def get_expected_warning(from_space, from_batch, to_space):
# composite -> composite
if isinstance(from_space, CompositeSpace) and \
isinstance(to_space, CompositeSpace):
for fs, fb, ts in safe_zip(from_space.components,
from_batch,
to_space.components):
warning, message = get_expected_warning(fs, fb, ts)
if warning is not None:
return warning, message
return None, None
# composite -> simple
if isinstance(from_space, CompositeSpace):
for fs, fb in safe_zip(from_space.components, from_batch):
warning, message = get_expected_warning(fs, fb, to_space)
if warning is not None:
return warning, message
return None, None
# simple -> composite
if isinstance(to_space, CompositeSpace):
if isinstance(from_space, VectorSpace) and \
isinstance(from_batch, theano.sparse.SparseVariable):
assert from_space.sparse
return (UserWarning,
'Formatting from a sparse VectorSpace to a '
'CompositeSpace is currently (2 Jan 2014) a '
'non-differentiable action. This is because it '
'calls slicing operations on a sparse batch '
'(e.g. "my_matrix[r:R, c:C]", which Theano does '
'not yet have a gradient operator for. If '
'autodifferentiation is reporting an error, '
'this may be why.')
for ts in to_space.components:
warning, message = get_expected_warning(from_space,
from_batch,
ts)
if warning is not None:
return warning, message
return None, None
# simple -> simple
return None, None
def get_expected_error(from_space, from_batch, to_space):
"""
Returns the type of error to be expected when calling
from_space.np_format_as(batch, to_space). Returns None if no error
should be expected.
"""
def contains_different_dtypes(space):
"""
Returns true if space contains different dtypes. None is
considered distinct from all actual dtypes.
"""
assert isinstance(space, CompositeSpace)
def get_shared_dtype_if_any(space):
"""
Returns space's dtype. If space is composite, returns the
dtype used by all of its subcomponents. Returns False if
the subcomponents use different dtypes.
"""
if isinstance(space, CompositeSpace):
dtypes = tuple(get_shared_dtype_if_any(c)
for c in space.components)
assert(len(dtypes) > 0)
if any(d != dtypes[0] for d in dtypes[1:]):
return False
return dtypes[0] # could be False, but that's fine
else:
return space.dtype
return get_shared_dtype_if_any(space) is False
assert (isinstance(from_space, CompositeSpace) ==
isinstance(from_batch, tuple))
# composite -> composite
if isinstance(from_space, CompositeSpace) and \
isinstance(to_space, CompositeSpace):
for fs, fb, ts in safe_zip(from_space.components,
from_batch,
to_space.components):
error, message = get_expected_error(fs, fb, ts)
if error is not None:
return error, message
return None, None
# composite -> simple
if isinstance(from_space, CompositeSpace):
if isinstance(to_space, Conv2DSpace):
return (NotImplementedError,
"CompositeSpace does not know how to format as "
"Conv2DSpace")
for fs, fb in safe_zip(from_space.components, from_batch):
error, message = get_expected_error(fs, fb, to_space)
if error is not None:
return error, message
if isinstance(to_space, VectorSpace) and \
contains_different_dtypes(from_space) and \
to_space.dtype is None:
return (TypeError,
"Tried to format components with differing dtypes "
"into a VectorSpace with no dtype of its own. "
"dtypes: ")
return None, None
# simple -> composite
if isinstance(to_space, CompositeSpace):
if isinstance(from_space, VectorSpace) and \
isinstance(from_batch, theano.sparse.SparseVariable):
assert from_space.sparse
return (UserWarning,
'Formatting from a sparse VectorSpace to a '
'CompositeSpace is currently (2 Jan 2014) a '
'non-differentiable action. This is because it '
'calls slicing operations on a sparse batch '
'(e.g. "my_matrix[r:R, c:C]", which Theano does '
'not yet have a gradient operator for. If '
'autodifferentiation is reporting an error, '
'this may be why.')
if isinstance(from_space, Conv2DSpace):
return (NotImplementedError,
"Conv2DSpace does not know how to format as "
"CompositeSpace")
for ts in to_space.components:
error, message = get_expected_error(from_space,
from_batch,
ts)
if error is not None:
return error, message
return None, None
#
# simple -> simple
#
def is_sparse(space):
return isinstance(space, VectorSpace) and space.sparse
def is_complex(arg):
"""
Returns whether a space or a batch has a complex dtype.
"""
return (arg.dtype is not None and
str(arg.dtype).startswith('complex'))
if isinstance(from_batch, tuple):
return (TypeError,
"This space only supports simple dtypes, but received "
"a composite batch.")
if is_complex(from_batch) and not is_complex(from_space):
return (TypeError,
"This space has a non-complex dtype (%s), and "
"thus cannot support complex batches of type %s." %
(from_space.dtype, from_batch.dtype))
if from_space.dtype is not None and \
from_space.dtype != from_batch.dtype:
return (TypeError,
"This space is for dtype %s, but recieved a "
"batch of dtype %s." %
(from_space.dtype, from_batch.dtype))
if is_sparse(from_space) and isinstance(to_space, Conv2DSpace):
return (TypeError,
"Formatting a SparseVariable to a Conv2DSpace "
"is not supported, since neither scipy nor "
"Theano has sparse tensors with more than 2 "
"dimensions. We need 4 dimensions to "
"represent a Conv2DSpace batch")
if is_complex(from_space) and not is_complex(to_space):
if is_symbolic_batch(from_batch):
return (TypeError,
"Casting from complex to real is ambiguous")
else:
return (np.ComplexWarning,
"Casting complex values to real discards the "
"imaginary part")
return None, None
def get_expected_formatted_dtype(from_batch, to_space):
"""
Returns the expected dtype of the batch returned from a call to
from_batch.format_as(batch, to_space). If the returned batch is a
nested tuple, the expected dtype will also be a nested tuple.
"""
def get_single_dtype(batch):
"""
Returns the dtype shared by all leaf nodes of the nested batch.
If the nested batch contains differing dtypes, this returns False.
None counts as a different dtype than non-None.
"""
if isinstance(batch, tuple):
assert len(batch) > 0
child_dtypes = tuple(get_single_dtype(b) for b in batch)
if any(c != child_dtypes[0] for c in child_dtypes[1:]):
return False
return child_dtypes[0] # may be False, but that's correct.
else:
return batch.dtype
# composite -> composite
if isinstance(from_batch, tuple) and \
isinstance(to_space, CompositeSpace):
return tuple(get_expected_formatted_dtype(b, s)
for b, s in safe_zip(from_batch,
to_space.components))
# composite -> simple
elif isinstance(from_batch, tuple):
if to_space.dtype is not None:
return to_space.dtype
else:
result = get_batch_dtype(from_batch)
if result is False:
raise TypeError("From_batch doesn't have a single "
"dtype: %s" %
str(get_batch_dtype(from_batch)))
return result
# simple -> composite
elif isinstance(to_space, CompositeSpace):
return tuple(get_expected_formatted_dtype(from_batch, s)
for s in to_space.components)
# simple -> simple with no dtype
elif to_space.dtype is None:
assert from_batch.dtype is not None
return str(from_batch.dtype)
# simple -> simple with a dtype
else:
return to_space.dtype
from_batch = get_batch(from_space, using_numeric_batch)
expected_error, expected_error_msg = get_expected_error(from_space,
from_batch,
to_space)
# For some reason, the "with assert_raises(expected_error) as context:"
# idiom isn't catching all the expected errors. Use this instead:
if expected_error is not None:
try:
# temporarily upgrades warnings to exceptions within this block
with warnings.catch_warnings():
warnings.simplefilter("error")
from_space._format_as(using_numeric_batch,
from_batch,
to_space)
except expected_error as ex:
assert str(ex).find(expected_error_msg) >= 0
except Exception as unknown_ex:
print("Expected exception of type %s, got %s." %
(expected_error.__name__, type(unknown_ex)))
raise unknown_ex
finally:
return
to_batch = from_space._format_as(using_numeric_batch,
from_batch,
to_space)
expected_dtypes = get_expected_formatted_dtype(from_batch, to_space)
actual_dtypes = get_batch_dtype(to_batch)
assert expected_dtypes == actual_dtypes, \
("\nexpected_dtypes: %s,\n"
"actual_dtypes: %s \n"
"from_space: %s\n"
"from_batch's dtype: %s\n"
"from_batch is theano?: %s\n"
"to_space: %s" % (expected_dtypes,
actual_dtypes,
from_space,
get_batch_dtype(from_batch),
is_symbolic_batch(from_batch),
to_space))
#
#
# End of test_format() function.
def test_dtype_getter(space):
"""
Tests the getter method of space's dtype property.
"""
def assert_composite_dtype_eq(space, dtype):
"""
Asserts that dtype is a nested tuple with exactly the same tree
structure as space, and that the dtypes of space's components and
their corresponding elements in <dtype> are equal.
"""
assert (isinstance(space, CompositeSpace) ==
isinstance(dtype, tuple))
if isinstance(space, CompositeSpace):
for s, d in safe_zip(space.components, dtype):
assert_composite_dtype_eq(s, d)
else:
assert space.dtype == dtype
if isinstance(space, SimplyTypedSpace):
assert space.dtype == space._dtype
elif isinstance(space, NullSpace):
assert space.dtype == "NullSpace's dtype"
elif isinstance(space, CompositeSpace):
assert_composite_dtype_eq(space, space.dtype)
def test_dtype_setter(space, dtype):
"""
Tests the setter method of space's dtype property.
"""
def get_expected_error(space, dtype):
"""
If calling space.dtype = dtype is expected to throw an exception,
this returns (exception_class, exception_message).
If no exception is to be expected, this returns (None, None).
"""
if isinstance(space, CompositeSpace):
if isinstance(dtype, tuple):
if len(space.components) != len(dtype):
return ValueError, "Argument 0 has length "
for s, d in safe_zip(space.components, dtype):
error, message = get_expected_error(s, d)
if error is not None:
return error, message
else:
for s in space.components:
error, message = get_expected_error(s, dtype)
if error is not None:
return error, message
return None, None
if isinstance(space, SimplyTypedSpace):
if not any((dtype is None,
dtype == 'floatX',
dtype in all_scalar_dtypes)):
return (TypeError,
'Unrecognized value "%s" (type %s) for dtype arg' %
(dtype, type(dtype)))
return None, None
if isinstance(space, NullSpace):
nullspace_dtype = NullSpace().dtype
if dtype != nullspace_dtype:
return (TypeError,
'NullSpace can only take the bogus dtype "%s"' %
nullspace_dtype)
return None, None
raise NotImplementedError("%s not yet supported by this test" %
type(space))
def assert_dtype_equiv(space, dtype):
"""
Asserts that space.dtype and dtype are equivalent.
"""
if isinstance(space, CompositeSpace):
if isinstance(dtype, tuple):
for s, d in safe_zip(space.components, dtype):
assert_dtype_equiv(s, d)
else:
for s in space.components:
assert_dtype_equiv(s, dtype)
else:
assert not isinstance(dtype, tuple)
if dtype == 'floatX':
dtype = theano.config.floatX
assert space.dtype == dtype, ("%s not equal to %s" %
(space.dtype, dtype))
expected_error, expected_message = get_expected_error(space, dtype)
if expected_error is not None:
try:
space.dtype = dtype
except expected_error as ex:
assert expected_message in str(ex)
except Exception as ex:
print("Expected exception of type %s, got %s instead." %
(expected_error.__name__, type(ex)))
raise ex
return
else:
space.dtype = dtype
assert_dtype_equiv(space, dtype)
def test_simply_typed_space_validate(space, batch_dtype, is_numeric):
"""
Creates a batch of batch_dtype, and sees if space validates it.
"""
assert isinstance(space, SimplyTypedSpace), \
"%s is not a SimplyTypedSpace" % type(space)
batch_sizes = (1, 3)
if not is_numeric and isinstance(space, VectorSpace) and space.sparse:
batch_sizes = (None, )
for batch_size in batch_sizes:
if is_numeric:
batch = space.get_origin_batch(dtype=batch_dtype,
batch_size=batch_size)
else:
batch = space.make_theano_batch(dtype=batch_dtype,
batch_size=batch_size,
name="test batch to validate")
# Expect an error if space.dtype is not None and batch can't cast
# to it.
if space.dtype is not None and \
not np.can_cast(batch.dtype, space.dtype):
np.testing.assert_raises(TypeError,
space._validate,
(is_numeric, batch))
else:
# Otherwise, don't expect an error.
space._validate(is_numeric, batch)
#
#
# End of test_dtype_setter() function
shape = np.array([2, 3, 4], dtype='int')
assert len(shape) == 3 # This test depends on this being true
dtypes = ('floatX', None) + all_scalar_dtypes
#
# spaces with the same number of elements
#
vector_spaces = tuple(VectorSpace(dim=shape.prod(), dtype=dt, sparse=s)
for dt in dtypes for s in (True, False))
conv2d_spaces = tuple(Conv2DSpace(shape=shape[:2],
dtype=dt,
num_channels=shape[2])
for dt in dtypes)
# no need to make CompositeSpaces with components spanning all possible
# dtypes. Just try 2 dtype combos. No need to try different sparsities
# either. That will be tested by the non-composite space conversions.
n_dtypes = 2
old_nchannels = shape[2]
shape[2] = old_nchannels / 2
assert shape[2] * 2 == old_nchannels, \
("test code is broken: # of channels should start as an even "
"number, not %d." % old_nchannels)
def make_composite_space(dtype0, dtype1, use_conv2d):
if use_conv2d:
second_space = Conv2DSpace(shape=shape[:2],
dtype=dtype1,
num_channels=shape[2])
else:
second_space = VectorSpace(dim=np.prod(shape),
dtype=dtype1)
return CompositeSpace((VectorSpace(dim=shape.prod(), dtype=dtype0),
second_space))
composite_spaces = tuple(make_composite_space(dtype0, dtype1, use_conv2d)
for dtype0, dtype1 in zip(dtypes[:n_dtypes],
dtypes[-n_dtypes:])
for use_conv2d in [True, False])
del n_dtypes
# A few composite dtypes to try throwing at CompositeSpace's batch-making
# methods.
composite_dtypes = ((None, 'int8'),
('complex128', theano.config.floatX))
# Tests CompositeSpace's batch-making methods and dtype setter
# with composite dtypes
for from_space in composite_spaces:
for to_dtype in composite_dtypes:
test_get_origin_batch(from_space, to_dtype)
test_make_shared_batch(from_space, to_dtype)
test_make_theano_batch(from_space, to_dtype)
test_dtype_setter(from_space, to_dtype)
# Tests validate/np_validate() for SimplyTypedSpaces
for is_numeric in (True, False):
for space in vector_spaces + conv2d_spaces:
for batch_dtype in ('floatX', ) + all_scalar_dtypes:
# Skip the test if the symbolic SparseType does not implement
# that dtype. As of 2015-05-07, this happens for 'float16'.
if ((isinstance(space, VectorSpace) and
space.sparse and
batch_dtype in all_scalar_dtypes and
batch_dtype not in theano.sparse.SparseType.dtype_set)):
continue
test_simply_typed_space_validate(space,
batch_dtype,
is_numeric)
all_spaces = vector_spaces + conv2d_spaces + composite_spaces
for from_space in all_spaces:
test_dtype_getter(from_space)
# Tests batch-making and dtype setting methods with non-composite
# dtypes.
for to_dtype in dtypes:
# Skip the test if the symbolic SparseType does not implement
# that dtype. As of 2015-05-07, this happens for 'float16'.
if ((isinstance(from_space, VectorSpace) and
from_space.sparse and
to_dtype in all_scalar_dtypes and
to_dtype not in theano.sparse.SparseType.dtype_set)):
continue
test_get_origin_batch(from_space, to_dtype)
test_make_shared_batch(from_space, to_dtype)
test_make_theano_batch(from_space, to_dtype)
test_dtype_setter(from_space, to_dtype)
# Tests _format_as
for to_space in all_spaces:
# Skip the test if the symbolic SparseType does not implement
# that dtype. As of 2015-05-07, this happens for 'float16'.
if ((isinstance(to_space, VectorSpace) and
to_space.sparse and
to_space.dtype in all_scalar_dtypes and
to_space.dtype not in theano.sparse.SparseType.dtype_set)):
continue
for is_numeric in (True, False):
test_format(from_space, to_space, is_numeric)
def test_symbolic_undo_format_as():
"""
Test functionality of undo_format_as on symbolic batches.
After format_as and undo_format_as, the theano variable
should be the same object, not just an equivalent
variable.
"""
# Compare identity of Composite batches
def assert_components(batch1, batch2):
for e1, e2 in zip(batch1, batch2):
if isinstance(e1, tuple) and isinstance(e2, tuple):
assert_components(e1, e2)
elif isinstance(e1, tuple) or isinstance(e2, tuple):
raise ValueError('Composite batches do not match.')
else:
assert e1 is e2
# VectorSpace and Conv2DSpace
VS = VectorSpace(dim=27)
VS_sparse = VectorSpace(dim=27, sparse=True)
# VectorSpace to Sparse VectorSpace
VS_batch = VS.make_theano_batch()
new_SVS_batch = VS.format_as(VS_batch, VS_sparse)
new_VS_batch = VS.undo_format_as(new_SVS_batch, VS_sparse)
assert new_VS_batch is VS_batch
assert new_SVS_batch is not VS_batch
# ConvSpace to ConvSpace
CS = Conv2DSpace(shape=[3, 3],
num_channels=3,
axes=('b', 0, 1, 'c'),
dtype='float32')
CS_non_default = Conv2DSpace(shape=[3, 3],
num_channels=3,
axes=('c', 'b', 0, 1),
dtype='float64')
CS_batch = CS.make_theano_batch()
new_ndCS_batch = CS.format_as(CS_batch, CS_non_default)
new_CS_batch = CS.undo_format_as(new_ndCS_batch, CS_non_default)
assert new_CS_batch is CS_batch
assert new_ndCS_batch is not CS_batch
assert new_ndCS_batch.dtype == 'float64'
assert new_CS_batch.dtype == 'float32'
ndCS_batch = CS_non_default.make_theano_batch()
new_CS_batch = CS_non_default.format_as(ndCS_batch, CS)
new_ndCS_batch = CS_non_default.undo_format_as(new_CS_batch, CS)
assert new_ndCS_batch is ndCS_batch
assert new_CS_batch is not ndCS_batch
assert new_ndCS_batch.dtype == 'float64'
assert new_CS_batch.dtype == 'float32'
# Start in VectorSpace
VS_batch = VS.make_theano_batch()
new_CS_batch = VS.format_as(VS_batch, CS)
new_VS_batch = VS.undo_format_as(new_CS_batch, CS)
assert new_VS_batch is VS_batch
new_CS_batch = VS.format_as(VS_batch, CS_non_default)
new_VS_batch = VS.undo_format_as(new_CS_batch, CS_non_default)
assert new_VS_batch is VS_batch
# Start in Conv2D with default axes
CS_batch = CS.make_theano_batch()
new_VS_batch = CS.format_as(CS_batch, VS)
new_CS_batch = CS.undo_format_as(new_VS_batch, VS)
assert new_CS_batch is CS_batch
# Non-default axes
CS_batch = CS_non_default.make_theano_batch()
new_VS_batch = CS_non_default.format_as(CS_batch, VS)
new_CS_batch = CS_non_default.undo_format_as(new_VS_batch, VS)
assert new_CS_batch is CS_batch
# Composite Space to VectorSpace
VS = VectorSpace(dim=27)
CS = Conv2DSpace(shape=[2, 2], num_channels=3, axes=('b', 0, 1, 'c'))
CompS = CompositeSpace((CompositeSpace((VS, VS)), CS))
VS_large = VectorSpace(dim=(2*27+12))
CompS_batch = CompS.make_theano_batch()
new_VS_batch = CompS.format_as(CompS_batch, VS_large)
new_CompS_batch = CompS.undo_format_as(new_VS_batch, VS_large)
assert_components(CompS_batch, new_CompS_batch)
# VectorSpace to Composite Space
CompS = CompositeSpace((CompositeSpace((VS, VS)), CS))
VS_batch = VS_large.make_theano_batch()
new_CompS_batch = VS_large.format_as(VS_batch, CompS)
new_VS_batch = VS_large.undo_format_as(new_CompS_batch, CompS)
assert VS_batch is new_VS_batch
# Reorder CompositeSpace
CompS = CompositeSpace((VS, CompositeSpace((VS, CS))))
VS_batch = VS_large.make_theano_batch()
new_CompS_batch = VS_large.format_as(VS_batch, CompS)
new_VS_batch = VS_large.undo_format_as(new_CompS_batch, CompS)
assert VS_batch is new_VS_batch
# Reorder CompositeSpace
CompS = CompositeSpace((CompositeSpace((CompositeSpace((VS,)), CS)), VS))
VS_batch = VS_large.make_theano_batch()
new_CompS_batch = VS_large.format_as(VS_batch, CompS)
new_VS_batch = VS_large.undo_format_as(new_CompS_batch, CompS)
assert VS_batch is new_VS_batch
# CompositeSpace to CompositeSpace
VS = VectorSpace(dim=27)
CS = Conv2DSpace(shape=[3, 3], num_channels=3, axes=('b', 0, 1, 'c'))
CompS_VS = CompositeSpace((CompositeSpace((VS, VS)), VS))
CompS_CS = CompositeSpace((CompositeSpace((CS, CS)), CS))
CompS_VS_batch = CompS_VS.make_theano_batch()
new_CompS_CS_batch = CompS_VS.format_as(CompS_VS_batch, CompS_CS)
new_CompS_VS_batch = CompS_VS.undo_format_as(new_CompS_CS_batch, CompS_CS)
assert_components(CompS_VS_batch, new_CompS_VS_batch)
def test_numeric_undo_format_as():
"""
Test functionality of undo_np_format_as on numeric batches.
This calls np_format_as with spaces reversed.
"""
# Compare identity of Composite batches
def assert_components(batch1, batch2):
for e1, e2 in zip(batch1, batch2):
if isinstance(e1, tuple) and isinstance(e2, tuple):
assert_components(e1, e2)
elif isinstance(e1, tuple) or isinstance(e2, tuple):
raise ValueError('Composite batches do not match.')
else:
assert np.allclose(e1, e2)
# VectorSpace and Conv2DSpace
VS = VectorSpace(dim=27)
VS_sparse = VectorSpace(dim=27, sparse=True)
# VectorSpace to Sparse VectorSpace
VS_batch = np.arange(270).reshape(10, 27)
new_SVS_batch = VS.np_format_as(VS_batch, VS_sparse)
new_VS_batch = VS.undo_np_format_as(new_SVS_batch, VS_sparse)
assert np.allclose(new_VS_batch, VS_batch)
# ConvSpace to ConvSpace
CS = Conv2DSpace(shape=[3, 3],
num_channels=3,
axes=('b', 0, 1, 'c'),
dtype='float32')
CS_non_default = Conv2DSpace(shape=[3, 3],
num_channels=3,
axes=('c', 'b', 0, 1),
dtype='float64')
CS_batch = np.arange(270).reshape(10, 3, 3, 3).astype('float32')
new_ndCS_batch = CS.np_format_as(CS_batch, CS_non_default)
new_CS_batch = CS.undo_np_format_as(new_ndCS_batch, CS_non_default)
assert np.allclose(new_CS_batch, CS_batch)
assert new_ndCS_batch.shape != CS_batch.shape
assert new_ndCS_batch.dtype == 'float64'
assert new_CS_batch.dtype == 'float32'
ndCS_batch = np.arange(270).reshape(3, 10, 3, 3)
new_CS_batch = CS_non_default.np_format_as(ndCS_batch, CS)
new_ndCS_batch = CS_non_default.undo_np_format_as(new_CS_batch, CS)
assert np.allclose(new_ndCS_batch, ndCS_batch)
assert new_CS_batch.shape != ndCS_batch.shape
assert new_ndCS_batch.dtype == 'float64'
assert new_CS_batch.dtype == 'float32'
# Start in VectorSpace
VS_batch = np.arange(270).reshape(10, 27)
new_CS_batch = VS.np_format_as(VS_batch, CS)
new_VS_batch = VS.undo_np_format_as(new_CS_batch, CS)
assert np.allclose(new_VS_batch, VS_batch)
# Non-default axes
new_CS_batch = VS.np_format_as(VS_batch, CS_non_default)
new_VS_batch = VS.undo_np_format_as(new_CS_batch, CS_non_default)
assert np.allclose(new_VS_batch, VS_batch)
# Start in Conv2D with default axes
CS_batch = np.arange(270).reshape(10, 3, 3, 3)
new_VS_batch = CS.np_format_as(CS_batch, VS)
new_CS_batch = CS.undo_np_format_as(new_VS_batch, VS)
assert np.allclose(new_CS_batch, CS_batch)
# Non-default axes
CS_batch = np.arange(270).reshape(3, 10, 3, 3)
new_VS_batch = CS_non_default.np_format_as(CS_batch, VS)
new_CS_batch = CS_non_default.undo_np_format_as(new_VS_batch, VS)
assert np.allclose(new_CS_batch, CS_batch)
# Composite Space to VectorSpace
VS = VectorSpace(dim=27)
CS = Conv2DSpace(shape=[2, 2], num_channels=3, axes=('b', 0, 1, 'c'))
CompS = CompositeSpace((CompositeSpace((VS, VS)), CS))
VS_large = VectorSpace(dim=(2*27+12))
VS_batch = np.arange(270).reshape(10, 27)
VS_batch2 = 2*np.arange(270).reshape(10, 27)
CS_batch = 3*np.arange(120).reshape(10, 2, 2, 3)
CompS_batch = ((VS_batch, VS_batch2), CS_batch)
new_VS_batch = CompS.np_format_as(CompS_batch, VS_large)
new_CompS_batch = CompS.undo_np_format_as(new_VS_batch, VS_large)
assert_components(CompS_batch, new_CompS_batch)
# VectorSpace to Composite Space
CompS = CompositeSpace((CompositeSpace((VS, VS)), CS))
VS_batch = np.arange((2*27+12)*10).reshape(10, 2*27+12)
new_CompS_batch = VS_large.np_format_as(VS_batch, CompS)
new_VS_batch = VS_large.undo_np_format_as(new_CompS_batch, CompS)
assert np.allclose(VS_batch, new_VS_batch)
# Reorder CompositeSpace
CompS = CompositeSpace((VS, CompositeSpace((VS, CS))))
VS_batch = np.arange((2*27+12)*10).reshape(10, 2*27+12)
new_CompS_batch = VS_large.np_format_as(VS_batch, CompS)
new_VS_batch = VS_large.undo_np_format_as(new_CompS_batch, CompS)
assert np.allclose(VS_batch, new_VS_batch)
# Reorder CompositeSpace
CompS = CompositeSpace((CompositeSpace((CompositeSpace((VS,)), CS)), VS))
VS_batch = np.arange((2*27+12)*10).reshape(10, 2*27+12)
new_CompS_batch = VS_large.np_format_as(VS_batch, CompS)
new_VS_batch = VS_large.undo_np_format_as(new_CompS_batch, CompS)
assert np.allclose(VS_batch, new_VS_batch)
# CompositeSpace to CompositeSpace
VS = VectorSpace(dim=27)
CS = Conv2DSpace(shape=[3, 3], num_channels=3, axes=('b', 0, 1, 'c'))
VS_batch = np.arange(270).reshape(10, 27)
VS_batch2 = 2*np.arange(270).reshape(10, 27)
VS_batch3 = 3*np.arange(270).reshape(10, 27)
CompS_VS = CompositeSpace((CompositeSpace((VS, VS)), VS))
CompS_CS = CompositeSpace((CompositeSpace((CS, CS)), CS))
CompS_VS_batch = ((VS_batch, VS_batch2), VS_batch3)
new_CompS_CS_batch = CompS_VS.np_format_as(CompS_VS_batch, CompS_CS)
new_CompS_VS_batch = CompS_VS.undo_np_format_as(new_CompS_CS_batch,
CompS_CS)
assert_components(CompS_VS_batch, new_CompS_VS_batch)
| bsd-3-clause |
k0001/mediasancion | mediasancion/core/migrations/0001_initial.py | 1 | 15801 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Distrito'
db.create_table('core_distrito', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('remote_source', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('remote_url', self.gf('django.db.models.fields.URLField')(max_length=1023, blank=True)),
('remote_id', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('origin', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('uuid', self.gf('django.db.models.fields.CharField')(db_index=True, unique=True, max_length=36, blank=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=128)),
('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=50, blank=True)),
))
db.send_create_signal('core', ['Distrito'])
# Adding model 'Partido'
db.create_table('core_partido', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('remote_source', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('remote_url', self.gf('django.db.models.fields.URLField')(max_length=1023, blank=True)),
('remote_id', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('origin', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('uuid', self.gf('django.db.models.fields.CharField')(db_index=True, unique=True, max_length=36, blank=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=50, blank=True)),
))
db.send_create_signal('core', ['Partido'])
# Adding model 'Bloque'
db.create_table('core_bloque', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('remote_source', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('remote_url', self.gf('django.db.models.fields.URLField')(max_length=1023, blank=True)),
('remote_id', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('origin', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('uuid', self.gf('django.db.models.fields.CharField')(db_index=True, unique=True, max_length=36, blank=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=255)),
('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=50, blank=True)),
))
db.send_create_signal('core', ['Bloque'])
# Adding model 'Persona'
db.create_table('core_persona', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('updated_at', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, blank=True)),
('remote_source', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('remote_url', self.gf('django.db.models.fields.URLField')(max_length=1023, blank=True)),
('remote_id', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('origin', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)),
('creator', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True)),
('uuid', self.gf('django.db.models.fields.CharField')(db_index=True, unique=True, max_length=36, blank=True)),
('slug', self.gf('django.db.models.fields.SlugField')(db_index=True, max_length=50, blank=True)),
('nombre', self.gf('django.db.models.fields.CharField')(max_length=128)),
('apellido', self.gf('django.db.models.fields.CharField')(max_length=128)),
('documento_tipo', self.gf('django.db.models.fields.CharField')(max_length=1, null=True, blank=True)),
('documento_numero', self.gf('django.db.models.fields.CharField')(max_length=63, null=True, blank=True)),
('email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)),
('telefono', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
('website', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
('foto', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
))
db.send_create_signal('core', ['Persona'])
def backwards(self, orm):
# Deleting model 'Distrito'
db.delete_table('core_distrito')
# Deleting model 'Partido'
db.delete_table('core_partido')
# Deleting model 'Bloque'
db.delete_table('core_bloque')
# Deleting model 'Persona'
db.delete_table('core_persona')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.bloque': {
'Meta': {'object_name': 'Bloque'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'origin': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_url': ('django.db.models.fields.URLField', [], {'max_length': '1023', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
'core.distrito': {
'Meta': {'object_name': 'Distrito'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'origin': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_url': ('django.db.models.fields.URLField', [], {'max_length': '1023', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
'core.partido': {
'Meta': {'object_name': 'Partido'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'origin': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_url': ('django.db.models.fields.URLField', [], {'max_length': '1023', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'})
},
'core.persona': {
'Meta': {'object_name': 'Persona'},
'apellido': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'documento_numero': ('django.db.models.fields.CharField', [], {'max_length': '63', 'null': 'True', 'blank': 'True'}),
'documento_tipo': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'foto': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'origin': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_source': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'remote_url': ('django.db.models.fields.URLField', [], {'max_length': '1023', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'telefono': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '36', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['core']
| agpl-3.0 |
py4n6/pyflag | utilities/raid_test.py | 7 | 1982 | # This is a good example of how the sk.c stuff can be integrated into
# the raid stuff to be able to verify the image without unpacking the
# whole thing.
import mapper
import optparse,sys
import sk
if __name__ == '__main__':
    parser = optparse.OptionParser()
    parser.add_option('-p','--period',default=6, type='int',
                      help = "periodicity of the map")
    parser.add_option('-m','--map',default=None,
                      help = "The Map file itself")
    parser.add_option('-s','--skip',default='0',
                      help = "length of data to skip in each disk")
    parser.add_option('-n','--number',default=6, type='int',
                      help = "Number of disks")
    parser.add_option('-b','--blocksize',default="512",
                      help = "block size")
    parser.add_option('-P','--print_map',default=False, action='store_true',
                      help = "print the map")
    parser.add_option('-o','--output', default="output.dd",
                      help = "Name of the output file")
    parser.add_option("-S", "--subsys",
                      default=None,
                      help="Subsystem to use (e.g. EWF)")
    (options, args) = parser.parse_args()
    raid_map = mapper.load_map_file(options.map, options.period)
    if options.print_map:
        mapper.pretty_print(raid_map, options.period, options.number)
        print mapper.calculate_map(raid_map, options.period, options.number)
        sys.exit(0)
    blocksize = mapper.parse_offsets(options.blocksize)
    fds=[]
    for arg in args:
        if arg != "None":
            fds.append(mapper.open_image(arg, options.subsys))
        else:
            fds.append(mapper.ParityDisk([mapper.open_image(arg) for arg in args if arg != 'None']))
    fd = mapper.RaidReassembler(raid_map, fds, blocksize, skip=mapper.parse_offsets(options.skip))
    skfs = sk.skfs(fd, imgoff = 128 * 1024 + 512 * 63)
    print skfs.listdir("/")
| gpl-2.0 |
muravjov/ansible | v2/ansible/plugins/action/set_fact.py | 15 | 1402 | # Copyright 2013 Dag Wieers <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.errors import AnsibleError
from ansible.plugins.action import ActionBase
from ansible.template import Templar
from ansible.utils.boolean import boolean
class ActionModule(ActionBase):
    TRANSFERS_FILES = False
    def run(self, tmp=None, task_vars=dict()):
        templar = Templar(loader=self._loader, variables=task_vars)
        facts = dict()
        if self._task.args:
            for (k, v) in self._task.args.iteritems():
                k = templar.template(k)
                if isinstance(v, basestring) and v.lower() in ('true', 'false', 'yes', 'no'):
                    v = boolean(v)
                facts[k] = v
        return dict(changed=True, ansible_facts=facts)
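# Illustrative sketch (not part of the upstream module): for a playbook task such as
#   - set_fact:
#       api_port: "8080"
#       use_ssl: "yes"
# the loop above templates each key and passes "yes"/"no"/"true"/"false" strings
# through boolean(), so the returned result would resemble
#   {'changed': True, 'ansible_facts': {'api_port': '8080', 'use_ssl': True}}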
| gpl-3.0 |
efiring/UTide | utide/utilities.py | 1 | 8730 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import numpy as np
from scipy.io import loadmat
# This module is an excerpt from the one in python-gsw.
# Based on Robert Kern's Bunch; taken from
# http://currents.soest.hawaii.edu/hgstage/pycurrents/
# pycurrents/system/utilities.py
class Bunch(dict):
"""
A dictionary that also provides access via attributes.
Additional methods update_values and update_None provide
control over whether new keys are added to the dictionary
when updating, and whether an attempt to add a new key is
ignored or raises a KeyError.
The Bunch also prints differently than a normal
dictionary, using str() instead of repr() for its
keys and values, and in key-sorted order. The printing
format can be customized by subclassing with a different
str_fmt class attribute. Do not assign directly to this
class attribute, because that would substitute an instance
attribute which would then become part of the Bunch, and
would be reported as such by the keys() method.
To output a string representation with
a particular format, without subclassing, use the
formatted() method.
"""
str_fmt = "{0!s:<{klen}} : {1!s:>{vlen}}\n"
def __init__(self, *args, **kwargs):
"""
*args* can be dictionaries, bunches, or sequences of
key,value tuples. *kwargs* can be used to initialize
or add key, value pairs.
"""
dict.__init__(self)
self.__dict__ = self
for arg in args:
self.update(arg)
self.update(kwargs)
def __str__(self):
return self.formatted()
def formatted(self, fmt=None, types=False):
"""
Return a string with keys and/or values or types.
*fmt* is a format string as used in the str.format() method.
The str.format() method is called with key, value as positional
arguments, and klen, vlen as kwargs. The latter are the maxima
of the string lengths for the keys and values, respectively,
up to respective maxima of 20 and 40.
"""
if fmt is None:
fmt = self.str_fmt
items = list(self.items())
items.sort()
klens = []
vlens = []
for i, (k, v) in enumerate(items):
lenk = len(str(k))
if types:
v = type(v).__name__
lenv = len(str(v))
items[i] = (k, v)
klens.append(lenk)
vlens.append(lenv)
klen = min(20, max(klens))
vlen = min(40, max(vlens))
slist = [fmt.format(key, value, klen=klen, vlen=vlen) for
key, value in items]
return ''.join(slist)
def from_pyfile(self, filename):
"""
Read in variables from a python code file.
"""
# We can't simply exec the code directly, because in
# Python 3 the scoping for list comprehensions would
# lead to a NameError. Wrapping the code in a function
# fixes this.
d = dict()
lines = ["def _temp_func():\n"]
with open(filename) as f:
lines.extend([" " + line for line in f])
lines.extend(["\n return(locals())\n",
"_temp_out = _temp_func()\n",
"del(_temp_func)\n"])
codetext = "".join(lines)
code = compile(codetext, filename, 'exec')
exec(code, globals(), d)
self.update(d["_temp_out"])
return self
def update_values(self, *args, **kw):
"""
arguments are dictionary-like; if present, they act as
additional sources of kwargs, with the actual kwargs
taking precedence.
One reserved optional kwarg is "strict". If present and
True, then any attempt to update with keys that are not
already in the Bunch instance will raise a KeyError.
"""
strict = kw.pop("strict", False)
newkw = dict()
for d in args:
newkw.update(d)
newkw.update(kw)
self._check_strict(strict, newkw)
dsub = dict([(k, v) for (k, v) in newkw.items() if k in self])
self.update(dsub)
def update_None(self, *args, **kw):
"""
Similar to update_values, except that an existing value
will be updated only if it is None.
"""
strict = kw.pop("strict", False)
newkw = dict()
for d in args:
newkw.update(d)
newkw.update(kw)
self._check_strict(strict, newkw)
dsub = dict([(k, v) for (k, v) in newkw.items()
if k in self and self[k] is None])
self.update(dsub)
def _check_strict(self, strict, kw):
if strict:
bad = set(kw.keys()) - set(self.keys())
if bad:
bk = list(bad)
bk.sort()
ek = list(self.keys())
ek.sort()
raise KeyError(
"Update keys %s don't match existing keys %s" % (bk, ek))
# The following functions ending with loadbunch() and showmatbunch()
# are taken from the repo
# http://currents.soest.hawaii.edu/hgstage/pycurrents/,
# pycurrents/file/matfile.py.
def _crunch(arr, masked=True):
"""
Handle all arrays that are not Matlab structures.
"""
if arr.size == 1:
arr = arr.item() # Returns the contents.
return arr
# The following squeeze is discarding some information;
# we might want to make it optional.
arr = arr.squeeze()
if masked and arr.dtype.kind == 'f': # Check for complex also.
arrm = np.ma.masked_invalid(arr)
if arrm.count() < arrm.size:
arr = arrm
else:
arr = np.array(arr) # Copy to force a read.
else:
arr = np.array(arr)
return arr
def _structured_to_bunch(arr, masked=True):
"""
Recursively move through the structure tree, creating
a Bunch for each structure. When a non-structure is
encountered, process it with crunch().
"""
# A single "void" object comes from a Matlab structure.
# Each Matlab structure field corresponds to a field in
# a numpy structured dtype.
if arr.dtype.kind == 'V' and arr.shape == (1, 1):
b = Bunch()
x = arr[0, 0]
for name in x.dtype.names:
b[name] = _structured_to_bunch(x[name], masked=masked)
return b
return _crunch(arr, masked=masked)
def _showmatbunch(b, elements=None, origin=None):
if elements is None:
elements = []
if origin is None:
origin = ''
items = list(b.items())
for k, v in items:
_origin = "%s.%s" % (origin, k)
if isinstance(v, Bunch):
_showmatbunch(v, elements, _origin)
else:
if isinstance(v, str):
slen = len(v)
if slen < 50:
entry = v
else:
entry = 'string, %d characters' % slen
elif isinstance(v, np.ndarray):
if np.ma.isMA(v):
entry = 'masked array, shape %s, dtype %s' % (v.shape, v.dtype)
else:
entry = 'ndarray, shape %s, dtype %s' % (v.shape, v.dtype)
else:
entry = '%s %s' % (type(v).__name__, v)
elements.append((_origin, entry))
elements.sort()
return elements
def showmatbunch(b):
"""
Show the contents of a matfile as it has been, or would be, loaded
by loadbunch.
*b* can be either the name of a matfile or the output of loadbunch.
Returns a multi-line string suitable for printing.
"""
if isinstance(b, str):
b = loadbunch(b)
elist = _showmatbunch(b)
names = [n for n, v in elist]
namelen = min(40, max([len(n) for n in names]))
str_fmt = "{0!s:<{namelen}} : {1!s}\n"
strlist = [str_fmt.format(n[1:], v, namelen=namelen) for (n, v) in elist]
return ''.join(strlist)
def loadbunch(fname, masked=True):
"""
Wrapper for loadmat that dereferences (1,1) object arrays,
converts floating point arrays to masked arrays, and uses
nested Bunch objects in place of the matlab structures.
"""
out = Bunch()
if fname.endswith('.mat'):
with open(fname, 'rb') as fobj:
xx = loadmat(fobj)
elif fname.endswith('.npz'):
xx = np.load(fname, encoding='latin1')
else:
raise ValueError('Unrecognized file {}'.format(fname))
keys = [k for k in xx.keys() if not k.startswith("__")]
for k in keys:
out[k] = _structured_to_bunch(xx[k], masked=masked)
return out
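# Illustrative use of loadbunch/showmatbunch (the file and field names are hypothetical):
#
#   b = loadbunch('example_tides.mat')   # nested Matlab structs become nested Bunches
#   print(showmatbunch(b))               # one line per leaf: name, then shape/dtype or value
#   coef = b.coef.A                      # attribute-style access into the loaded data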
| mit |
DrkVenom/roots | roots.py | 1 | 9713 | #Name: Tony Ranieri
#Created: October 2014
#Modified: August 2015
import numpy as np
import pylab as py
import matplotlib.pyplot as plt
def roots(f,df,a,b,niter,epsilon):
# Input
# f: the function that we need to find roots for
# df: derivative of the function f
# a: initial left bracket x-coord
# b: initial right bracket x-coord
# niter: max number of iterations
# epsilon: tolerance for the stopping rule
#
# Output
# xstar: the root of f for given tolerance epsilon
# perform bisect
fa=f(a) #define y-coord at a based on the given f
fb=f(b) #define y-coord at b based on the given f
if (fa*fb>0): #test to see if there is a single root in the bracket
print "There are either no roots in this bracket, or an even number of them. Please refine your bracket."
return 1
for i in range(niter):
xstar=(a+b)/2 #define xstar as the midpoint of the current bracket
fxstar=f(xstar) #set the value of our function at this new midpoint
err=abs(b-a)
if (fa*fxstar<0): #test to see if root is in [fa,fxstar]
b=xstar #if yes, set our upper bound to now be xstar
fb=fxstar #update the guess and iterate
elif (fb*fxstar<0): #test to see if root is in [fxstar,fb]
a=xstar #if yes, set our lower bound to now be xstar
fa=fxstar #update the guess and iterate
else:
a=xstar
b=xstar
print "Check the results carefully! One of your endpoints may be a root or 0 might be a root."
if (err<epsilon): #test to see if our proposed root is "close enough" based on our tolerance level
break #if it is, we're done here
xstar_b=xstar
fxstar_b=f(xstar_b)
# perform Newton
x0=(a+b)/2 #need an initial guess, midpoint seems decent enough
fx0=f(x0) #define y-coord at x0 based on the given f
for i in range(niter):
dfx0=df(x0) #define derivative y-coord at x0 based on the given df
if (dfx0==0):
break
xstar=x0-fx0/dfx0 #set xstar as defined by Newton's method
err=abs(xstar-x0)
fxstar=f(xstar)
if (err<epsilon): #test to see if our proposed root is "close enough" based on our tolerance level to the error
x0=xstar #update the initial guess and iterate
fx0=fxstar
if (i==niter):
break
xstar_n=xstar
fxstar_n=f(xstar_n)
# perform Secant
fa=f(a) #define y-coord at a based on the given f
fb=f(b) #define y-coord at b based on the given f
for i in range(niter):
if (fb==fa):
break
xstar=b-((fb*(b-a))/(fb-fa)) #set xstar as defined by secant method
err=abs(f(xstar))
fxstar=f(xstar)
if (err<epsilon): #test to see if our proposed root is "close enough" based on our tolerance level to the error
break
a=b #update the initial guess and iterate
b=xstar #update the initial guess and iterate
fa=fb
fb=fxstar
if (i==niter) or (fb==fa):
break
xstar_s=xstar
fxstar_s=f(xstar_s)
#find best estimate for root by testing proximity to zero
if (abs(fxstar_b-0)<=abs(fxstar_n-0)):
if (abs(fxstar_b-0)==abs(fxstar_n-0)):
xstar=xstar_b
print "Bisect method and Newton method came to the same conclusion."
else:
if (abs(fxstar_b-0)<=abs(fxstar_s-0)):
if (abs(fxstar_b-0)==abs(fxstar_s-0)):
xstar=xstar_b
print "Bisect method and Secant method came to the same conclusion."
else:
xstar=xstar_b
print "Bisect method is superior."
else:
xstar=xstar_s
print "Secant method is superior."
else:
if (abs(fxstar_n-0)<=abs(fxstar_s-0)):
if (abs(fxstar_n-0)==abs(fxstar_s-0)):
xstar=xstar_n
print "Newton method and Secant method came to the same conclusion."
else:
xstar=xstar_n
print "Newton method is superior."
else:
xstar=xstar_s
print "Secant method is superior."
#plot function with identified root
#x=np.linspace(a, b, 200)
#plt.plot(x, f(x))
#plt.xlim(a-1, b+1)
#plt.xticks(np.linspace(a, b, 10, endpoint=True))
#plt.xlim(x.min()*1.1,x.max() * 1.1)
#plt.ylim(-5, 5)
#ax = plt.gca()
#ax.axes.get_yaxis().set_visible(False)
#ax.spines['right'].set_color('none')
#ax.spines['top'].set_color('none')
#ax.spines['left'].set_color('none')
#ax.xaxis.set_ticks_position('bottom')
# ax.spines['bottom'].set_position(('data',0))
#plt.show()
print "output = (value, bisect, newton, secant)"
return xstar, xstar_b, xstar_n, xstar_s
def bisect(f,a,b,niter,epsilon):
# Input
# f: the function that we need to find roots for
# a: initial left bracket x-coord
# b: initial right bracket x-coord
# niter: max number of iterations
# epsilon: tolerance for the stopping rule
#
# Output
# xstar: the root of f for given tolerance epsilon
# err: error at convergence
# fxstar: the value of f at xstar (should be very close to zero as we are expecting a root)
# i: the number of iterations taken to get to the tolerance
# xseq: the values of {x_n} to see convergence
fa=f(a) #define y-coord at a based on the given f
fb=f(b) #define y-coord at b based on the given f
xseq=np.zeros(niter)
if (fa*fb>0): #test to see if there is a single root in the bracket
print "There are either no roots in this bracket, or an even number of them. Please refine your bracket."
return 1
for i in range(niter):
xstar=(a+b)/2 #define xstar as the midpoint of the current bracket
xseq[i]=xstar #add the value of xstar to this convergent sequence
fxstar=f(xstar) #set the value of our function at this new midpoint
err=abs(b-a)
if (fa*fxstar<0): #test to see if root is in [fa,fxstar]
b=xstar #if yes, set our upper bound to now be xstar
fb=fxstar #update the guess and iterate
elif (fb*fxstar<0): #test to see if root is in [fxstar,fb]
a=xstar #if yes, set our lower bound to now be xstar
fa=fxstar #update the guess and iterate
else:
a=xstar
b=xstar
print "Check the results carefully! One of your endpoints may be a root."
if (err<epsilon): #test to see if our proposed root is "close enough" based on our tolerance level
break #if it is, we're done here
xstar=(a+b)/2
fxstar=f(xstar)
return xstar, err, fxstar, i+1, xseq[0:i]
def newton(f,df,x0,niter,epsilon):
# Input
# f: the function that we need to find roots for
# df: the derivative of the function f
# x0: initial guess for a root
# niter: max number of iterations
# epsilon: tolerance for the stopping rule
#
# Output
# xstar: the root of f for given tolerance epsilon
# err: error at convergence
# fxstar: the value of f at xstar (should be very close to zero as we are expecting a root)
# i: the number of iterations taken to get to the tolerance
# xseq: the values of {x_n} to see convergence
fx0=f(x0) #define y-coord at x0 based on the given f
xseq=np.zeros(niter+1) #need +1 as we already know the first entry is x0
xseq[0]=x0
for i in range(niter):
dfx0=df(x0) #define derivative y-coord at x0 based on the given df
xstar=x0-fx0/dfx0 #set xstar as defined by Newton's method
xseq[i+1]=xstar
err=abs(xstar-x0)
fxstar=f(xstar)
if (err<epsilon): #test to see if our proposed root is "close enough" based on our tolerance level to the error
break
x0=xstar #update the initial guess and iterate
fx0=fxstar
if (i==niter):
print "Newton's method failed to converge given the number of iterations."
break
return xstar, err, fxstar, i+1, xseq[0:(i+2)]
def secant(f,a,b,niter,epsilon):
# Input
# f: the function of interest
# a: initial left bracket x-coord
# b: initial right bracket x-coord
# niter: max number of iterations
# epsilon: tolerance for the stopping rule
#
# Output
# xstar: the root of f for given tolerance epsilon
# err: error at convergence
# fxstar: the value of f at xstar (should be very close to zero as we are expecting a root)
# i: the number of iterations taken to get to the tolerance
# xseq: the values of {x_n} to see convergence
fa=f(a) #define y-coord at a based on the given f
fb=f(b) #define y-coord at b based on the given f
xseq=np.zeros(niter+1) #need +1 as we already know the first entry is x0
xseq[0]=a
xseq[1]=b
for i in range(niter):
xstar=b-((fb*(b-a))/(fb-fa)) #set xstar as defined by secant method
xseq[i+2]=xstar #+2 as we alreqady defined the first 2
err=abs(f(xstar))
fxstar=f(xstar)
if (err<epsilon): #test to see if our proposed root is "close enough" based on our tolerance level to the error
break
a=b #update the initial guess and iterate
b=xstar #update the initial guess and iterate
fa=fb
fb=fxstar
if (i==niter):
print "Secant's method failed to converge given the number of iterations."
break
return xstar, err, fxstar, i+1, xseq[0:(i+2)]
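# Illustrative usage sketch (not part of the original module): finding sqrt(2) as
# the positive root of f(x) = x**2 - 2 with the standalone solvers above.
#
#   f = lambda x: x**2 - 2.0
#   df = lambda x: 2.0*x
#   xstar, err, fxstar, nits, xseq = newton(f, df, 1.0, 50, 1e-10)
#   xstar_b, err_b, fxstar_b, nits_b, xseq_b = bisect(f, 0.0, 2.0, 100, 1e-10)
#
# Both calls should converge to roughly 1.41421356, with newton typically taking
# far fewer iterations than bisect for the same tolerance.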
| gpl-2.0 |
happyemi/friendlypi | serverpi/src/plugins/test.py | 1 | 1122 | # Copyright 2013-2014 Emiliano Mennucci
#
# This file is part of FriendlyPi.
#
# FriendlyPi is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# FriendlyPi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with FriendlyPi. If not, see <http://www.gnu.org/licenses/>.
class TestMod:
    def __init__(self, config):
        self.value = 0
    def get_status(self):
        actions = [{"id": "inc", "label": "Increment"}, {"id": "dec", "label": "Decrement"}]
        return {"caption": "Test module", "status": str(self.value), "actions": actions}
    def exec_command(self, command):
        if command == "inc":
            self.value += 1
        else:
            self.value -= 1
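# Illustrative interaction sketch (the surrounding FriendlyPi server is assumed and
# not shown here): the framework would construct the plugin from its config entry,
# dispatch button presses to exec_command(), and render what get_status() returns.
#
#   mod = TestMod(config={})
#   mod.exec_command("inc")
#   mod.get_status()   # -> {'caption': 'Test module', 'status': '1', 'actions': [...]}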
| gpl-3.0 |
FranzSchubert92/cw | python/game_of_stones.py | 1 | 2265 | """
Two players (numbered 1 and 2) are playing a game with n stones. Player 1
always plays first, and the two players move in alternating turns. The game's
rules are as follows:
In a single move, a player can remove 2, 3, or 5 stones from the game board.
If a player is unable to make a move, that player loses the game.
Given the number of stones, find and print the name of the winner on a new line.
Each player plays optimally, meaning they will not make a move that causes them
to lose the game if some better, winning move exists.
Input Format
The first line contains an integer, T, denoting the number of test cases.
Each of the subsequent lines contains a single integer n denoting the number
of stones in a test case.
Output Format
On a new line for each test case, print 'First' if the first player wins;
otherwise, print 'Second'.
# doctests
>>> play(1)
'Second'
>>> play(2)
'First'
>>> play(3)
'First'
>>> play(4)
'First'
>>> play(5)
'First'
>>> play(6)
'First'
>>> play(7)
'Second'
>>> play(8)
'Second'
>>> play(10)
'First'
Explanation
In the sample, we have 8 testcases.
We'll refer to our two players as player 1 and player 2.
If n = 1, player 1 can't make any moves and loses the game (i.e., player 2
wins and we print 'Second' on a new line).
If n = 2, player 1 removes 2 stones in their first move and wins the game, so
we print 'First' on a new line.
If n = 3, player 1 removes 2 stones in their first move, leaving 1 stone on
the board. Because player 2 is left with no available moves, player 1 wins and
we print 'First' on a new line.
If n = 4, player 1 removes 3 stones in their first move, leaving 1 stone on
the board. Because player 2 has no available moves, player 1 wins and we print
'First' on a new line.
"""
def play(stones):
# "moves" is a map from number of stones to whether player1 will win;
# player1 always goes first;
moves = {0:False, 1:False, 2:True, 3:True, 4:True, 5:True, 6:True, 7:False}
x = max(moves.keys())
while x < stones:
x += 1
if moves[x-2] == moves[x-3] == moves[x-5]:
moves[x] = not moves[x-2]
elif not moves[x-5] or not moves[x-3] or not moves[x-2]:
moves[x] = True
return "First" if moves[stones] else "Second"
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    T = int(input())
    while T:
        num_stones = int(input())
        print(play(num_stones))
        T -= 1
| bsd-3-clause |
adlius/osf.io | website/project/views/node.py | 1 | 51221 | # -*- coding: utf-8 -*-
import os
import logging
from rest_framework import status as http_status
import math
from collections import defaultdict
from flask import request
from django.apps import apps
from django.utils import timezone
from django.core.exceptions import ValidationError
from django.db.models import Q, OuterRef, Subquery
from framework import status
from framework.utils import iso8601format
from framework.flask import redirect # VOL-aware redirect
from framework.auth.decorators import must_be_logged_in, collect_auth
from website.ember_osf_web.decorators import ember_flag_is_active
from api.waffle.utils import flag_is_active, storage_i18n_flag_active, storage_usage_flag_active
from framework.exceptions import HTTPError
from osf.models.nodelog import NodeLog
from osf.utils.functional import rapply
from osf.utils.registrations import strip_registered_meta_comments
from osf.utils import sanitize
from osf import features
from website import language
from website.util import rubeus
from website.ember_osf_web.views import use_ember_app
from osf.exceptions import NodeStateError
from website.project import new_node, new_private_link
from website.project.decorators import (
must_be_contributor_or_public_but_not_anonymized,
must_be_contributor_or_public,
must_be_valid_project,
must_have_permission,
must_not_be_registration,
must_not_be_retracted_registration,
)
from osf.utils.tokens import process_token_or_pass
from website.util.rubeus import collect_addon_js
from website.project.model import has_anonymous_link, NodeUpdateError, validate_title
from website.project.forms import NewNodeForm
from website.project.utils import sizeof_fmt
from website.project.metadata.utils import serialize_meta_schemas
from addons.wiki.models import WikiPage
from osf.models import AbstractNode, Collection, Contributor, Guid, PrivateLink, Node, NodeRelation, Preprint
from osf.models.licenses import serialize_node_license_record
from osf.utils.sanitize import strip_html
from osf.utils.permissions import ADMIN, READ, WRITE, CREATOR_PERMISSIONS, ADMIN_NODE
from website import settings
from website.views import find_bookmark_collection, validate_page_num
from website.views import serialize_node_summary, get_storage_region_list
from website.profile import utils
from addons.mendeley.provider import MendeleyCitationsProvider
from addons.zotero.provider import ZoteroCitationsProvider
from addons.wiki.utils import serialize_wiki_widget
from addons.wiki.models import WikiVersion
from addons.dataverse.utils import serialize_dataverse_widget
from addons.forward.utils import serialize_forward_widget
r_strip_html = lambda collection: rapply(collection, strip_html)
logger = logging.getLogger(__name__)
@must_be_valid_project
@must_have_permission(WRITE)
def edit_node(auth, node, **kwargs):
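    # Expected JSON body (illustrative values): {"name": "title", "value": "My new title"}
    # where "name" is one of "title", "description" or "category".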
post_data = request.json
edited_field = post_data.get('name')
value = post_data.get('value', '')
new_val = None
if edited_field == 'title':
try:
node.set_title(value, auth=auth)
except ValidationError as e:
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(message_long=str(e))
)
new_val = node.title
elif edited_field == 'description':
node.set_description(value, auth=auth)
new_val = node.description
elif edited_field == 'category':
node.category = new_val = value
try:
node.save()
except ValidationError as e:
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(message_long=str(e))
)
return {
'status': 'success',
'newValue': new_val # Used by x-editable widget to reflect changes made by sanitizer
}
##############################################################################
# New Project
##############################################################################
@must_be_logged_in
def project_new(**kwargs):
return {}
@must_be_logged_in
def project_new_post(auth, **kwargs):
user = auth.user
data = request.get_json()
title = strip_html(data.get('title'))
title = title.strip()
category = data.get('category', 'project')
template = data.get('template')
description = strip_html(data.get('description'))
campaign = data.get('campaign', None)
new_project = {}
if template:
original_node = AbstractNode.load(template)
changes = {
'title': title,
'category': category,
'template_node': original_node,
}
if description:
changes['description'] = description
project = original_node.use_as_template(
auth=auth,
changes={
template: changes,
}
)
else:
try:
project = new_node(category, title, user, description, campaign=campaign)
except ValidationError as e:
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(message_long=str(e))
)
new_project = _view_project(project, auth)
return {
'projectUrl': project.url,
'newNode': new_project['node'] if new_project else None
}, http_status.HTTP_201_CREATED
@must_be_logged_in
@must_be_valid_project
def project_new_from_template(auth, node, **kwargs):
new_node = node.use_as_template(
auth=auth,
changes=dict(),
)
return {'url': new_node.url}, http_status.HTTP_201_CREATED, None
##############################################################################
# New Node
##############################################################################
@must_be_valid_project
@must_have_permission(WRITE)
@must_not_be_registration
def project_new_node(auth, node, **kwargs):
"""
There's an APIv2 endpoint that does this same thing!
If you make changes here, see if they need to be made there.
"""
form = NewNodeForm(request.form)
user = auth.user
if form.validate():
try:
new_component = new_node(
title=strip_html(form.title.data),
user=user,
category=form.category.data,
parent=node,
)
except ValidationError as e:
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(message_long=str(e))
)
redirect_url = node.url
message = (
'Your component was created successfully. You can keep working on the project page below, '
'or go to the new <u><a href={component_url}>component</a></u>.'
).format(component_url=new_component.url)
if form.inherit_contributors.data and node.has_permission(user, WRITE):
for contributor in node.contributors:
# Using permission property off of Contributor model to get contributor permissions - not group member perms
perm = CREATOR_PERMISSIONS if contributor._id == user._id else Contributor.objects.get(user_id=contributor.id, node_id=node.id).permission
if contributor._id == user._id and not contributor.is_registered:
new_component.add_unregistered_contributor(
fullname=contributor.fullname, email=contributor.email,
permissions=perm, auth=auth, existing_user=contributor
)
else:
new_component.add_contributor(contributor, permissions=perm, auth=auth)
for group in node.osf_groups:
if group.is_manager(user):
new_component.add_osf_group(group, group.get_permission_to_node(node), auth=auth)
new_component.save()
redirect_url = new_component.url + 'contributors/'
message = (
'Your component was created successfully. You can edit the contributor permissions below, '
'work on your <u><a href={component_url}>component</a></u> or return to the <u> '
'<a href="{project_url}">project page</a></u>.'
).format(component_url=new_component.url, project_url=node.url)
status.push_status_message(message, kind='info', trust=True)
return {
'status': 'success',
}, 201, None, redirect_url
else:
# TODO: This function doesn't seem to exist anymore?
status.push_errors_to_status(form.errors)
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, redirect_url=node.url)
@must_be_logged_in
@must_be_valid_project
def project_before_fork(auth, node, **kwargs):
user = auth.user
prompts = node.callback('before_fork', user=user)
if node.has_pointers_recursive:
prompts.append(
language.BEFORE_FORK_HAS_POINTERS.format(
category=node.project_or_component
)
)
return {'prompts': prompts}
@must_be_logged_in
@must_be_valid_project
def project_before_template(auth, node, **kwargs):
prompts = []
for addon in node.get_addons():
if 'node' in addon.config.configs:
if addon.to_json(auth.user)['addon_full_name']:
prompts.append(addon.to_json(auth.user)['addon_full_name'])
return {'prompts': prompts, 'isRegistration': node.is_registration}
@must_be_valid_project
@must_be_contributor_or_public_but_not_anonymized
@must_not_be_registration
def node_registrations(auth, node, **kwargs):
if request.path.startswith('/project/'):
return redirect('/{}/registrations/'.format(node._id))
return use_ember_app()
@must_be_valid_project
@must_be_contributor_or_public_but_not_anonymized
@must_not_be_retracted_registration
def node_forks(auth, node, **kwargs):
if request.path.startswith('/project/'):
return redirect('/' + node._id + '/forks/')
return use_ember_app()
@must_be_valid_project
@must_not_be_retracted_registration
@must_be_logged_in
@must_have_permission(READ)
@ember_flag_is_active(features.EMBER_PROJECT_SETTINGS)
def node_setting(auth, node, **kwargs):
if node.is_registration and flag_is_active(request, features.EMBER_REGISTRIES_DETAIL_PAGE):
# Registration settings page obviated during redesign
return redirect(node.url)
auth.user.update_affiliated_institutions_by_email_domain()
auth.user.save()
ret = _view_project(node, auth, primary=True)
ret['include_wiki_settings'] = WikiPage.objects.include_wiki_settings(node)
ret['wiki_enabled'] = 'wiki' in node.get_addon_names()
ret['comments'] = {
'level': node.comment_level,
}
addon_settings = {}
for addon in ['forward']:
addon_config = apps.get_app_config('addons_{}'.format(addon))
config = addon_config.to_json()
config['template_lookup'] = addon_config.template_lookup
config['addon_icon_url'] = addon_config.icon_url
config['node_settings_template'] = os.path.basename(addon_config.node_settings_template)
addon_settings[addon] = config
ret['addon_settings'] = addon_settings
ret['categories'] = settings.NODE_CATEGORY_MAP
ret['categories'].update({
'project': 'Project'
})
return ret
@must_be_valid_project
@must_not_be_registration
@must_be_logged_in
@must_have_permission(WRITE)
def node_addons(auth, node, **kwargs):
ret = _view_project(node, auth, primary=True)
addon_settings = serialize_addons(node, auth)
ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
# If an addon is default you cannot connect/disconnect so we don't have to load it.
ret['addon_settings'] = [addon for addon in addon_settings]
# Addons can have multiple categories, but we only want a set of unique ones being used.
ret['addon_categories'] = set([item for addon in addon_settings for item in addon['categories']])
# The page only needs to load enabled addons and it refreshes when a new addon is being enabled.
ret['addon_js'] = collect_node_config_js([addon for addon in addon_settings if addon['enabled']])
return ret
def serialize_addons(node, auth):
addon_settings = []
addons_available = [addon for addon in settings.ADDONS_AVAILABLE
if addon not in settings.SYSTEM_ADDED_ADDONS['node']
and addon.short_name not in ('wiki', 'forward', 'twofactor')]
for addon in addons_available:
addon_config = apps.get_app_config('addons_{}'.format(addon.short_name))
config = addon_config.to_json()
config['template_lookup'] = addon_config.template_lookup
config['addon_icon_url'] = addon_config.icon_url
config['node_settings_template'] = os.path.basename(addon_config.node_settings_template)
config['addon_short_name'] = addon.short_name
config['addon_full_name'] = addon.full_name
config['categories'] = addon.categories
config['enabled'] = node.has_addon(addon.short_name)
config['default'] = addon.short_name in settings.ADDONS_DEFAULT
if node.has_addon(addon.short_name):
node_json = node.get_addon(addon.short_name).to_json(auth.user)
config.update(node_json)
addon_settings.append(config)
addon_settings = sorted(addon_settings, key=lambda addon: addon['full_name'].lower())
return addon_settings
def collect_node_config_js(addons):
"""Collect webpack bundles for each of the addons' node-cfg.js modules. Return
the URLs for each of the JS modules to be included on the node addons config page.
:param list addons: List of node's addon config records.
"""
js_modules = []
for addon in addons:
source_path = os.path.join(
settings.ADDON_PATH,
addon['short_name'],
'static',
'node-cfg.js',
)
if os.path.exists(source_path):
asset_path = os.path.join(
'/',
'static',
'public',
'js',
addon['short_name'],
'node-cfg.js',
)
js_modules.append(asset_path)
return js_modules
@must_have_permission(WRITE)
@must_not_be_registration
def node_choose_addons(auth, node, **kwargs):
node.config_addons(request.json, auth)
@must_be_valid_project
@must_not_be_retracted_registration
@must_have_permission(READ)
@ember_flag_is_active(features.EMBER_PROJECT_CONTRIBUTORS)
def node_contributors(auth, node, **kwargs):
ret = _view_project(node, auth, primary=True)
ret['contributors'] = utils.serialize_contributors(node.contributors, node)
ret['access_requests'] = utils.serialize_access_requests(node)
ret['adminContributors'] = utils.serialize_contributors(node.parent_admin_contributors, node, admin=True)
return ret
@must_have_permission(ADMIN)
def configure_comments(node, **kwargs):
comment_level = request.json.get('commentLevel')
if not comment_level:
node.comment_level = None
elif comment_level in ['public', 'private']:
node.comment_level = comment_level
else:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
node.save()
@must_have_permission(ADMIN)
@must_not_be_registration
def configure_requests(node, **kwargs):
access_requests_enabled = request.get_json().get('accessRequestsEnabled')
auth = kwargs.get('auth', None)
node.set_access_requests_enabled(access_requests_enabled, auth, save=True)
return {'access_requests_enabled': access_requests_enabled}, 200
##############################################################################
# View Project
##############################################################################
@process_token_or_pass
@must_be_valid_project(retractions_valid=True)
@must_be_contributor_or_public
@ember_flag_is_active(features.EMBER_PROJECT_DETAIL)
def view_project(auth, node, **kwargs):
primary = '/api/v1' not in request.path
ret = _view_project(node, auth,
primary=primary,
embed_contributors=True,
embed_descendants=True
)
ret['addon_capabilities'] = settings.ADDON_CAPABILITIES
# Collect the URIs to the static assets for addons that have widgets
ret['addon_widget_js'] = list(collect_addon_js(
node,
filename='widget-cfg.js',
config_entry='widget'
))
ret.update(rubeus.collect_addon_assets(node))
access_request = node.requests.filter(creator=auth.user).exclude(machine_state='accepted')
ret['user']['access_request_state'] = access_request.get().machine_state if access_request else None
addons_widget_data = {
'wiki': None,
'mendeley': None,
'zotero': None,
'forward': None,
'dataverse': None
}
if 'wiki' in ret['addons']:
addons_widget_data['wiki'] = serialize_wiki_widget(node)
if 'dataverse' in ret['addons']:
addons_widget_data['dataverse'] = serialize_dataverse_widget(node)
if 'forward' in ret['addons']:
addons_widget_data['forward'] = serialize_forward_widget(node)
if 'zotero' in ret['addons']:
node_addon = node.get_addon('zotero')
zotero_widget_data = ZoteroCitationsProvider().widget(node_addon)
addons_widget_data['zotero'] = zotero_widget_data
if 'mendeley' in ret['addons']:
node_addon = node.get_addon('mendeley')
mendeley_widget_data = MendeleyCitationsProvider().widget(node_addon)
addons_widget_data['mendeley'] = mendeley_widget_data
ret.update({'addons_widget_data': addons_widget_data})
return ret
@process_token_or_pass
@must_be_valid_project(retractions_valid=True)
@must_be_contributor_or_public
def token_action(auth, node, **kwargs):
return redirect(node.url)
# Reorder components
@must_be_valid_project
@must_not_be_registration
@must_have_permission(WRITE)
def project_reorder_components(node, **kwargs):
"""Reorders the components in a project's component list.
:param-json list new_list: List of strings that include node GUIDs.
"""
ordered_guids = request.get_json().get('new_list', [])
node_relations = (
node.node_relations
.select_related('child')
.filter(child__is_deleted=False)
)
deleted_node_relation_ids = list(
node.node_relations.select_related('child')
.filter(child__is_deleted=True)
.values_list('pk', flat=True)
)
if len(ordered_guids) > len(node_relations):
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data=dict(message_long='Too many node IDs'))
# Ordered NodeRelation pks, sorted according the order of guids passed in the request payload
new_node_relation_ids = [
each.id for each in sorted(node_relations,
key=lambda nr: ordered_guids.index(nr.child._id))
]
if len(node_relations) == len(ordered_guids):
node.set_noderelation_order(new_node_relation_ids + deleted_node_relation_ids)
node.save()
return {'nodes': ordered_guids}
logger.error('Got invalid node list in reorder components')
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
@must_be_valid_project
@must_be_contributor_or_public
@must_not_be_retracted_registration
def project_statistics(auth, node, **kwargs):
if request.path.startswith('/project/'):
return redirect('/' + node._id + '/analytics/')
return use_ember_app()
###############################################################################
# Make Private/Public
###############################################################################
@must_be_valid_project
@must_have_permission(ADMIN)
def project_set_privacy(auth, node, **kwargs):
permissions = kwargs.get('permissions')
if permissions is None:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
try:
node.set_privacy(permissions, auth)
except NodeStateError as e:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST, data=dict(
message_short="Can't change privacy",
message_long=str(e)
))
return {
'status': 'success',
'permissions': permissions,
}
@must_be_valid_project
@must_have_permission(WRITE)
def update_node(auth, node, **kwargs):
# in node.update() method there is a key list node.WRITABLE_WHITELIST only allow user to modify
# category, title, and description which can be edited by write permission contributor
data = r_strip_html(request.get_json())
try:
updated_field_names = node.update(data, auth=auth)
except NodeUpdateError as e:
raise HTTPError(400, data=dict(
message_short="Failed to update attribute '{0}'".format(e.key),
message_long=e.reason
))
# Need to cast tags to a string to make them JSON-serialiable
updated_fields_dict = {
key: getattr(node, key) if key != 'tags' else [str(tag) for tag in node.tags]
for key in updated_field_names
if key != 'logs' and key != 'modified' and key != 'last_logged'
}
return {'updated_fields': updated_fields_dict}
@must_be_valid_project
@must_have_permission(ADMIN)
@must_not_be_registration
def component_remove(auth, node, **kwargs):
"""Remove component, and recursively remove its children. If node has a
parent, add log and redirect to parent; else redirect to user dashboard.
"""
try:
node.remove_node(auth)
except NodeStateError as e:
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data={
'message_short': 'Error',
'message_long': 'Could not delete component: ' + str(e)
},
)
message = '{} has been successfully deleted.'.format(
node.project_or_component.capitalize()
)
id = '{}_deleted'.format(node.project_or_component)
status.push_status_message(message, kind='success', trust=False, id=id)
parent = node.parent_node
if parent and parent.can_view(auth):
redirect_url = node.parent_node.url
else:
redirect_url = '/dashboard/'
return {
'url': redirect_url,
}
@must_be_valid_project
@must_have_permission(ADMIN)
def remove_private_link(*args, **kwargs):
link_id = request.json['private_link_id']
try:
link = PrivateLink.objects.get(_id=link_id)
except PrivateLink.DoesNotExist:
raise HTTPError(http_status.HTTP_404_NOT_FOUND)
link.is_deleted = True
link.deleted = timezone.now()
link.save()
for node in link.nodes.all():
log_dict = {
'project': node.parent_id,
'node': node._id,
'user': kwargs.get('auth').user._id,
'anonymous_link': link.anonymous,
}
node.add_log(
NodeLog.VIEW_ONLY_LINK_REMOVED,
log_dict,
auth=kwargs.get('auth', None)
)
# TODO: Split into separate functions
def _render_addons(addons):
widgets = {}
configs = {}
js = []
css = []
for addon in addons:
configs[addon.config.short_name] = addon.config.to_json()
js.extend(addon.config.include_js.get('widget', []))
css.extend(addon.config.include_css.get('widget', []))
js.extend(addon.config.include_js.get('files', []))
css.extend(addon.config.include_css.get('files', []))
return widgets, configs, js, css
def _should_show_wiki_widget(node, user):
has_wiki = bool(node.get_addon('wiki'))
wiki_page = WikiVersion.objects.get_for_node(node, 'home')
if node.has_permission(user, WRITE) and not node.is_registration:
return has_wiki
else:
return has_wiki and wiki_page and wiki_page.html(node)
def _view_project(node, auth, primary=False,
embed_contributors=False, embed_descendants=False,
embed_registrations=False, embed_forks=False):
"""Build a JSON object containing everything needed to render
project.view.mako.
"""
node = AbstractNode.objects.filter(pk=node.pk).include('contributor__user__guids').get()
user = auth.user
parent = node.find_readable_antecedent(auth)
if user:
bookmark_collection = find_bookmark_collection(user)
bookmark_collection_id = bookmark_collection._id
in_bookmark_collection = bookmark_collection.guid_links.filter(_id=node._id).exists()
else:
in_bookmark_collection = False
bookmark_collection_id = ''
view_only_link = auth.private_key or request.args.get('view_only', '').strip('/')
anonymous = has_anonymous_link(node, auth)
addons = list(node.get_addons())
widgets, configs, js, css = _render_addons(addons)
redirect_url = node.url + '?view_only=None'
disapproval_link = ''
if (node.is_pending_registration and node.has_permission(user, ADMIN)):
disapproval_link = node.root.registration_approval.stashed_urls.get(user._id, {}).get('reject', '')
if (node.is_pending_embargo and node.has_permission(user, ADMIN)):
disapproval_link = node.root.embargo.stashed_urls.get(user._id, {}).get('reject', '')
# Before page load callback; skip if not primary call
if primary:
for addon in addons:
messages = addon.before_page_load(node, user) or []
for message in messages:
status.push_status_message(message, kind='info', dismissible=False, trust=True)
NodeRelation = apps.get_model('osf.NodeRelation')
is_registration = node.is_registration
data = {
'node': {
'disapproval_link': disapproval_link,
'id': node._primary_key,
'title': sanitize.unescape_entities(node.title),
'category': node.category_display,
'category_short': node.category,
'node_type': node.project_or_component,
'description': node.description or '',
'license': serialize_node_license_record(node.license),
'url': node.url,
'api_url': node.api_url,
'absolute_url': node.absolute_url,
'redirect_url': redirect_url,
'display_absolute_url': node.display_absolute_url,
'update_url': node.api_url_for('update_node'),
'in_dashboard': in_bookmark_collection,
'is_public': node.is_public,
'is_archiving': node.archiving,
'date_created': iso8601format(node.created),
'date_modified': iso8601format(node.last_logged) if node.last_logged else '',
'tags': list(node.tags.filter(system=False).values_list('name', flat=True)),
'children': node.nodes_active.exists(),
'child_exists': Node.objects.get_children(node, active=True).exists(),
'is_supplemental_project': node.has_linked_published_preprints,
'is_registration': is_registration,
'is_pending_registration': node.is_pending_registration if is_registration else False,
'is_retracted': node.is_retracted if is_registration else False,
'is_pending_retraction': node.is_pending_retraction if is_registration else False,
'retracted_justification': getattr(node.root.retraction, 'justification', None) if is_registration else None,
'date_retracted': iso8601format(getattr(node.root.retraction, 'date_retracted', None)) if is_registration else '',
'embargo_end_date': node.embargo_end_date.strftime('%A, %b %d, %Y') if is_registration and node.embargo_end_date else '',
'is_pending_embargo': node.is_pending_embargo if is_registration else False,
'is_embargoed': node.is_embargoed if is_registration else False,
'is_pending_embargo_termination': is_registration and node.is_pending_embargo_termination,
'registered_from_url': node.registered_from.url if is_registration else '',
'registered_date': iso8601format(node.registered_date) if is_registration else '',
'root_id': node.root._id if node.root else None,
'registered_meta': strip_registered_meta_comments(node.registered_meta),
'registered_schemas': serialize_meta_schemas(list(node.registered_schema.all())) if is_registration else False,
'is_fork': node.is_fork,
'is_collected': node.is_collected,
'collections': serialize_collections(node.collecting_metadata_list, auth),
'forked_from_id': node.forked_from._primary_key if node.is_fork else '',
'forked_from_display_absolute_url': node.forked_from.display_absolute_url if node.is_fork else '',
'forked_date': iso8601format(node.forked_date) if node.is_fork else '',
'fork_count': node.forks.exclude(type='osf.registration').filter(is_deleted=False).count(),
'private_links': [x.to_json() for x in node.private_links_active],
'link': view_only_link,
'templated_count': node.templated_list.count(),
'linked_nodes_count': NodeRelation.objects.filter(child=node, is_node_link=True).exclude(parent__type='osf.collection').count(),
'anonymous': anonymous,
'comment_level': node.comment_level,
'has_comments': node.comment_set.exists(),
'identifiers': {
'doi': node.get_identifier_value('doi'),
'ark': node.get_identifier_value('ark'),
},
'visible_preprints': serialize_preprints(node, user),
'institutions': get_affiliated_institutions(node) if node else [],
'has_draft_registrations': node.has_active_draft_registrations,
'access_requests_enabled': node.access_requests_enabled,
'storage_location': node.osfstorage_region.name,
'waterbutler_url': node.osfstorage_region.waterbutler_url,
'mfr_url': node.osfstorage_region.mfr_url,
'groups': list(node.osf_groups.values_list('name', flat=True)),
},
'parent_node': {
'exists': parent is not None,
'id': parent._primary_key if parent else '',
'title': parent.title if parent else '',
'category': parent.category_display if parent else '',
'url': parent.url if parent else '',
'api_url': parent.api_url if parent else '',
'absolute_url': parent.absolute_url if parent else '',
'registrations_url': parent.web_url_for('node_registrations', _guid=True) if parent else '',
'is_public': parent.is_public if parent else '',
'is_contributor_or_group_member': parent.is_contributor_or_group_member(user) if parent else '',
'is_contributor': parent.is_contributor(user) if parent else '',
'can_view': parent.can_view(auth) if parent else False,
},
'user': {
'is_contributor_or_group_member': node.is_contributor_or_group_member(user),
'is_contributor': node.is_contributor(user),
'is_admin': node.has_permission(user, ADMIN),
'is_admin_parent_contributor': parent.is_admin_parent(user, include_group_admin=False) if parent else False,
'is_admin_parent_contributor_or_group_member': parent.is_admin_parent(user) if parent else False,
'can_edit': node.has_permission(user, WRITE),
'can_edit_tags': node.has_permission(user, WRITE),
'has_read_permissions': node.has_permission(user, READ),
'permissions': node.get_permissions(user) if user else [],
'id': user._id if user else None,
'username': user.username if user else None,
'fullname': user.fullname if user else '',
'can_comment': node.can_comment(auth),
'show_wiki_widget': _should_show_wiki_widget(node, user),
'dashboard_id': bookmark_collection_id,
'institutions': get_affiliated_institutions(user) if user else [],
},
# TODO: Namespace with nested dicts
'addons_enabled': [each.short_name for each in addons],
'addons': configs,
'addon_widgets': widgets,
'addon_widget_js': js,
'addon_widget_css': css,
'node_categories': [
{'value': key, 'display_name': value}
for key, value in list(settings.NODE_CATEGORY_MAP.items())
]
}
    # Default should be at the top of the list for the UI; on the project overview page,
    # the default region for a component is that of its parent node.
region_list = get_storage_region_list(user, node=node)
data.update({'storage_regions': region_list})
data.update({'storage_flag_is_active': storage_i18n_flag_active()})
if storage_usage_flag_active():
storage_usage = node.storage_usage
if storage_usage:
data['node']['storage_usage'] = sizeof_fmt(storage_usage)
if embed_contributors and not anonymous:
data['node']['contributors'] = utils.serialize_visible_contributors(node)
else:
data['node']['contributors'] = list(node.contributors.values_list('guids___id', flat=True))
if embed_descendants:
descendants, all_readable = _get_readable_descendants(auth=auth, node=node)
data['user']['can_sort'] = all_readable
data['node']['descendants'] = [
serialize_node_summary(node=each, auth=auth, primary=not node.has_node_link_to(each), show_path=False)
for each in descendants
]
if embed_registrations:
data['node']['registrations'] = [
serialize_node_summary(node=each, auth=auth, show_path=False)
for each in node.registrations_all.order_by('-registered_date').exclude(is_deleted=True)
]
if embed_forks:
data['node']['forks'] = [
serialize_node_summary(node=each, auth=auth, show_path=False)
for each in node.forks.exclude(type='osf.registration').exclude(is_deleted=True).order_by('-forked_date')
]
return data
def get_affiliated_institutions(obj):
ret = []
for institution in obj.affiliated_institutions.all():
ret.append({
'name': institution.name,
'logo_path': institution.logo_path,
'logo_path_rounded_corners': institution.logo_path_rounded_corners,
'id': institution._id,
})
return ret
def serialize_collections(cgms, auth):
return [{
'title': cgm.collection.title,
'name': cgm.collection.provider.name,
'url': '/collections/{}/'.format(cgm.collection.provider._id),
'status': cgm.status,
'type': cgm.collected_type,
'issue': cgm.issue,
'volume': cgm.volume,
'program_area': cgm.program_area,
'subjects': list(cgm.subjects.values_list('text', flat=True)),
'is_public': cgm.collection.is_public,
'logo': cgm.collection.provider.get_asset_url('favicon')
} for cgm in cgms if cgm.collection.provider and (cgm.collection.is_public or
(auth.user and auth.user.has_perm('read_collection', cgm.collection)))]
def serialize_preprints(node, user):
return [
{
'title': preprint.title,
'is_moderated': preprint.provider.reviews_workflow,
'is_withdrawn': preprint.date_withdrawn is not None,
'state': preprint.machine_state,
'word': preprint.provider.preprint_word,
'provider': {'name': 'OSF Preprints' if preprint.provider.name == 'Open Science Framework' else preprint.provider.name, 'workflow': preprint.provider.reviews_workflow},
'url': preprint.url,
'absolute_url': preprint.absolute_url
} for preprint in Preprint.objects.can_view(base_queryset=node.preprints, user=user).filter(date_withdrawn__isnull=True)
]
def serialize_children(child_list, nested, indent=0):
"""
Returns the serialized representation of a list of child nodes.
This is a helper function for _get_children and as such it does not
redundantly check permissions.
"""
results = []
for child in child_list:
results.append({
'id': child._id,
'title': child.title,
'is_public': child.is_public,
'parent_id': child.parentnode_id,
'indent': indent
})
if child._id in nested.keys():
results.extend(serialize_children(nested.get(child._id), nested, indent + 1))
return results
def _get_children(node, auth):
"""
Returns the serialized representation of the given node and all of its children
for which the given user has ADMIN permission.
"""
parent_node_sqs = NodeRelation.objects.filter(child=OuterRef('pk'), is_node_link=False).values('parent__guids___id')
children = (Node.objects.get_children(node)
.filter(is_deleted=False)
.annotate(parentnode_id=Subquery(parent_node_sqs[:1])))
admin_children = Node.objects.get_nodes_for_user(auth.user, ADMIN_NODE, children)
nested = defaultdict(list)
for child in admin_children:
nested[child.parentnode_id].append(child)
return serialize_children(nested[node._id], nested)
@must_be_valid_project
@must_have_permission(ADMIN)
def private_link_table(node, **kwargs):
data = {
'node': {
'absolute_url': node.absolute_url,
'private_links': [x.to_json() for x in node.private_links_active],
}
}
return data
@collect_auth
@must_be_valid_project
@must_have_permission(ADMIN)
def get_editable_children(auth, node, **kwargs):
children = _get_children(node, auth)
return {
'node': {'id': node._id, 'title': node.title, 'is_public': node.is_public},
'children': children,
}
@must_be_valid_project
def get_recent_logs(node, **kwargs):
logs = list(reversed(node.logs._to_primary_keys()))[:3]
return {'logs': logs}
def _get_readable_descendants(auth, node, permission=None):
descendants = []
all_readable = True
for child in node.get_nodes(is_deleted=False):
if permission:
perm = permission.lower().strip()
if not child.has_permission(auth.user, perm):
all_readable = False
continue
# User can view child
if child.can_view(auth):
descendants.append(child)
# Child is a node link and user has write permission
elif node.linked_nodes.filter(id=child.id).exists():
if node.has_permission(auth.user, WRITE):
descendants.append(child)
else:
all_readable = False
else:
all_readable = False
for descendant in child.find_readable_descendants(auth):
descendants.append(descendant)
return descendants, all_readable
def serialize_child_tree(child_list, user, nested):
"""
Recursively serializes and returns a list of child nodes.
This is a helper function for node_child_tree and as such it does not
redundantly check permissions.
"""
serialized_children = []
for child in child_list:
if child.has_permission(user, READ) or child.has_permission_on_children(user, READ):
# is_admin further restricted here to mean user is a traditional admin group contributor -
# admin group membership not sufficient
contributors = [{
'id': contributor.user._id,
'is_admin': child.is_admin_contributor(contributor.user),
'is_confirmed': contributor.user.is_confirmed,
'visible': contributor.visible
} for contributor in child.contributor_set.all()]
serialized_children.append({
'node': {
'id': child._id,
'url': child.url,
'title': sanitize.unescape_entities(child.title),
'is_public': child.is_public,
'contributors': contributors,
'is_admin': child.has_permission(user, ADMIN),
'is_supplemental_project': child.has_linked_published_preprints,
},
'user_id': user._id,
'children': serialize_child_tree(nested.get(child._id), user, nested) if child._id in nested.keys() else [],
'nodeType': 'project' if not child.parentnode_id else 'component',
'category': child.category,
'permissions': {
'view': True,
'is_admin': child.has_permission(user, ADMIN)
}
})
return sorted(serialized_children, key=lambda k: len(k['children']), reverse=True)
def node_child_tree(user, node):
""" Returns the serialized representation (for treebeard) of a given node and its children.
:param user: OSFUser object
:param node: parent project Node object
:return: treebeard-formatted data
"""
serialized_nodes = []
assert node, '{} is not a valid Node.'.format(node._id)
parent_node_sqs = NodeRelation.objects.filter(child=OuterRef('pk'), is_node_link=False).values('parent__guids___id')
children = (Node.objects.get_children(node)
.filter(is_deleted=False)
.annotate(parentnode_id=Subquery(parent_node_sqs[:1]))
.include('contributor__user__guids')
)
nested = defaultdict(list)
for child in children:
nested[child.parentnode_id].append(child)
contributors = [{
'id': contributor.user._id,
'is_admin': node.is_admin_contributor(contributor.user),
'is_confirmed': contributor.user.is_confirmed,
'visible': contributor.visible
} for contributor in node.contributor_set.all().include('user__guids')]
can_read = node.has_permission(user, READ)
is_admin = node.has_permission(user, ADMIN)
if can_read or node.has_permission_on_children(user, READ):
serialized_nodes.append({
'node': {
'id': node._id,
'url': node.url if can_read else '',
'title': sanitize.unescape_entities(node.title) if can_read else 'Private Project',
'is_public': node.is_public,
'contributors': contributors,
'is_admin': is_admin,
'is_supplemental_project': node.has_linked_published_preprints,
},
'user_id': user._id,
'children': serialize_child_tree(nested.get(node._id), user, nested) if node._id in nested.keys() else [],
'kind': 'folder' if not node.parent_node or not node.parent_node.has_permission(user, READ) else 'node',
'nodeType': node.project_or_component,
'category': node.category,
'permissions': {
'view': can_read,
'is_admin': is_admin
}
})
return serialized_nodes
@must_be_logged_in
@must_be_valid_project
def get_node_tree(auth, **kwargs):
node = kwargs.get('node') or kwargs['project']
tree = node_child_tree(auth.user, node)
return tree
@must_be_valid_project
@must_have_permission(ADMIN)
def project_generate_private_link_post(auth, node, **kwargs):
""" creata a new private link object and add it to the node and its selected children"""
node_ids = request.json.get('node_ids', [])
name = request.json.get('name', '')
anonymous = request.json.get('anonymous', False)
if node._id not in node_ids:
node_ids.insert(0, node._id)
nodes = [AbstractNode.load(node_id) for node_id in node_ids]
try:
new_link = new_private_link(
name=name, user=auth.user, nodes=nodes, anonymous=anonymous
)
except ValidationError as e:
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(message_long=str(e))
)
return new_link
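# Example request body handled above (hypothetical ids, shown for illustration only):
#   {"node_ids": ["abc12", "def34"], "name": "Reviewer link", "anonymous": true}
# The parent node's id is prepended automatically if it is missing from node_ids.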
@must_be_valid_project
@must_have_permission(ADMIN)
def project_private_link_edit(auth, **kwargs):
name = request.json.get('value', '')
try:
validate_title(name)
except ValidationError as e:
message = 'Invalid link name.' if e.message == 'Invalid title.' else e.message
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(message_long=message)
)
private_link_id = request.json.get('pk', '')
private_link = PrivateLink.load(private_link_id)
if private_link:
new_name = strip_html(name)
private_link.name = new_name
private_link.save()
return new_name
else:
raise HTTPError(
http_status.HTTP_400_BAD_REQUEST,
data=dict(message_long='View-only link not found.')
)
def _serialize_node_search(node):
"""Serialize a node for use in pointer search.
:param Node node: Node to serialize
:return: Dictionary of node data
"""
data = {
'id': node._id,
'title': node.title,
'etal': len(node.visible_contributors) > 1,
'isRegistration': node.is_registration
}
if node.is_registration:
data['title'] += ' (registration)'
data['dateRegistered'] = node.registered_date.isoformat()
else:
data['dateCreated'] = node.created.isoformat()
data['dateModified'] = node.modified.isoformat()
first_author = node.visible_contributors[0]
data['firstAuthor'] = first_author.family_name or first_author.given_name or first_author.fullname
return data
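# Illustrative output of _serialize_node_search() for a non-registration node
# (values are placeholders; keys mirror the dictionary built above):
#   {'id': 'abc12', 'title': 'My Project', 'etal': False, 'isRegistration': False,
#    'dateCreated': '2020-01-01T00:00:00', 'dateModified': '2020-01-02T00:00:00',
#    'firstAuthor': 'Smith'}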
@must_be_logged_in
def search_node(auth, **kwargs):
"""
"""
# Get arguments
node = AbstractNode.load(request.json.get('nodeId'))
include_public = request.json.get('includePublic')
size = float(request.json.get('size', '5').strip())
page = request.json.get('page', 0)
query = request.json.get('query', '').strip()
start = (page * size)
if not query:
return {'nodes': []}
# Exclude current node from query if provided
nin = [node.id] + list(node._nodes.values_list('pk', flat=True)) if node else []
can_view_query = Q(_contributors=auth.user)
if include_public:
can_view_query = can_view_query | Q(is_public=True)
nodes = (AbstractNode.objects
.filter(
can_view_query,
title__icontains=query,
is_deleted=False)
.exclude(id__in=nin)
.exclude(type='osf.collection')
.exclude(type='osf.quickfilesnode'))
count = nodes.count()
pages = math.ceil(count / size)
validate_page_num(page, pages)
return {
'nodes': [
_serialize_node_search(each)
for each in nodes[start: start + size]
if each.contributors
],
'total': count,
'pages': pages,
'page': page
}
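# Worked example of the pagination above: with size=5.0 and 12 matching nodes,
# pages = math.ceil(12 / 5.0) == 3.0; page=1 gives start = 5.0, so the slice
# covers matches 5 through 9 (the second block of five results).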
def _add_pointers(node, pointers, auth):
"""
:param Node node: Node to which pointers will be added
:param list pointers: Nodes to add as pointers
"""
added = False
for pointer in pointers:
if isinstance(node, Collection):
node.collect_object(pointer, auth.user)
else:
node.add_pointer(pointer, auth, save=False)
added = True
if added:
node.save()
@collect_auth
def add_pointer(auth):
"""Add a single pointer to a node using only JSON parameters
"""
to_node_id = request.json.get('toNodeID')
pointer_to_move = request.json.get('pointerID')
if not (to_node_id and pointer_to_move):
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
pointer = AbstractNode.load(pointer_to_move)
to_node = Guid.load(to_node_id).referent
try:
_add_pointers(to_node, [pointer], auth)
except ValueError:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
@must_have_permission(WRITE)
@must_not_be_registration
def add_pointers(auth, node, **kwargs):
"""Add pointers to a node.
"""
node_ids = request.json.get('nodeIds')
if not node_ids:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
nodes = [
AbstractNode.load(node_id)
for node_id in node_ids
]
try:
_add_pointers(node, nodes, auth)
except ValueError:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
return {}
@must_have_permission(WRITE)
@must_not_be_registration
def remove_pointer(auth, node, **kwargs):
"""Remove a pointer from a node, raising a 400 if the pointer is not
in `node.nodes`.
"""
    # TODO: since this is a delete request, we shouldn't use the request body;
    # put the pointer id in the URL instead
pointer_id = request.json.get('pointerId')
if pointer_id is None:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
pointer = AbstractNode.load(pointer_id)
if pointer is None:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
try:
node.rm_pointer(pointer, auth=auth)
except ValueError:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
node.save()
@must_have_permission(WRITE)
@must_not_be_registration
def fork_pointer(auth, node, **kwargs):
"""Fork a pointer. Raises BAD_REQUEST if pointer not provided, not found,
or not present in `nodes`.
:param Auth auth: Consolidated authorization
    :param Node node: parent node of which the pointer is a child
    :return: Fork of the node to which the node link (pointer) points
"""
NodeRelation = apps.get_model('osf.NodeRelation')
linked_node_id = request.json.get('nodeId')
linked_node = AbstractNode.load(linked_node_id)
pointer = NodeRelation.objects.filter(child=linked_node, is_node_link=True, parent=node).first()
if pointer is None:
# TODO: Change this to 404?
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
try:
fork = node.fork_pointer(pointer, auth=auth, save=True)
except ValueError:
raise HTTPError(http_status.HTTP_400_BAD_REQUEST)
return {
'data': {
'node': serialize_node_summary(node=fork, auth=auth, show_path=False)
}
}, http_status.HTTP_201_CREATED
def abbrev_authors(node):
lead_author = node.visible_contributors[0]
ret = lead_author.family_name or lead_author.given_name or lead_author.fullname
if node.visible_contributors.count() > 1:
ret += ' et al.'
return ret
def serialize_pointer(node, auth):
if node.can_view(auth):
return {
'id': node._id,
'url': node.url,
'title': node.title,
'authorShort': abbrev_authors(node),
}
return {
'url': None,
'title': 'Private Component',
'authorShort': 'Private Author(s)',
}
@must_be_contributor_or_public
def get_pointed(auth, node, **kwargs):
"""View that returns the pointers for a project."""
NodeRelation = apps.get_model('osf.NodeRelation')
return {'pointed': [
serialize_pointer(each.parent, auth)
for each in NodeRelation.objects.filter(child=node, is_node_link=True)
]}
| apache-2.0 |
vberaudi/scipy | scipy/io/arff/tests/test_arffread.py | 27 | 7727 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import datetime
import os
import sys
from os.path import join as pjoin
if sys.version_info[0] >= 3:
from io import StringIO
else:
from cStringIO import StringIO
import numpy as np
from numpy.testing import (TestCase, assert_array_almost_equal, assert_array_equal, assert_equal,
assert_, assert_raises, dec, run_module_suite)
from scipy.io.arff.arffread import loadarff
from scipy.io.arff.arffread import read_header, parse_type, ParseArffError
from scipy._lib._version import NumpyVersion
data_path = pjoin(os.path.dirname(__file__), 'data')
test1 = os.path.join(data_path, 'test1.arff')
test2 = os.path.join(data_path, 'test2.arff')
test3 = os.path.join(data_path, 'test3.arff')
test4 = pjoin(data_path, 'test4.arff')
test5 = pjoin(data_path, 'test5.arff')
test6 = pjoin(data_path, 'test6.arff')
test7 = pjoin(data_path, 'test7.arff')
test8 = pjoin(data_path, 'test8.arff')
expect4_data = [(0.1, 0.2, 0.3, 0.4, 'class1'),
(-0.1, -0.2, -0.3, -0.4, 'class2'),
(1, 2, 3, 4, 'class3')]
expected_types = ['numeric', 'numeric', 'numeric', 'numeric', 'nominal']
missing = pjoin(data_path, 'missing.arff')
expect_missing_raw = np.array([[1, 5], [2, 4], [np.nan, np.nan]])
expect_missing = np.empty(3, [('yop', float), ('yap', float)])
expect_missing['yop'] = expect_missing_raw[:, 0]
expect_missing['yap'] = expect_missing_raw[:, 1]
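# Minimal usage sketch of the API exercised below (the path is the 'missing.arff'
# test fixture defined above; exact values come from the .arff file itself):
#   data, meta = loadarff(missing)
#   data['yop']   # -> float column with NaN for missing values, compared to expect_missing
#   meta.types()  # -> list of attribute types, e.g. ['numeric', 'numeric']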
class DataTest(TestCase):
def test1(self):
# Parsing trivial file with nothing.
self._test(test4)
def test2(self):
# Parsing trivial file with some comments in the data section.
self._test(test5)
def test3(self):
# Parsing trivial file with nominal attribute of 1 character.
self._test(test6)
def _test(self, test_file):
data, meta = loadarff(test_file)
for i in range(len(data)):
for j in range(4):
assert_array_almost_equal(expect4_data[i][j], data[i][j])
assert_equal(meta.types(), expected_types)
def test_filelike(self):
# Test reading from file-like object (StringIO)
f1 = open(test1)
data1, meta1 = loadarff(f1)
f1.close()
f2 = open(test1)
data2, meta2 = loadarff(StringIO(f2.read()))
f2.close()
assert_(data1 == data2)
assert_(repr(meta1) == repr(meta2))
class MissingDataTest(TestCase):
def test_missing(self):
data, meta = loadarff(missing)
for i in ['yop', 'yap']:
assert_array_almost_equal(data[i], expect_missing[i])
class HeaderTest(TestCase):
def test_type_parsing(self):
# Test parsing type of attribute from their value.
ofile = open(test2)
rel, attrs = read_header(ofile)
ofile.close()
expected = ['numeric', 'numeric', 'numeric', 'numeric', 'numeric',
'numeric', 'string', 'string', 'nominal', 'nominal']
for i in range(len(attrs)):
assert_(parse_type(attrs[i][1]) == expected[i])
def test_badtype_parsing(self):
# Test parsing wrong type of attribute from their value.
ofile = open(test3)
rel, attrs = read_header(ofile)
ofile.close()
for name, value in attrs:
assert_raises(ParseArffError, parse_type, value)
def test_fullheader1(self):
# Parsing trivial header with nothing.
ofile = open(test1)
rel, attrs = read_header(ofile)
ofile.close()
# Test relation
assert_(rel == 'test1')
# Test numerical attributes
assert_(len(attrs) == 5)
for i in range(4):
assert_(attrs[i][0] == 'attr%d' % i)
assert_(attrs[i][1] == 'REAL')
# Test nominal attribute
assert_(attrs[4][0] == 'class')
assert_(attrs[4][1] == '{class0, class1, class2, class3}')
def test_dateheader(self):
ofile = open(test7)
rel, attrs = read_header(ofile)
ofile.close()
assert_(rel == 'test7')
assert_(len(attrs) == 5)
assert_(attrs[0][0] == 'attr_year')
assert_(attrs[0][1] == 'DATE yyyy')
assert_(attrs[1][0] == 'attr_month')
assert_(attrs[1][1] == 'DATE yyyy-MM')
assert_(attrs[2][0] == 'attr_date')
assert_(attrs[2][1] == 'DATE yyyy-MM-dd')
assert_(attrs[3][0] == 'attr_datetime_local')
assert_(attrs[3][1] == 'DATE "yyyy-MM-dd HH:mm"')
assert_(attrs[4][0] == 'attr_datetime_missing')
assert_(attrs[4][1] == 'DATE "yyyy-MM-dd HH:mm"')
def test_dateheader_unsupported(self):
ofile = open(test8)
rel, attrs = read_header(ofile)
ofile.close()
assert_(rel == 'test8')
assert_(len(attrs) == 2)
assert_(attrs[0][0] == 'attr_datetime_utc')
assert_(attrs[0][1] == 'DATE "yyyy-MM-dd HH:mm Z"')
assert_(attrs[1][0] == 'attr_datetime_full')
assert_(attrs[1][1] == 'DATE "yy-MM-dd HH:mm:ss z"')
class DateAttributeTest(TestCase):
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0', "No np.datetime64 in Numpy < 1.7.0")
def setUp(self):
self.data, self.meta = loadarff(test7)
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0', "No np.datetime64 in Numpy < 1.7.0")
def test_year_attribute(self):
expected = np.array([
'1999',
'2004',
'1817',
'2100',
'2013',
'1631'
], dtype='datetime64[Y]')
assert_array_equal(self.data["attr_year"], expected)
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0', "No np.datetime64 in Numpy < 1.7.0")
def test_month_attribute(self):
expected = np.array([
'1999-01',
'2004-12',
'1817-04',
'2100-09',
'2013-11',
'1631-10'
], dtype='datetime64[M]')
assert_array_equal(self.data["attr_month"], expected)
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0', "No np.datetime64 in Numpy < 1.7.0")
def test_date_attribute(self):
expected = np.array([
'1999-01-31',
'2004-12-01',
'1817-04-28',
'2100-09-10',
'2013-11-30',
'1631-10-15'
], dtype='datetime64[D]')
assert_array_equal(self.data["attr_date"], expected)
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0', "No np.datetime64 in Numpy < 1.7.0")
def test_datetime_local_attribute(self):
expected = np.array([
datetime.datetime(year=1999, month=1, day=31, hour=0, minute=1),
datetime.datetime(year=2004, month=12, day=1, hour=23, minute=59),
datetime.datetime(year=1817, month=4, day=28, hour=13, minute=0),
datetime.datetime(year=2100, month=9, day=10, hour=12, minute=0),
datetime.datetime(year=2013, month=11, day=30, hour=4, minute=55),
datetime.datetime(year=1631, month=10, day=15, hour=20, minute=4)
], dtype='datetime64[m]')
assert_array_equal(self.data["attr_datetime_local"], expected)
@dec.skipif(NumpyVersion(np.__version__) < '1.7.0', "No np.datetime64 in Numpy < 1.7.0")
def test_datetime_missing(self):
expected = np.array([
'nat',
'2004-12-01T23:59Z',
'nat',
'nat',
'2013-11-30T04:55Z',
'1631-10-15T20:04Z'
], dtype='datetime64[m]')
assert_array_equal(self.data["attr_datetime_missing"], expected)
def test_datetime_timezone(self):
assert_raises(ValueError, loadarff, test8)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
8l/beri | cheritest/trunk/tests/cp2/test_cp2_ctoptr_tag.py | 2 | 1520 | #-
# Copyright (c) 2014 Michael Roe
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
from beritest_tools import BaseBERITestCase
from nose.plugins.attrib import attr
class test_cp2_ctoptr_tag(BaseBERITestCase):
@attr('capabilities')
def test_cp2_ctoptr_tag_1(self):
'''Check that ctoptr of a capability with the tag bit unset returns 0'''
self.assertRegisterEqual(self.MIPS.a0, 0, "ctoptr of a capability with the tag bit unset did not return 0")
| apache-2.0 |
coolsvap/dox | dox/tests/test_images.py | 1 | 3848 | # -*- coding: utf-8 -*-
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_images
--------------
Tests for `dox.images` module.
"""
import fixtures
import testscenarios
from dox import images
from dox.tests import base
def get_fake_image(value):
if value is not None:
def fake_value(self):
return value
else:
def fake_value(self):
return ['ubuntu']
return fake_value
class TestImages(base.TestCase):
scenarios = [
('have_dockerfile', dict(
dockerfile=True, tox_ini=False, dox_yaml=False,
tox_value=[], dox_value=[], images=[])),
('no_dockerfile', dict(
dockerfile=False, tox_ini=False, dox_yaml=False,
tox_value=[], dox_value=[], images=['ubuntu'])),
('tox_no_docker', dict(
dockerfile=False, tox_ini=True, dox_yaml=False,
tox_value=[], dox_value=[], images=['ubuntu'])),
('tox_docker', dict(
dockerfile=False, tox_ini=True, dox_yaml=False,
tox_value=['tox_docker'], dox_value=[], images=['tox_docker'])),
('dox_image', dict(
dockerfile=False, tox_ini=False, dox_yaml=True,
tox_value=[], dox_value=[], images=['ubuntu'])),
('dox_no_image', dict(
dockerfile=False, tox_ini=False, dox_yaml=True,
tox_value=[], dox_value=['dox_value'], images=['dox_value'])),
('both_dox_wins', dict(
dockerfile=False, tox_ini=True, dox_yaml=True,
tox_value=['tox_wins'], dox_value=['dox_wins'],
images=['dox_wins'])),
('both_no_dox', dict(
dockerfile=False, tox_ini=True, dox_yaml=True,
tox_value=['tox_wins'], dox_value=[], images=['ubuntu'])),
('both_dockerfile_passthru', dict(
dockerfile=True, tox_ini=True, dox_yaml=True,
tox_value=[], dox_value=[], images=[])),
('all_dockerfile_dox_override', dict(
dockerfile=True, tox_ini=True, dox_yaml=True,
tox_value=[], dox_value=['dox_wins'], images=['dox_wins'])),
('all_dockerfile_tox_loses', dict(
dockerfile=True, tox_ini=True, dox_yaml=True,
tox_value=['tox_wins'], dox_value=[], images=[])),
]
def setUp(self):
super(TestImages, self).setUp()
self.useFixture(fixtures.MonkeyPatch(
'dox.config.dockerfile.Dockerfile.exists',
base.bool_to_fake(self.dockerfile)))
self.useFixture(fixtures.MonkeyPatch(
'dox.config.dox_yaml.DoxYaml.exists',
base.bool_to_fake(self.dox_yaml)))
self.useFixture(fixtures.MonkeyPatch(
'dox.config.tox_ini.ToxIni.exists',
base.bool_to_fake(self.tox_ini)))
self.useFixture(fixtures.MonkeyPatch(
'dox.config.dox_yaml.DoxYaml.get_images',
get_fake_image(self.dox_value)))
self.useFixture(fixtures.MonkeyPatch(
'dox.config.tox_ini.ToxIni.get_images',
get_fake_image(self.tox_value)))
def test_images(self):
image = images.get_images({})
self.assertEqual(image, self.images)
def load_tests(loader, in_tests, pattern):
return testscenarios.load_tests_apply_scenarios(loader, in_tests, pattern)
| apache-2.0 |
Microsoft/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/idlelib/idle_test/test_run.py | 3 | 1099 | import unittest
from unittest import mock
from test.support import captured_stderr
import idlelib.run as idlerun
class RunTest(unittest.TestCase):
def test_print_exception_unhashable(self):
class UnhashableException(Exception):
def __eq__(self, other):
return True
ex1 = UnhashableException('ex1')
ex2 = UnhashableException('ex2')
try:
raise ex2 from ex1
except UnhashableException:
try:
raise ex1
except UnhashableException:
with captured_stderr() as output:
with mock.patch.object(idlerun,
'cleanup_traceback') as ct:
ct.side_effect = lambda t, e: t
idlerun.print_exception()
tb = output.getvalue().strip().splitlines()
self.assertEqual(11, len(tb))
self.assertIn('UnhashableException: ex2', tb[3])
self.assertIn('UnhashableException: ex1', tb[10])
if __name__ == '__main__':
unittest.main(verbosity=2)
| apache-2.0 |
FCP-INDI/nipype | nipype/interfaces/fsl/tests/test_utils.py | 10 | 10430 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import os
from tempfile import mkdtemp
from shutil import rmtree
import numpy as np
import nibabel as nb
from nipype.testing import (assert_equal, assert_not_equal,
assert_raises, skipif)
import nipype.interfaces.fsl.utils as fsl
from nipype.interfaces.fsl import no_fsl, Info
from .test_maths import (set_output_type, create_files_in_directory,
clean_directory)
@skipif(no_fsl)
def test_fslroi():
filelist, outdir, cwd, _ = create_files_in_directory()
roi = fsl.ExtractROI()
# make sure command gets called
yield assert_equal, roi.cmd, 'fslroi'
# test raising error with mandatory args absent
yield assert_raises, ValueError, roi.run
# .inputs based parameters setting
roi.inputs.in_file = filelist[0]
roi.inputs.roi_file = 'foo_roi.nii'
roi.inputs.t_min = 10
roi.inputs.t_size = 20
yield assert_equal, roi.cmdline, 'fslroi %s foo_roi.nii 10 20' % filelist[0]
# .run based parameter setting
roi2 = fsl.ExtractROI(in_file=filelist[0],
roi_file='foo2_roi.nii',
t_min=20, t_size=40,
x_min=3, x_size=30,
y_min=40, y_size=10,
z_min=5, z_size=20)
yield assert_equal, roi2.cmdline, \
'fslroi %s foo2_roi.nii 3 30 40 10 5 20 20 40' % filelist[0]
clean_directory(outdir, cwd)
# test arguments for opt_map
# Fslroi class doesn't have a filled opt_map{}
@skipif(no_fsl)
def test_fslmerge():
filelist, outdir, cwd, _ = create_files_in_directory()
merger = fsl.Merge()
# make sure command gets called
yield assert_equal, merger.cmd, 'fslmerge'
# test raising error with mandatory args absent
yield assert_raises, ValueError, merger.run
# .inputs based parameters setting
merger.inputs.in_files = filelist
merger.inputs.merged_file = 'foo_merged.nii'
merger.inputs.dimension = 't'
merger.inputs.output_type = 'NIFTI'
yield assert_equal, merger.cmdline, 'fslmerge -t foo_merged.nii %s' % ' '.join(filelist)
# verify that providing a tr value updates the dimension to tr
merger.inputs.tr = 2.25
yield assert_equal, merger.cmdline, 'fslmerge -tr foo_merged.nii %s %.2f' % (' '.join(filelist), 2.25)
# .run based parameter setting
merger2 = fsl.Merge(in_files=filelist,
merged_file='foo_merged.nii',
dimension='t',
output_type='NIFTI',
tr=2.25)
yield assert_equal, merger2.cmdline, \
'fslmerge -tr foo_merged.nii %s %.2f' % (' '.join(filelist), 2.25)
clean_directory(outdir, cwd)
# test arguments for opt_map
# Fslmerge class doesn't have a filled opt_map{}
# test fslmath
@skipif(no_fsl)
def test_fslmaths():
filelist, outdir, cwd, _ = create_files_in_directory()
math = fsl.ImageMaths()
# make sure command gets called
yield assert_equal, math.cmd, 'fslmaths'
# test raising error with mandatory args absent
yield assert_raises, ValueError, math.run
# .inputs based parameters setting
math.inputs.in_file = filelist[0]
math.inputs.op_string = '-add 2.5 -mul input_volume2'
math.inputs.out_file = 'foo_math.nii'
yield assert_equal, math.cmdline, \
'fslmaths %s -add 2.5 -mul input_volume2 foo_math.nii' % filelist[0]
# .run based parameter setting
math2 = fsl.ImageMaths(in_file=filelist[0], op_string='-add 2.5',
out_file='foo2_math.nii')
yield assert_equal, math2.cmdline, 'fslmaths %s -add 2.5 foo2_math.nii' % filelist[0]
# test arguments for opt_map
# Fslmath class doesn't have opt_map{}
clean_directory(outdir, cwd)
# test overlay
@skipif(no_fsl)
def test_overlay():
filelist, outdir, cwd, _ = create_files_in_directory()
overlay = fsl.Overlay()
# make sure command gets called
yield assert_equal, overlay.cmd, 'overlay'
# test raising error with mandatory args absent
yield assert_raises, ValueError, overlay.run
# .inputs based parameters setting
overlay.inputs.stat_image = filelist[0]
overlay.inputs.stat_thresh = (2.5, 10)
overlay.inputs.background_image = filelist[1]
overlay.inputs.auto_thresh_bg = True
overlay.inputs.show_negative_stats = True
overlay.inputs.out_file = 'foo_overlay.nii'
yield assert_equal, overlay.cmdline, \
'overlay 1 0 %s -a %s 2.50 10.00 %s -2.50 -10.00 foo_overlay.nii' % (
filelist[1], filelist[0], filelist[0])
# .run based parameter setting
overlay2 = fsl.Overlay(stat_image=filelist[0], stat_thresh=(2.5, 10),
background_image=filelist[1], auto_thresh_bg=True,
out_file='foo2_overlay.nii')
yield assert_equal, overlay2.cmdline, 'overlay 1 0 %s -a %s 2.50 10.00 foo2_overlay.nii' % (
filelist[1], filelist[0])
clean_directory(outdir, cwd)
# test slicer
@skipif(no_fsl)
def test_slicer():
filelist, outdir, cwd, _ = create_files_in_directory()
slicer = fsl.Slicer()
# make sure command gets called
yield assert_equal, slicer.cmd, 'slicer'
# test raising error with mandatory args absent
yield assert_raises, ValueError, slicer.run
# .inputs based parameters setting
slicer.inputs.in_file = filelist[0]
slicer.inputs.image_edges = filelist[1]
slicer.inputs.intensity_range = (10., 20.)
slicer.inputs.all_axial = True
slicer.inputs.image_width = 750
slicer.inputs.out_file = 'foo_bar.png'
yield assert_equal, slicer.cmdline, \
'slicer %s %s -L -i 10.000 20.000 -A 750 foo_bar.png' % (
filelist[0], filelist[1])
# .run based parameter setting
slicer2 = fsl.Slicer(
in_file=filelist[0], middle_slices=True, label_slices=False,
out_file='foo_bar2.png')
yield assert_equal, slicer2.cmdline, 'slicer %s -a foo_bar2.png' % (filelist[0])
clean_directory(outdir, cwd)
def create_parfiles():
np.savetxt('a.par', np.random.rand(6, 3))
np.savetxt('b.par', np.random.rand(6, 3))
return ['a.par', 'b.par']
# test fsl_tsplot
@skipif(no_fsl)
def test_plottimeseries():
filelist, outdir, cwd, _ = create_files_in_directory()
parfiles = create_parfiles()
plotter = fsl.PlotTimeSeries()
# make sure command gets called
yield assert_equal, plotter.cmd, 'fsl_tsplot'
# test raising error with mandatory args absent
yield assert_raises, ValueError, plotter.run
# .inputs based parameters setting
plotter.inputs.in_file = parfiles[0]
plotter.inputs.labels = ['x', 'y', 'z']
plotter.inputs.y_range = (0, 1)
plotter.inputs.title = 'test plot'
plotter.inputs.out_file = 'foo.png'
yield assert_equal, plotter.cmdline, \
('fsl_tsplot -i %s -a x,y,z -o foo.png -t \'test plot\' -u 1 --ymin=0 --ymax=1'
% parfiles[0])
# .run based parameter setting
plotter2 = fsl.PlotTimeSeries(
in_file=parfiles, title='test2 plot', plot_range=(2, 5),
out_file='bar.png')
yield assert_equal, plotter2.cmdline, \
'fsl_tsplot -i %s,%s -o bar.png --start=2 --finish=5 -t \'test2 plot\' -u 1' % tuple(
parfiles)
clean_directory(outdir, cwd)
@skipif(no_fsl)
def test_plotmotionparams():
filelist, outdir, cwd, _ = create_files_in_directory()
parfiles = create_parfiles()
plotter = fsl.PlotMotionParams()
# make sure command gets called
yield assert_equal, plotter.cmd, 'fsl_tsplot'
# test raising error with mandatory args absent
yield assert_raises, ValueError, plotter.run
# .inputs based parameters setting
plotter.inputs.in_file = parfiles[0]
plotter.inputs.in_source = 'fsl'
plotter.inputs.plot_type = 'rotations'
plotter.inputs.out_file = 'foo.png'
yield assert_equal, plotter.cmdline, \
('fsl_tsplot -i %s -o foo.png -t \'MCFLIRT estimated rotations (radians)\' '
'--start=1 --finish=3 -a x,y,z' % parfiles[0])
# .run based parameter setting
plotter2 = fsl.PlotMotionParams(
in_file=parfiles[1], in_source='spm', plot_type='translations',
out_file='bar.png')
yield assert_equal, plotter2.cmdline, \
('fsl_tsplot -i %s -o bar.png -t \'Realign estimated translations (mm)\' '
'--start=1 --finish=3 -a x,y,z' % parfiles[1])
clean_directory(outdir, cwd)
@skipif(no_fsl)
def test_convertxfm():
filelist, outdir, cwd, _ = create_files_in_directory()
cvt = fsl.ConvertXFM()
# make sure command gets called
yield assert_equal, cvt.cmd, "convert_xfm"
# test raising error with mandatory args absent
yield assert_raises, ValueError, cvt.run
# .inputs based parameters setting
cvt.inputs.in_file = filelist[0]
cvt.inputs.invert_xfm = True
cvt.inputs.out_file = "foo.mat"
yield assert_equal, cvt.cmdline, 'convert_xfm -omat foo.mat -inverse %s' % filelist[0]
# constructor based parameter setting
cvt2 = fsl.ConvertXFM(
in_file=filelist[0], in_file2=filelist[1], concat_xfm=True,
out_file="bar.mat")
yield assert_equal, cvt2.cmdline, \
"convert_xfm -omat bar.mat -concat %s %s" % (filelist[1], filelist[0])
clean_directory(outdir, cwd)
@skipif(no_fsl)
def test_swapdims(fsl_output_type=None):
prev_type = set_output_type(fsl_output_type)
files, testdir, origdir, out_ext = create_files_in_directory()
swap = fsl.SwapDimensions()
# Test the underlying command
yield assert_equal, swap.cmd, "fslswapdim"
# Test mandatory args
args = [dict(in_file=files[0]), dict(new_dims=("x", "y", "z"))]
for arg in args:
wontrun = fsl.SwapDimensions(**arg)
yield assert_raises, ValueError, wontrun.run
# Now test a basic command line
swap.inputs.in_file = files[0]
swap.inputs.new_dims = ("x", "y", "z")
yield assert_equal, swap.cmdline, "fslswapdim a.nii x y z %s" % os.path.realpath(os.path.join(testdir, "a_newdims%s" % out_ext))
# Test that we can set an output name
swap.inputs.out_file = "b.nii"
yield assert_equal, swap.cmdline, "fslswapdim a.nii x y z b.nii"
# Clean up
clean_directory(testdir, origdir)
set_output_type(prev_type)
| bsd-3-clause |
mozilla/verbatim | local_apps/pootle_project/urls.py | 6 | 1178 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2008 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with translate; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
from django.conf.urls.defaults import *
urlpatterns = patterns('pootle_project.views',
(r'^$|^index.html$', 'projects_index'),
(r'^(?P<project_code>[^/]*)/admin.html$', 'project_admin'),
(r'^(?P<project_code>[^/]*)/permissions.html$', 'project_admin_permissions'),
(r'^(?P<project_code>[^/]*)(/|/index.html)?$', 'project_language_index'),
)
| gpl-2.0 |
amarouni/incubator-beam | sdks/python/apache_beam/transforms/sideinputs_test.py | 9 | 11625 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for side inputs."""
import logging
import unittest
from nose.plugins.attrib import attr
import apache_beam as beam
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that, equal_to
from apache_beam.transforms import window
class SideInputsTest(unittest.TestCase):
# Enable nose tests running in parallel
_multiprocess_can_split_ = True
def create_pipeline(self):
return TestPipeline()
def run_windowed_side_inputs(self, elements, main_window_fn,
side_window_fn=None,
side_input_type=beam.pvalue.AsList,
combine_fn=None,
expected=None):
with self.create_pipeline() as p:
pcoll = p | beam.Create(elements) | beam.Map(
lambda t: window.TimestampedValue(t, t))
main = pcoll | 'WindowMain' >> beam.WindowInto(main_window_fn)
side = pcoll | 'WindowSide' >> beam.WindowInto(
side_window_fn or main_window_fn)
kw = {}
if combine_fn is not None:
side |= beam.CombineGlobally(combine_fn).without_defaults()
kw['default_value'] = 0
elif side_input_type == beam.pvalue.AsDict:
side |= beam.Map(lambda x: ('k%s' % x, 'v%s' % x))
res = main | beam.Map(lambda x, s: (x, s), side_input_type(side, **kw))
if side_input_type in (beam.pvalue.AsIter, beam.pvalue.AsList):
res |= beam.Map(lambda (x, s): (x, sorted(s)))
assert_that(res, equal_to(expected))
def test_global_global_windows(self):
self.run_windowed_side_inputs(
[1, 2, 3],
window.GlobalWindows(),
expected=[(1, [1, 2, 3]), (2, [1, 2, 3]), (3, [1, 2, 3])])
def test_same_fixed_windows(self):
self.run_windowed_side_inputs(
[1, 2, 11],
window.FixedWindows(10),
expected=[(1, [1, 2]), (2, [1, 2]), (11, [11])])
def test_different_fixed_windows(self):
self.run_windowed_side_inputs(
[1, 2, 11, 21, 31],
window.FixedWindows(10),
window.FixedWindows(20),
expected=[(1, [1, 2, 11]), (2, [1, 2, 11]), (11, [1, 2, 11]),
(21, [21, 31]), (31, [21, 31])])
def test_fixed_global_window(self):
self.run_windowed_side_inputs(
[1, 2, 11],
window.FixedWindows(10),
window.GlobalWindows(),
expected=[(1, [1, 2, 11]), (2, [1, 2, 11]), (11, [1, 2, 11])])
def test_sliding_windows(self):
self.run_windowed_side_inputs(
[1, 2, 4],
window.SlidingWindows(size=6, period=2),
window.SlidingWindows(size=6, period=2),
expected=[
# Element 1 falls in three windows
(1, [1]), # [-4, 2)
(1, [1, 2]), # [-2, 4)
(1, [1, 2, 4]), # [0, 6)
# as does 2,
(2, [1, 2]), # [-2, 4)
(2, [1, 2, 4]), # [0, 6)
(2, [2, 4]), # [2, 8)
# and 4.
(4, [1, 2, 4]), # [0, 6)
(4, [2, 4]), # [2, 8)
(4, [4]), # [4, 10)
])
def test_windowed_iter(self):
self.run_windowed_side_inputs(
[1, 2, 11],
window.FixedWindows(10),
side_input_type=beam.pvalue.AsIter,
expected=[(1, [1, 2]), (2, [1, 2]), (11, [11])])
def test_windowed_singleton(self):
self.run_windowed_side_inputs(
[1, 2, 11],
window.FixedWindows(10),
side_input_type=beam.pvalue.AsSingleton,
combine_fn=sum,
expected=[(1, 3), (2, 3), (11, 11)])
def test_windowed_dict(self):
self.run_windowed_side_inputs(
[1, 2, 11],
window.FixedWindows(10),
side_input_type=beam.pvalue.AsDict,
expected=[
(1, {'k1': 'v1', 'k2': 'v2'}),
(2, {'k1': 'v1', 'k2': 'v2'}),
(11, {'k11': 'v11'}),
])
@attr('ValidatesRunner')
def test_empty_singleton_side_input(self):
pipeline = self.create_pipeline()
pcol = pipeline | 'start' >> beam.Create([1, 2])
side = pipeline | 'side' >> beam.Create([]) # Empty side input.
def my_fn(k, s):
# TODO(robertwb): Should this be an error as in Java?
v = ('empty' if isinstance(s, beam.pvalue.EmptySideInput) else 'full')
return [(k, v)]
result = pcol | 'compute' >> beam.FlatMap(
my_fn, beam.pvalue.AsSingleton(side))
assert_that(result, equal_to([(1, 'empty'), (2, 'empty')]))
pipeline.run()
@attr('ValidatesRunner')
def test_multi_valued_singleton_side_input(self):
pipeline = self.create_pipeline()
pcol = pipeline | 'start' >> beam.Create([1, 2])
side = pipeline | 'side' >> beam.Create([3, 4]) # 2 values in side input.
pcol | 'compute' >> beam.FlatMap( # pylint: disable=expression-not-assigned
lambda x, s: [x * s], beam.pvalue.AsSingleton(side))
with self.assertRaises(Exception):
pipeline.run()
@attr('ValidatesRunner')
def test_default_value_singleton_side_input(self):
pipeline = self.create_pipeline()
pcol = pipeline | 'start' >> beam.Create([1, 2])
side = pipeline | 'side' >> beam.Create([]) # 0 values in side input.
result = pcol | beam.FlatMap(
lambda x, s: [x * s], beam.pvalue.AsSingleton(side, 10))
assert_that(result, equal_to([10, 20]))
pipeline.run()
@attr('ValidatesRunner')
def test_iterable_side_input(self):
pipeline = self.create_pipeline()
pcol = pipeline | 'start' >> beam.Create([1, 2])
side = pipeline | 'side' >> beam.Create([3, 4]) # 2 values in side input.
result = pcol | 'compute' >> beam.FlatMap(
lambda x, s: [x * y for y in s],
beam.pvalue.AsIter(side))
assert_that(result, equal_to([3, 4, 6, 8]))
pipeline.run()
@attr('ValidatesRunner')
def test_as_list_and_as_dict_side_inputs(self):
a_list = [5, 1, 3, 2, 9]
some_pairs = [('crouton', 17), ('supreme', None)]
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([1])
side_list = pipeline | 'side list' >> beam.Create(a_list)
side_pairs = pipeline | 'side pairs' >> beam.Create(some_pairs)
results = main_input | 'concatenate' >> beam.Map(
lambda x, the_list, the_dict: [x, the_list, the_dict],
beam.pvalue.AsList(side_list), beam.pvalue.AsDict(side_pairs))
def matcher(expected_elem, expected_list, expected_pairs):
def match(actual):
[[actual_elem, actual_list, actual_dict]] = actual
equal_to([expected_elem])([actual_elem])
equal_to(expected_list)(actual_list)
equal_to(expected_pairs)(actual_dict.iteritems())
return match
assert_that(results, matcher(1, a_list, some_pairs))
pipeline.run()
@attr('ValidatesRunner')
def test_as_singleton_without_unique_labels(self):
# This should succeed as calling beam.pvalue.AsSingleton on the same
# PCollection twice with the same defaults will return the same
# view.
a_list = [2]
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([1])
side_list = pipeline | 'side list' >> beam.Create(a_list)
results = main_input | beam.Map(
lambda x, s1, s2: [x, s1, s2],
beam.pvalue.AsSingleton(side_list), beam.pvalue.AsSingleton(side_list))
def matcher(expected_elem, expected_singleton):
def match(actual):
[[actual_elem, actual_singleton1, actual_singleton2]] = actual
equal_to([expected_elem])([actual_elem])
equal_to([expected_singleton])([actual_singleton1])
equal_to([expected_singleton])([actual_singleton2])
return match
assert_that(results, matcher(1, 2))
pipeline.run()
@attr('ValidatesRunner')
def test_as_singleton_with_different_defaults(self):
a_list = []
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([1])
side_list = pipeline | 'side list' >> beam.Create(a_list)
results = main_input | beam.Map(
lambda x, s1, s2: [x, s1, s2],
beam.pvalue.AsSingleton(side_list, default_value=2),
beam.pvalue.AsSingleton(side_list, default_value=3))
def matcher(expected_elem, expected_singleton1, expected_singleton2):
def match(actual):
[[actual_elem, actual_singleton1, actual_singleton2]] = actual
equal_to([expected_elem])([actual_elem])
equal_to([expected_singleton1])([actual_singleton1])
equal_to([expected_singleton2])([actual_singleton2])
return match
assert_that(results, matcher(1, 2, 3))
pipeline.run()
@attr('ValidatesRunner')
def test_as_list_twice(self):
# This should succeed as calling beam.pvalue.AsList on the same
# PCollection twice will return the same view.
a_list = [1, 2, 3]
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([1])
side_list = pipeline | 'side list' >> beam.Create(a_list)
results = main_input | beam.Map(
lambda x, ls1, ls2: [x, ls1, ls2],
beam.pvalue.AsList(side_list), beam.pvalue.AsList(side_list))
def matcher(expected_elem, expected_list):
def match(actual):
[[actual_elem, actual_list1, actual_list2]] = actual
equal_to([expected_elem])([actual_elem])
equal_to(expected_list)(actual_list1)
equal_to(expected_list)(actual_list2)
return match
assert_that(results, matcher(1, [1, 2, 3]))
pipeline.run()
@attr('ValidatesRunner')
def test_as_dict_twice(self):
some_kvs = [('a', 1), ('b', 2)]
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([1])
side_kvs = pipeline | 'side kvs' >> beam.Create(some_kvs)
results = main_input | beam.Map(
lambda x, dct1, dct2: [x, dct1, dct2],
beam.pvalue.AsDict(side_kvs),
beam.pvalue.AsDict(side_kvs))
def matcher(expected_elem, expected_kvs):
def match(actual):
[[actual_elem, actual_dict1, actual_dict2]] = actual
equal_to([expected_elem])([actual_elem])
equal_to(expected_kvs)(actual_dict1.iteritems())
equal_to(expected_kvs)(actual_dict2.iteritems())
return match
assert_that(results, matcher(1, some_kvs))
pipeline.run()
@attr('ValidatesRunner')
def test_flattened_side_input(self):
pipeline = self.create_pipeline()
main_input = pipeline | 'main input' >> beam.Create([None])
side_input = (
pipeline | 'side1' >> beam.Create(['a']),
pipeline | 'side2' >> beam.Create(['b'])) | beam.Flatten()
results = main_input | beam.FlatMap(
lambda _, ab: ab,
beam.pvalue.AsList(side_input))
assert_that(results, equal_to(['a', 'b']))
pipeline.run()
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main()
| apache-2.0 |
edunham/wok | wok/util.py | 11 | 1471 | import re
from unicodedata import normalize
from datetime import date, time, datetime, timedelta
def chunk(li, n):
"""Yield succesive n-size chunks from l."""
for i in xrange(0, len(li), n):
yield li[i:i+n]
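# Example (illustrative): list(chunk([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]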
def date_and_times(meta):
date_part = None
time_part = None
if 'date' in meta:
date_part = meta['date']
if 'time' in meta:
time_part = meta['time']
if 'datetime' in meta:
if date_part is None:
if isinstance(meta['datetime'], datetime):
date_part = meta['datetime'].date()
elif isinstance(meta['datetime'], date):
date_part = meta['datetime']
if time_part is None and isinstance(meta['datetime'], datetime):
time_part = meta['datetime'].time()
if isinstance(time_part, int):
seconds = time_part % 60
minutes = (time_part / 60) % 60
hours = (time_part / 3600)
time_part = time(hours, minutes, seconds)
meta['date'] = date_part
meta['time'] = time_part
if date_part is not None and time_part is not None:
meta['datetime'] = datetime(date_part.year, date_part.month,
date_part.day, time_part.hour, time_part.minute,
time_part.second, time_part.microsecond, time_part.tzinfo)
elif date_part is not None:
meta['datetime'] = datetime(date_part.year, date_part.month, date_part.day)
else:
meta['datetime'] = None
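# Worked example of the integer-time handling above: with
# meta = {'date': date(2012, 3, 4), 'time': 3661}, the seconds/minutes/hours split
# gives time(1, 1, 1), so meta['datetime'] becomes datetime(2012, 3, 4, 1, 1, 1).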
| mit |
walterbender/portfolio | odf/draw.py | 1 | 5639 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from .namespaces import DRAWNS, STYLENS, PRESENTATIONNS
from .element import Element
def StyleRefElement(stylename=None, classnames=None, **args):
qattrs = {}
if stylename is not None:
f = stylename.getAttrNS(STYLENS, 'family')
if f == 'graphic':
qattrs[(DRAWNS, u'style-name')] = stylename
elif f == 'presentation':
qattrs[(PRESENTATIONNS, u'style-name')] = stylename
else:
raise ValueError(
"Style's family must be either 'graphic' or 'presentation'")
if classnames is not None:
f = classnames[0].getAttrNS(STYLENS, 'family')
if f == 'graphic':
qattrs[(DRAWNS, u'class-names')] = classnames
elif f == 'presentation':
qattrs[(PRESENTATIONNS, u'class-names')] = classnames
else:
raise ValueError(
"Style's family must be either 'graphic' or 'presentation'")
return Element(qattributes=qattrs, **args)
def DrawElement(name=None, **args):
e = Element(name=name, **args)
if 'displayname' not in args:
e.setAttrNS(DRAWNS, 'display-name', name)
return e
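# Usage sketch (illustrative; 'Photo' and the frame geometry are made-up values):
# StyleRefElement-based shapes such as Frame() accept a Style whose family is
# 'graphic' or 'presentation', e.g.:
#   from odf.style import Style
#   photo_style = Style(name='Photo', family='graphic')
#   frame = Frame(stylename=photo_style, width='10cm', height='7cm', x='1cm', y='1cm')
# Passing a style of any other family raises the ValueError above.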
# Autogenerated
def A(**args):
return Element(qname=(DRAWNS, 'a'), **args)
def Applet(**args):
return Element(qname=(DRAWNS, 'applet'), **args)
def AreaCircle(**args):
return Element(qname=(DRAWNS, 'area-circle'), **args)
def AreaPolygon(**args):
return Element(qname=(DRAWNS, 'area-polygon'), **args)
def AreaRectangle(**args):
return Element(qname=(DRAWNS, 'area-rectangle'), **args)
def Caption(**args):
return StyleRefElement(qname=(DRAWNS, 'caption'), **args)
def Circle(**args):
return StyleRefElement(qname=(DRAWNS, 'circle'), **args)
def Connector(**args):
return StyleRefElement(qname=(DRAWNS, 'connector'), **args)
def ContourPath(**args):
return Element(qname=(DRAWNS, 'contour-path'), **args)
def ContourPolygon(**args):
return Element(qname=(DRAWNS, 'contour-polygon'), **args)
def Control(**args):
return StyleRefElement(qname=(DRAWNS, 'control'), **args)
def CustomShape(**args):
return StyleRefElement(qname=(DRAWNS, 'custom-shape'), **args)
def Ellipse(**args):
return StyleRefElement(qname=(DRAWNS, 'ellipse'), **args)
def EnhancedGeometry(**args):
return Element(qname=(DRAWNS, 'enhanced-geometry'), **args)
def Equation(**args):
return Element(qname=(DRAWNS, 'equation'), **args)
def FillImage(**args):
return DrawElement(qname=(DRAWNS, 'fill-image'), **args)
def FloatingFrame(**args):
return Element(qname=(DRAWNS, 'floating-frame'), **args)
def Frame(**args):
return StyleRefElement(qname=(DRAWNS, 'frame'), **args)
def G(**args):
return StyleRefElement(qname=(DRAWNS, 'g'), **args)
def GluePoint(**args):
return Element(qname=(DRAWNS, 'glue-point'), **args)
def Gradient(**args):
return DrawElement(qname=(DRAWNS, 'gradient'), **args)
def Handle(**args):
return Element(qname=(DRAWNS, 'handle'), **args)
def Hatch(**args):
return DrawElement(qname=(DRAWNS, 'hatch'), **args)
def Image(**args):
return Element(qname=(DRAWNS, 'image'), **args)
def ImageMap(**args):
return Element(qname=(DRAWNS, 'image-map'), **args)
def Layer(**args):
return Element(qname=(DRAWNS, 'layer'), **args)
def LayerSet(**args):
return Element(qname=(DRAWNS, 'layer-set'), **args)
def Line(**args):
return StyleRefElement(qname=(DRAWNS, 'line'), **args)
def Marker(**args):
return DrawElement(qname=(DRAWNS, 'marker'), **args)
def Measure(**args):
return StyleRefElement(qname=(DRAWNS, 'measure'), **args)
def Object(**args):
return Element(qname=(DRAWNS, 'object'), **args)
def ObjectOle(**args):
return Element(qname=(DRAWNS, 'object-ole'), **args)
def Opacity(**args):
return DrawElement(qname=(DRAWNS, 'opacity'), **args)
def Page(**args):
return Element(qname=(DRAWNS, 'page'), **args)
def PageThumbnail(**args):
return StyleRefElement(qname=(DRAWNS, 'page-thumbnail'), **args)
def Param(**args):
return Element(qname=(DRAWNS, 'param'), **args)
def Path(**args):
return StyleRefElement(qname=(DRAWNS, 'path'), **args)
def Plugin(**args):
return Element(qname=(DRAWNS, 'plugin'), **args)
def Polygon(**args):
return StyleRefElement(qname=(DRAWNS, 'polygon'), **args)
def Polyline(**args):
return StyleRefElement(qname=(DRAWNS, 'polyline'), **args)
def Rect(**args):
return StyleRefElement(qname=(DRAWNS, 'rect'), **args)
def RegularPolygon(**args):
return StyleRefElement(qname=(DRAWNS, 'regular-polygon'), **args)
def StrokeDash(**args):
return DrawElement(qname=(DRAWNS, 'stroke-dash'), **args)
def TextBox(**args):
return Element(qname=(DRAWNS, 'text-box'), **args)
| gpl-3.0 |
laiqiqi886/kbengine | kbe/src/lib/python/Lib/concurrent/futures/thread.py | 93 | 4548 | # Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Implements ThreadPoolExecutor."""
__author__ = 'Brian Quinlan ([email protected])'
import atexit
from concurrent.futures import _base
import queue
import threading
import weakref
# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
# - The workers would still be running during interpreter shutdown,
# meaning that they would fail in unpredictable ways.
# - The workers could be killed while evaluating a work item, which could
# be bad if the callable being evaluated has external side-effects e.g.
# writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.
_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False
def _python_exit():
global _shutdown
_shutdown = True
items = list(_threads_queues.items())
for t, q in items:
q.put(None)
for t, q in items:
t.join()
atexit.register(_python_exit)
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException as e:
self.future.set_exception(e)
else:
self.future.set_result(result)
def _worker(executor_reference, work_queue):
try:
while True:
work_item = work_queue.get(block=True)
if work_item is not None:
work_item.run()
# Delete references to object. See issue16284
del work_item
continue
executor = executor_reference()
# Exit if:
# - The interpreter is shutting down OR
# - The executor that owns the worker has been collected OR
# - The executor that owns the worker has been shutdown.
if _shutdown or executor is None or executor._shutdown:
# Notice other workers
work_queue.put(None)
return
del executor
except BaseException:
_base.LOGGER.critical('Exception in worker', exc_info=True)
class ThreadPoolExecutor(_base.Executor):
def __init__(self, max_workers):
"""Initializes a new ThreadPoolExecutor instance.
Args:
max_workers: The maximum number of threads that can be used to
execute the given calls.
"""
self._max_workers = max_workers
self._work_queue = queue.Queue()
self._threads = set()
self._shutdown = False
self._shutdown_lock = threading.Lock()
def submit(self, fn, *args, **kwargs):
with self._shutdown_lock:
if self._shutdown:
raise RuntimeError('cannot schedule new futures after shutdown')
f = _base.Future()
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
return f
submit.__doc__ = _base.Executor.submit.__doc__
def _adjust_thread_count(self):
# When the executor gets lost, the weakref callback will wake up
# the worker threads.
def weakref_cb(_, q=self._work_queue):
q.put(None)
# TODO(bquinlan): Should avoid creating new threads if there are more
# idle threads than items in the work queue.
if len(self._threads) < self._max_workers:
t = threading.Thread(target=_worker,
args=(weakref.ref(self, weakref_cb),
self._work_queue))
t.daemon = True
t.start()
self._threads.add(t)
_threads_queues[t] = self._work_queue
def shutdown(self, wait=True):
with self._shutdown_lock:
self._shutdown = True
self._work_queue.put(None)
if wait:
for t in self._threads:
t.join()
shutdown.__doc__ = _base.Executor.shutdown.__doc__
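# Usage sketch (illustrative only; mirrors the standard concurrent.futures API):
#   with ThreadPoolExecutor(max_workers=4) as executor:
#       future = executor.submit(pow, 2, 10)
#       assert future.result() == 1024
# The context-manager form relies on Executor.__exit__ calling shutdown(wait=True),
# which is inherited from _base.Executor.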
| lgpl-3.0 |
TravisCG/SI_scripts | samerec.py | 1 | 1175 | #!/usr/bin/python
"""
Read a pair of fastq files and filter out reads whose IDs are not found in both files.
"""
import sys
fq1 = open(sys.argv[1])
fq2 = open(sys.argv[2])
out1 = open(sys.argv[3], "w")
out2 = open(sys.argv[4], "w")
stack1 = dict()
stack2 = dict()
counter = 0
same = False
while True:
read1 = fq1.readline()
read2 = fq2.readline()
if not read1 and not read2:
break
#TODO remove /1 and /2 from the end of the line
if counter % 4 == 0:
same = False
if read1 == read2:
same = True
else:
id1 = read1
id2 = read2
for k in stack1.keys():
if k in stack1 and k in stack2:
out1.write(k)
out2.write(k)
out1.write("".join(stack1[k]))
out2.write("".join(stack2[k]))
del stack1[k]
del stack2[k]
else:
if not same:
stack1[id1] = list()
stack2[id2] = list()
stack1[id1].append(read1)
stack2[id2].append(read2)
if same:
out1.write(read1)
out2.write(read2)
counter += 1
if counter % 1000000 == 0:
print counter
# Final flush: write out any remaining records whose IDs were seen in both files
for k in list(stack1.keys()):
	if k in stack2:
		out1.write(k)
		out2.write(k)
		out1.write("".join(stack1[k]))
		out2.write("".join(stack2[k]))
		del stack1[k]
		del stack2[k]
out1.close()
out2.close()
| gpl-3.0 |
firerszd/kbengine | kbe/src/lib/python/Lib/tkinter/test/test_ttk/test_widgets.py | 59 | 56828 | import unittest
import tkinter
from tkinter import ttk
from test.support import requires
import sys
from tkinter.test.test_ttk.test_functions import MockTclObj
from tkinter.test.support import (AbstractTkTest, tcl_version, get_tk_patchlevel,
simulate_mouse_click)
from tkinter.test.widget_tests import (add_standard_options, noconv,
AbstractWidgetTest, StandardOptionsTests, IntegerSizeTests, PixelSizeTests,
setUpModule)
requires('gui')
class StandardTtkOptionsTests(StandardOptionsTests):
def test_class(self):
widget = self.create()
self.assertEqual(widget['class'], '')
errmsg='attempt to change read-only option'
if get_tk_patchlevel() < (8, 6, 0): # actually this was changed in 8.6b3
errmsg='Attempt to change read-only option'
self.checkInvalidParam(widget, 'class', 'Foo', errmsg=errmsg)
widget2 = self.create(class_='Foo')
self.assertEqual(widget2['class'], 'Foo')
def test_padding(self):
widget = self.create()
self.checkParam(widget, 'padding', 0, expected=('0',))
self.checkParam(widget, 'padding', 5, expected=('5',))
self.checkParam(widget, 'padding', (5, 6), expected=('5', '6'))
self.checkParam(widget, 'padding', (5, 6, 7),
expected=('5', '6', '7'))
self.checkParam(widget, 'padding', (5, 6, 7, 8),
expected=('5', '6', '7', '8'))
self.checkParam(widget, 'padding', ('5p', '6p', '7p', '8p'))
self.checkParam(widget, 'padding', (), expected='')
def test_style(self):
widget = self.create()
self.assertEqual(widget['style'], '')
errmsg = 'Layout Foo not found'
if hasattr(self, 'default_orient'):
errmsg = ('Layout %s.Foo not found' %
getattr(self, 'default_orient').title())
self.checkInvalidParam(widget, 'style', 'Foo',
errmsg=errmsg)
widget2 = self.create(class_='Foo')
self.assertEqual(widget2['class'], 'Foo')
# XXX
pass
class WidgetTest(AbstractTkTest, unittest.TestCase):
"""Tests methods available in every ttk widget."""
def setUp(self):
super().setUp()
self.widget = ttk.Button(self.root, width=0, text="Text")
self.widget.pack()
self.widget.wait_visibility()
def test_identify(self):
self.widget.update_idletasks()
self.assertEqual(self.widget.identify(
int(self.widget.winfo_width() / 2),
int(self.widget.winfo_height() / 2)
), "label")
self.assertEqual(self.widget.identify(-1, -1), "")
self.assertRaises(tkinter.TclError, self.widget.identify, None, 5)
self.assertRaises(tkinter.TclError, self.widget.identify, 5, None)
self.assertRaises(tkinter.TclError, self.widget.identify, 5, '')
def test_widget_state(self):
# XXX not sure about the portability of all these tests
self.assertEqual(self.widget.state(), ())
self.assertEqual(self.widget.instate(['!disabled']), True)
# changing from !disabled to disabled
self.assertEqual(self.widget.state(['disabled']), ('!disabled', ))
# no state change
self.assertEqual(self.widget.state(['disabled']), ())
# change back to !disable but also active
self.assertEqual(self.widget.state(['!disabled', 'active']),
('!active', 'disabled'))
# no state changes, again
self.assertEqual(self.widget.state(['!disabled', 'active']), ())
self.assertEqual(self.widget.state(['active', '!disabled']), ())
def test_cb(arg1, **kw):
return arg1, kw
self.assertEqual(self.widget.instate(['!disabled'],
test_cb, "hi", **{"msg": "there"}),
('hi', {'msg': 'there'}))
# attempt to set invalid statespec
currstate = self.widget.state()
self.assertRaises(tkinter.TclError, self.widget.instate,
['badstate'])
self.assertRaises(tkinter.TclError, self.widget.instate,
['disabled', 'badstate'])
# verify that widget didn't change its state
self.assertEqual(currstate, self.widget.state())
# ensuring that passing None as state doesn't modify current state
self.widget.state(['active', '!disabled'])
self.assertEqual(self.widget.state(), ('active', ))
class AbstractToplevelTest(AbstractWidgetTest, PixelSizeTests):
_conv_pixels = noconv
@add_standard_options(StandardTtkOptionsTests)
class FrameTest(AbstractToplevelTest, unittest.TestCase):
OPTIONS = (
'borderwidth', 'class', 'cursor', 'height',
'padding', 'relief', 'style', 'takefocus',
'width',
)
def create(self, **kwargs):
return ttk.Frame(self.root, **kwargs)
@add_standard_options(StandardTtkOptionsTests)
class LabelFrameTest(AbstractToplevelTest, unittest.TestCase):
OPTIONS = (
'borderwidth', 'class', 'cursor', 'height',
'labelanchor', 'labelwidget',
'padding', 'relief', 'style', 'takefocus',
'text', 'underline', 'width',
)
def create(self, **kwargs):
return ttk.LabelFrame(self.root, **kwargs)
def test_labelanchor(self):
widget = self.create()
self.checkEnumParam(widget, 'labelanchor',
'e', 'en', 'es', 'n', 'ne', 'nw', 's', 'se', 'sw', 'w', 'wn', 'ws',
errmsg='Bad label anchor specification {}')
self.checkInvalidParam(widget, 'labelanchor', 'center')
def test_labelwidget(self):
widget = self.create()
label = ttk.Label(self.root, text='Mupp', name='foo')
self.checkParam(widget, 'labelwidget', label, expected='.foo')
label.destroy()
class AbstractLabelTest(AbstractWidgetTest):
def checkImageParam(self, widget, name):
image = tkinter.PhotoImage(master=self.root, name='image1')
image2 = tkinter.PhotoImage(master=self.root, name='image2')
self.checkParam(widget, name, image, expected=('image1',))
self.checkParam(widget, name, 'image1', expected=('image1',))
self.checkParam(widget, name, (image,), expected=('image1',))
self.checkParam(widget, name, (image, 'active', image2),
expected=('image1', 'active', 'image2'))
self.checkParam(widget, name, 'image1 active image2',
expected=('image1', 'active', 'image2'))
self.checkInvalidParam(widget, name, 'spam',
errmsg='image "spam" doesn\'t exist')
def test_compound(self):
widget = self.create()
self.checkEnumParam(widget, 'compound',
'none', 'text', 'image', 'center',
'top', 'bottom', 'left', 'right')
def test_state(self):
widget = self.create()
self.checkParams(widget, 'state', 'active', 'disabled', 'normal')
def test_width(self):
widget = self.create()
self.checkParams(widget, 'width', 402, -402, 0)
@add_standard_options(StandardTtkOptionsTests)
class LabelTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'anchor', 'background',
'class', 'compound', 'cursor', 'font', 'foreground',
'image', 'justify', 'padding', 'relief', 'state', 'style',
'takefocus', 'text', 'textvariable',
'underline', 'width', 'wraplength',
)
_conv_pixels = noconv
def create(self, **kwargs):
return ttk.Label(self.root, **kwargs)
def test_font(self):
widget = self.create()
self.checkParam(widget, 'font',
'-Adobe-Helvetica-Medium-R-Normal--*-120-*-*-*-*-*-*')
@add_standard_options(StandardTtkOptionsTests)
class ButtonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'compound', 'cursor', 'default',
'image', 'state', 'style', 'takefocus', 'text', 'textvariable',
'underline', 'width',
)
def create(self, **kwargs):
return ttk.Button(self.root, **kwargs)
def test_default(self):
widget = self.create()
self.checkEnumParam(widget, 'default', 'normal', 'active', 'disabled')
def test_invoke(self):
success = []
btn = ttk.Button(self.root, command=lambda: success.append(1))
btn.invoke()
self.assertTrue(success)
@add_standard_options(StandardTtkOptionsTests)
class CheckbuttonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'compound', 'cursor',
'image',
'offvalue', 'onvalue',
'state', 'style',
'takefocus', 'text', 'textvariable',
'underline', 'variable', 'width',
)
def create(self, **kwargs):
return ttk.Checkbutton(self.root, **kwargs)
def test_offvalue(self):
widget = self.create()
self.checkParams(widget, 'offvalue', 1, 2.3, '', 'any string')
def test_onvalue(self):
widget = self.create()
self.checkParams(widget, 'onvalue', 1, 2.3, '', 'any string')
def test_invoke(self):
success = []
def cb_test():
success.append(1)
return "cb test called"
cbtn = ttk.Checkbutton(self.root, command=cb_test)
# the variable automatically created by ttk.Checkbutton is actually
# undefined till we invoke the Checkbutton
self.assertEqual(cbtn.state(), ('alternate', ))
self.assertRaises(tkinter.TclError, cbtn.tk.globalgetvar,
cbtn['variable'])
res = cbtn.invoke()
self.assertEqual(res, "cb test called")
self.assertEqual(cbtn['onvalue'],
cbtn.tk.globalgetvar(cbtn['variable']))
self.assertTrue(success)
cbtn['command'] = ''
res = cbtn.invoke()
self.assertFalse(str(res))
self.assertLessEqual(len(success), 1)
self.assertEqual(cbtn['offvalue'],
cbtn.tk.globalgetvar(cbtn['variable']))
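# A minimal illustrative sketch of the behaviour test_invoke checks above, not
# collected by the test runner: Checkbutton.invoke() toggles the automatically
# created Tcl variable between onvalue and offvalue, and the variable stays
# undefined (state 'alternate') until the first toggle.  Assumes a display; the
# on/off values are hypothetical.
def _sketch_checkbutton_invoke():
    import tkinter
    from tkinter import ttk
    root = tkinter.Tk()
    try:
        cbtn = ttk.Checkbutton(root, onvalue='yes', offvalue='no')
        assert 'alternate' in cbtn.state()       # variable not yet defined
        cbtn.invoke()                            # first toggle -> onvalue
        assert str(cbtn.tk.globalgetvar(cbtn['variable'])) == 'yes'
        cbtn.invoke()                            # second toggle -> offvalue
        assert str(cbtn.tk.globalgetvar(cbtn['variable'])) == 'no'
    finally:
        root.destroy()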
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class ComboboxTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'exportselection', 'height',
'justify', 'postcommand', 'state', 'style',
'takefocus', 'textvariable', 'values', 'width',
)
def setUp(self):
super().setUp()
self.combo = self.create()
def create(self, **kwargs):
return ttk.Combobox(self.root, **kwargs)
def test_height(self):
widget = self.create()
self.checkParams(widget, 'height', 100, 101.2, 102.6, -100, 0, '1i')
def test_state(self):
widget = self.create()
self.checkParams(widget, 'state', 'active', 'disabled', 'normal')
def _show_drop_down_listbox(self):
width = self.combo.winfo_width()
self.combo.event_generate('<ButtonPress-1>', x=width - 5, y=5)
self.combo.event_generate('<ButtonRelease-1>', x=width - 5, y=5)
self.combo.update_idletasks()
def test_virtual_event(self):
success = []
self.combo['values'] = [1]
self.combo.bind('<<ComboboxSelected>>',
lambda evt: success.append(True))
self.combo.pack()
self.combo.wait_visibility()
height = self.combo.winfo_height()
self._show_drop_down_listbox()
self.combo.update()
self.combo.event_generate('<Return>')
self.combo.update()
self.assertTrue(success)
def test_postcommand(self):
success = []
self.combo['postcommand'] = lambda: success.append(True)
self.combo.pack()
self.combo.wait_visibility()
self._show_drop_down_listbox()
self.assertTrue(success)
# testing postcommand removal
self.combo['postcommand'] = ''
self._show_drop_down_listbox()
self.assertEqual(len(success), 1)
def test_values(self):
def check_get_current(getval, currval):
self.assertEqual(self.combo.get(), getval)
self.assertEqual(self.combo.current(), currval)
self.assertEqual(self.combo['values'],
() if tcl_version < (8, 5) else '')
check_get_current('', -1)
self.checkParam(self.combo, 'values', 'mon tue wed thur',
expected=('mon', 'tue', 'wed', 'thur'))
self.checkParam(self.combo, 'values', ('mon', 'tue', 'wed', 'thur'))
self.checkParam(self.combo, 'values', (42, 3.14, '', 'any string'))
self.checkParam(self.combo, 'values', '', expected=())
self.combo['values'] = ['a', 1, 'c']
self.combo.set('c')
check_get_current('c', 2)
self.combo.current(0)
check_get_current('a', 0)
self.combo.set('d')
check_get_current('d', -1)
# testing values with empty string
self.combo.set('')
self.combo['values'] = (1, 2, '', 3)
check_get_current('', 2)
# testing values with empty string set through configure
self.combo.configure(values=[1, '', 2])
self.assertEqual(self.combo['values'],
('1', '', '2') if self.wantobjects else
'1 {} 2')
# testing values with spaces
self.combo['values'] = ['a b', 'a\tb', 'a\nb']
self.assertEqual(self.combo['values'],
('a b', 'a\tb', 'a\nb') if self.wantobjects else
'{a b} {a\tb} {a\nb}')
# testing values with special characters
self.combo['values'] = [r'a\tb', '"a"', '} {']
self.assertEqual(self.combo['values'],
(r'a\tb', '"a"', '} {') if self.wantobjects else
r'a\\tb {"a"} \}\ \{')
# out of range
self.assertRaises(tkinter.TclError, self.combo.current,
len(self.combo['values']))
# it expects an integer (or something that can be converted to int)
self.assertRaises(tkinter.TclError, self.combo.current, '')
# testing creating combobox with empty string in values
combo2 = ttk.Combobox(self.root, values=[1, 2, ''])
self.assertEqual(combo2['values'],
('1', '2', '') if self.wantobjects else '1 2 {}')
combo2.destroy()
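# A minimal illustrative sketch of the 'values'/get()/current() interplay that
# test_values above verifies, not collected by the test runner: values accepts
# a Python sequence (or a Tcl list string) and current() reports the index of
# the entry text, or -1 for free text.  Assumes a display; the weekday values
# are hypothetical.
def _sketch_combobox_values():
    import tkinter
    from tkinter import ttk
    root = tkinter.Tk()
    try:
        combo = ttk.Combobox(root, values=('mon', 'tue', 'wed'))
        combo.set('tue')
        assert combo.current() == 1              # index of the current text
        combo.current(0)                         # select by index
        assert combo.get() == 'mon'
        combo.set('not-in-values')
        assert combo.current() == -1             # free text has no index
    finally:
        root.destroy()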
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class EntryTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'background', 'class', 'cursor',
'exportselection', 'font',
'invalidcommand', 'justify',
'show', 'state', 'style', 'takefocus', 'textvariable',
'validate', 'validatecommand', 'width', 'xscrollcommand',
)
def setUp(self):
super().setUp()
self.entry = self.create()
def create(self, **kwargs):
return ttk.Entry(self.root, **kwargs)
def test_invalidcommand(self):
widget = self.create()
self.checkCommandParam(widget, 'invalidcommand')
def test_show(self):
widget = self.create()
self.checkParam(widget, 'show', '*')
self.checkParam(widget, 'show', '')
self.checkParam(widget, 'show', ' ')
def test_state(self):
widget = self.create()
self.checkParams(widget, 'state',
'disabled', 'normal', 'readonly')
def test_validate(self):
widget = self.create()
self.checkEnumParam(widget, 'validate',
'all', 'key', 'focus', 'focusin', 'focusout', 'none')
def test_validatecommand(self):
widget = self.create()
self.checkCommandParam(widget, 'validatecommand')
def test_bbox(self):
self.assertIsBoundingBox(self.entry.bbox(0))
self.assertRaises(tkinter.TclError, self.entry.bbox, 'noindex')
self.assertRaises(tkinter.TclError, self.entry.bbox, None)
def test_identify(self):
self.entry.pack()
self.entry.wait_visibility()
self.entry.update_idletasks()
self.assertEqual(self.entry.identify(5, 5), "textarea")
self.assertEqual(self.entry.identify(-1, -1), "")
self.assertRaises(tkinter.TclError, self.entry.identify, None, 5)
self.assertRaises(tkinter.TclError, self.entry.identify, 5, None)
self.assertRaises(tkinter.TclError, self.entry.identify, 5, '')
def test_validation_options(self):
success = []
test_invalid = lambda: success.append(True)
self.entry['validate'] = 'none'
self.entry['validatecommand'] = lambda: False
self.entry['invalidcommand'] = test_invalid
self.entry.validate()
self.assertTrue(success)
self.entry['invalidcommand'] = ''
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['invalidcommand'] = test_invalid
self.entry['validatecommand'] = lambda: True
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['validatecommand'] = ''
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['validatecommand'] = True
self.assertRaises(tkinter.TclError, self.entry.validate)
def test_validation(self):
validation = []
def validate(to_insert):
if not 'a' <= to_insert.lower() <= 'z':
validation.append(False)
return False
validation.append(True)
return True
self.entry['validate'] = 'key'
self.entry['validatecommand'] = self.entry.register(validate), '%S'
self.entry.insert('end', 1)
self.entry.insert('end', 'a')
self.assertEqual(validation, [False, True])
self.assertEqual(self.entry.get(), 'a')
def test_revalidation(self):
def validate(content):
for letter in content:
if not 'a' <= letter.lower() <= 'z':
return False
return True
self.entry['validatecommand'] = self.entry.register(validate), '%P'
self.entry.insert('end', 'avocado')
self.assertEqual(self.entry.validate(), True)
self.assertEqual(self.entry.state(), ())
self.entry.delete(0, 'end')
self.assertEqual(self.entry.get(), '')
self.entry.insert('end', 'a1b')
self.assertEqual(self.entry.validate(), False)
self.assertEqual(self.entry.state(), ('invalid', ))
self.entry.delete(1)
self.assertEqual(self.entry.validate(), True)
self.assertEqual(self.entry.state(), ())
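# A minimal illustrative sketch of the validation wiring used by
# test_validation/test_revalidation above, not collected by the test runner:
# register() turns a Python callable into a Tcl command, and the percent
# substitutions select what gets passed in ('%S' is the string being inserted,
# '%P' the prospective new value).  Assumes a display; the letters-only rule is
# hypothetical.
def _sketch_entry_validation():
    import tkinter
    from tkinter import ttk
    root = tkinter.Tk()
    try:
        entry = ttk.Entry(root, validate='key')
        def only_letters(inserted):
            return inserted.isalpha()
        entry['validatecommand'] = entry.register(only_letters), '%S'
        entry.insert('end', 'abc')               # accepted by the validator
        entry.insert('end', '123')               # rejected by the validator
        assert entry.get() == 'abc'
    finally:
        root.destroy()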
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class PanedWindowTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'height',
'orient', 'style', 'takefocus', 'width',
)
def setUp(self):
super().setUp()
self.paned = self.create()
def create(self, **kwargs):
return ttk.PanedWindow(self.root, **kwargs)
def test_orient(self):
widget = self.create()
self.assertEqual(str(widget['orient']), 'vertical')
        errmsg = 'attempt to change read-only option'
        if get_tk_patchlevel() < (8, 6, 0): # actually this was changed in 8.6b3
            errmsg = 'Attempt to change read-only option'
self.checkInvalidParam(widget, 'orient', 'horizontal',
errmsg=errmsg)
widget2 = self.create(orient='horizontal')
self.assertEqual(str(widget2['orient']), 'horizontal')
def test_add(self):
# attempt to add a child that is not a direct child of the paned window
label = ttk.Label(self.paned)
child = ttk.Label(label)
self.assertRaises(tkinter.TclError, self.paned.add, child)
label.destroy()
child.destroy()
        # another attempt with a child whose master is not the paned window
label = ttk.Label(self.root)
child = ttk.Label(label)
self.assertRaises(tkinter.TclError, self.paned.add, child)
child.destroy()
label.destroy()
good_child = ttk.Label(self.root)
self.paned.add(good_child)
# re-adding a child is not accepted
self.assertRaises(tkinter.TclError, self.paned.add, good_child)
other_child = ttk.Label(self.paned)
self.paned.add(other_child)
self.assertEqual(self.paned.pane(0), self.paned.pane(1))
self.assertRaises(tkinter.TclError, self.paned.pane, 2)
good_child.destroy()
other_child.destroy()
self.assertRaises(tkinter.TclError, self.paned.pane, 0)
def test_forget(self):
self.assertRaises(tkinter.TclError, self.paned.forget, None)
self.assertRaises(tkinter.TclError, self.paned.forget, 0)
self.paned.add(ttk.Label(self.root))
self.paned.forget(0)
self.assertRaises(tkinter.TclError, self.paned.forget, 0)
def test_insert(self):
self.assertRaises(tkinter.TclError, self.paned.insert, None, 0)
self.assertRaises(tkinter.TclError, self.paned.insert, 0, None)
self.assertRaises(tkinter.TclError, self.paned.insert, 0, 0)
child = ttk.Label(self.root)
child2 = ttk.Label(self.root)
child3 = ttk.Label(self.root)
self.assertRaises(tkinter.TclError, self.paned.insert, 0, child)
self.paned.insert('end', child2)
self.paned.insert(0, child)
self.assertEqual(self.paned.panes(), (str(child), str(child2)))
self.paned.insert(0, child2)
self.assertEqual(self.paned.panes(), (str(child2), str(child)))
self.paned.insert('end', child3)
self.assertEqual(self.paned.panes(),
(str(child2), str(child), str(child3)))
        # reinserting a child at its current position is a no-op
panes = self.paned.panes()
self.paned.insert('end', child3)
self.assertEqual(panes, self.paned.panes())
# moving child3 to child2 position should result in child2 ending up
# in previous child position and child ending up in previous child3
# position
self.paned.insert(child2, child3)
self.assertEqual(self.paned.panes(),
(str(child3), str(child2), str(child)))
def test_pane(self):
self.assertRaises(tkinter.TclError, self.paned.pane, 0)
child = ttk.Label(self.root)
self.paned.add(child)
self.assertIsInstance(self.paned.pane(0), dict)
self.assertEqual(self.paned.pane(0, weight=None),
0 if self.wantobjects else '0')
# newer form for querying a single option
self.assertEqual(self.paned.pane(0, 'weight'),
0 if self.wantobjects else '0')
self.assertEqual(self.paned.pane(0), self.paned.pane(str(child)))
self.assertRaises(tkinter.TclError, self.paned.pane, 0,
badoption='somevalue')
def test_sashpos(self):
self.assertRaises(tkinter.TclError, self.paned.sashpos, None)
self.assertRaises(tkinter.TclError, self.paned.sashpos, '')
self.assertRaises(tkinter.TclError, self.paned.sashpos, 0)
child = ttk.Label(self.paned, text='a')
self.paned.add(child, weight=1)
self.assertRaises(tkinter.TclError, self.paned.sashpos, 0)
child2 = ttk.Label(self.paned, text='b')
self.paned.add(child2)
self.assertRaises(tkinter.TclError, self.paned.sashpos, 1)
self.paned.pack(expand=True, fill='both')
self.paned.wait_visibility()
curr_pos = self.paned.sashpos(0)
self.paned.sashpos(0, 1000)
self.assertNotEqual(curr_pos, self.paned.sashpos(0))
self.assertIsInstance(self.paned.sashpos(0), int)
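# A minimal illustrative sketch of the pane management exercised above, not
# collected by the test runner: a pane's master must be the paned window (or an
# ancestor of it, which is why the nested labels in test_add are rejected),
# panes are managed positionally via add()/insert()/forget(), and sashpos()
# only works once the window is mapped.  Assumes a display; widget names and
# the sash position are hypothetical.
def _sketch_panedwindow_panes():
    import tkinter
    from tkinter import ttk
    root = tkinter.Tk()
    try:
        paned = ttk.PanedWindow(root, orient='vertical')
        first = ttk.Label(paned, text='first')
        second = ttk.Label(paned, text='second')
        paned.add(first, weight=1)
        paned.insert('end', second)
        assert paned.panes() == (str(first), str(second))
        paned.pack(expand=True, fill='both')
        paned.wait_visibility()                  # map the window so a sash exists
        paned.sashpos(0, 40)                     # move the single sash
    finally:
        root.destroy()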
@add_standard_options(StandardTtkOptionsTests)
class RadiobuttonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'compound', 'cursor',
'image',
'state', 'style',
'takefocus', 'text', 'textvariable',
'underline', 'value', 'variable', 'width',
)
def create(self, **kwargs):
return ttk.Radiobutton(self.root, **kwargs)
def test_value(self):
widget = self.create()
self.checkParams(widget, 'value', 1, 2.3, '', 'any string')
def test_invoke(self):
success = []
def cb_test():
success.append(1)
return "cb test called"
myvar = tkinter.IntVar(self.root)
cbtn = ttk.Radiobutton(self.root, command=cb_test,
variable=myvar, value=0)
cbtn2 = ttk.Radiobutton(self.root, command=cb_test,
variable=myvar, value=1)
if self.wantobjects:
conv = lambda x: x
else:
conv = int
res = cbtn.invoke()
self.assertEqual(res, "cb test called")
self.assertEqual(conv(cbtn['value']), myvar.get())
self.assertEqual(myvar.get(),
conv(cbtn.tk.globalgetvar(cbtn['variable'])))
self.assertTrue(success)
cbtn2['command'] = ''
res = cbtn2.invoke()
self.assertEqual(str(res), '')
self.assertLessEqual(len(success), 1)
self.assertEqual(conv(cbtn2['value']), myvar.get())
self.assertEqual(myvar.get(),
conv(cbtn.tk.globalgetvar(cbtn['variable'])))
self.assertEqual(str(cbtn['variable']), str(cbtn2['variable']))
class MenubuttonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'class', 'compound', 'cursor', 'direction',
'image', 'menu', 'state', 'style',
'takefocus', 'text', 'textvariable',
'underline', 'width',
)
def create(self, **kwargs):
return ttk.Menubutton(self.root, **kwargs)
def test_direction(self):
widget = self.create()
self.checkEnumParam(widget, 'direction',
'above', 'below', 'left', 'right', 'flush')
def test_menu(self):
widget = self.create()
menu = tkinter.Menu(widget, name='menu')
self.checkParam(widget, 'menu', menu, conv=str)
menu.destroy()
@add_standard_options(StandardTtkOptionsTests)
class ScaleTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'cursor', 'from', 'length',
'orient', 'style', 'takefocus', 'to', 'value', 'variable',
)
_conv_pixels = noconv
default_orient = 'horizontal'
def setUp(self):
super().setUp()
self.scale = self.create()
self.scale.pack()
self.scale.update()
def create(self, **kwargs):
return ttk.Scale(self.root, **kwargs)
def test_from(self):
widget = self.create()
self.checkFloatParam(widget, 'from', 100, 14.9, 15.1, conv=False)
def test_length(self):
widget = self.create()
self.checkPixelsParam(widget, 'length', 130, 131.2, 135.6, '5i')
def test_to(self):
widget = self.create()
self.checkFloatParam(widget, 'to', 300, 14.9, 15.1, -10, conv=False)
def test_value(self):
widget = self.create()
self.checkFloatParam(widget, 'value', 300, 14.9, 15.1, -10, conv=False)
def test_custom_event(self):
        failure = [1, 1, 1]  # one pop per <<RangeChanged>> event; must end up empty
funcid = self.scale.bind('<<RangeChanged>>', lambda evt: failure.pop())
self.scale['from'] = 10
self.scale['from_'] = 10
self.scale['to'] = 3
self.assertFalse(failure)
failure = [1, 1, 1]
self.scale.configure(from_=2, to=5)
self.scale.configure(from_=0, to=-2)
self.scale.configure(to=10)
self.assertFalse(failure)
def test_get(self):
if self.wantobjects:
conv = lambda x: x
else:
conv = float
scale_width = self.scale.winfo_width()
self.assertEqual(self.scale.get(scale_width, 0), self.scale['to'])
self.assertEqual(conv(self.scale.get(0, 0)), conv(self.scale['from']))
self.assertEqual(self.scale.get(), self.scale['value'])
self.scale['value'] = 30
self.assertEqual(self.scale.get(), self.scale['value'])
self.assertRaises(tkinter.TclError, self.scale.get, '', 0)
self.assertRaises(tkinter.TclError, self.scale.get, 0, '')
def test_set(self):
if self.wantobjects:
conv = lambda x: x
else:
conv = float
# set restricts the max/min values according to the current range
max = conv(self.scale['to'])
new_max = max + 10
self.scale.set(new_max)
self.assertEqual(conv(self.scale.get()), max)
min = conv(self.scale['from'])
self.scale.set(min - 1)
self.assertEqual(conv(self.scale.get()), min)
        # changing the variable directly doesn't impose this limitation, though
var = tkinter.DoubleVar(self.root)
self.scale['variable'] = var
var.set(max + 5)
self.assertEqual(conv(self.scale.get()), var.get())
self.assertEqual(conv(self.scale.get()), max + 5)
del var
# the same happens with the value option
self.scale['value'] = max + 10
self.assertEqual(conv(self.scale.get()), max + 10)
self.assertEqual(conv(self.scale.get()), conv(self.scale['value']))
        # nevertheless, the max/min values obtainable by specifying x, y
        # coords are still bounded by the current range
self.assertEqual(conv(self.scale.get(0, 0)), min)
self.assertEqual(conv(self.scale.get(self.scale.winfo_width(), 0)), max)
self.assertRaises(tkinter.TclError, self.scale.set, None)
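# A minimal illustrative sketch of the clamping behaviour test_set documents
# above, not collected by the test runner: Scale.set() clamps to the current
# from/to range, while writing the linked variable (or the 'value' option)
# directly bypasses that clamping.  Assumes a display; the range and the
# out-of-range value are hypothetical.
def _sketch_scale_clamping():
    import tkinter
    from tkinter import ttk
    root = tkinter.Tk()
    try:
        scale = ttk.Scale(root, from_=0, to=10)
        scale.pack()
        scale.update()
        scale.set(25)                            # clamped to 'to'
        assert scale.get() == 10
        var = tkinter.DoubleVar(root, value=0)
        scale['variable'] = var
        var.set(25)                              # the variable is not clamped
        assert scale.get() == 25
    finally:
        root.destroy()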
@add_standard_options(StandardTtkOptionsTests)
class ProgressbarTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'orient', 'length',
'mode', 'maximum', 'phase',
'style', 'takefocus', 'value', 'variable',
)
_conv_pixels = noconv
default_orient = 'horizontal'
def create(self, **kwargs):
return ttk.Progressbar(self.root, **kwargs)
def test_length(self):
widget = self.create()
self.checkPixelsParam(widget, 'length', 100.1, 56.7, '2i')
def test_maximum(self):
widget = self.create()
self.checkFloatParam(widget, 'maximum', 150.2, 77.7, 0, -10, conv=False)
def test_mode(self):
widget = self.create()
self.checkEnumParam(widget, 'mode', 'determinate', 'indeterminate')
def test_phase(self):
# XXX
pass
def test_value(self):
widget = self.create()
self.checkFloatParam(widget, 'value', 150.2, 77.7, 0, -10,
conv=False)
@unittest.skipIf(sys.platform == 'darwin',
'ttk.Scrollbar is special on MacOSX')
@add_standard_options(StandardTtkOptionsTests)
class ScrollbarTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'cursor', 'orient', 'style', 'takefocus',
)
default_orient = 'vertical'
def create(self, **kwargs):
return ttk.Scrollbar(self.root, **kwargs)
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class NotebookTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'height', 'padding', 'style', 'takefocus',
)
def setUp(self):
super().setUp()
self.nb = self.create(padding=0)
self.child1 = ttk.Label(self.root)
self.child2 = ttk.Label(self.root)
self.nb.add(self.child1, text='a')
self.nb.add(self.child2, text='b')
def create(self, **kwargs):
return ttk.Notebook(self.root, **kwargs)
def test_tab_identifiers(self):
self.nb.forget(0)
self.nb.hide(self.child2)
self.assertRaises(tkinter.TclError, self.nb.tab, self.child1)
self.assertEqual(self.nb.index('end'), 1)
self.nb.add(self.child2)
self.assertEqual(self.nb.index('end'), 1)
self.nb.select(self.child2)
self.assertTrue(self.nb.tab('current'))
self.nb.add(self.child1, text='a')
self.nb.pack()
self.nb.wait_visibility()
if sys.platform == 'darwin':
tb_idx = "@20,5"
else:
tb_idx = "@5,5"
self.assertEqual(self.nb.tab(tb_idx), self.nb.tab('current'))
for i in range(5, 100, 5):
try:
if self.nb.tab('@%d, 5' % i, text=None) == 'a':
break
except tkinter.TclError:
pass
else:
self.fail("Tab with text 'a' not found")
def test_add_and_hidden(self):
self.assertRaises(tkinter.TclError, self.nb.hide, -1)
self.assertRaises(tkinter.TclError, self.nb.hide, 'hi')
self.assertRaises(tkinter.TclError, self.nb.hide, None)
self.assertRaises(tkinter.TclError, self.nb.add, None)
self.assertRaises(tkinter.TclError, self.nb.add, ttk.Label(self.root),
unknown='option')
tabs = self.nb.tabs()
self.nb.hide(self.child1)
self.nb.add(self.child1)
self.assertEqual(self.nb.tabs(), tabs)
child = ttk.Label(self.root)
self.nb.add(child, text='c')
tabs = self.nb.tabs()
curr = self.nb.index('current')
        # verify that the tab gets re-added at its previous position
child2_index = self.nb.index(self.child2)
self.nb.hide(self.child2)
self.nb.add(self.child2)
self.assertEqual(self.nb.tabs(), tabs)
self.assertEqual(self.nb.index(self.child2), child2_index)
self.assertEqual(str(self.child2), self.nb.tabs()[child2_index])
# but the tab next to it (not hidden) is the one selected now
self.assertEqual(self.nb.index('current'), curr + 1)
def test_forget(self):
self.assertRaises(tkinter.TclError, self.nb.forget, -1)
self.assertRaises(tkinter.TclError, self.nb.forget, 'hi')
self.assertRaises(tkinter.TclError, self.nb.forget, None)
tabs = self.nb.tabs()
child1_index = self.nb.index(self.child1)
self.nb.forget(self.child1)
self.assertNotIn(str(self.child1), self.nb.tabs())
self.assertEqual(len(tabs) - 1, len(self.nb.tabs()))
self.nb.add(self.child1)
self.assertEqual(self.nb.index(self.child1), 1)
self.assertNotEqual(child1_index, self.nb.index(self.child1))
def test_index(self):
self.assertRaises(tkinter.TclError, self.nb.index, -1)
self.assertRaises(tkinter.TclError, self.nb.index, None)
self.assertIsInstance(self.nb.index('end'), int)
self.assertEqual(self.nb.index(self.child1), 0)
self.assertEqual(self.nb.index(self.child2), 1)
self.assertEqual(self.nb.index('end'), 2)
def test_insert(self):
# moving tabs
tabs = self.nb.tabs()
self.nb.insert(1, tabs[0])
self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
self.nb.insert(self.child1, self.child2)
self.assertEqual(self.nb.tabs(), tabs)
self.nb.insert('end', self.child1)
self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
self.nb.insert('end', 0)
self.assertEqual(self.nb.tabs(), tabs)
# bad moves
self.assertRaises(tkinter.TclError, self.nb.insert, 2, tabs[0])
self.assertRaises(tkinter.TclError, self.nb.insert, -1, tabs[0])
# new tab
child3 = ttk.Label(self.root)
self.nb.insert(1, child3)
self.assertEqual(self.nb.tabs(), (tabs[0], str(child3), tabs[1]))
self.nb.forget(child3)
self.assertEqual(self.nb.tabs(), tabs)
self.nb.insert(self.child1, child3)
self.assertEqual(self.nb.tabs(), (str(child3), ) + tabs)
self.nb.forget(child3)
self.assertRaises(tkinter.TclError, self.nb.insert, 2, child3)
self.assertRaises(tkinter.TclError, self.nb.insert, -1, child3)
# bad inserts
self.assertRaises(tkinter.TclError, self.nb.insert, 'end', None)
self.assertRaises(tkinter.TclError, self.nb.insert, None, 0)
self.assertRaises(tkinter.TclError, self.nb.insert, None, None)
def test_select(self):
self.nb.pack()
self.nb.wait_visibility()
success = []
tab_changed = []
self.child1.bind('<Unmap>', lambda evt: success.append(True))
self.nb.bind('<<NotebookTabChanged>>',
lambda evt: tab_changed.append(True))
self.assertEqual(self.nb.select(), str(self.child1))
self.nb.select(self.child2)
self.assertTrue(success)
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.update()
self.assertTrue(tab_changed)
def test_tab(self):
self.assertRaises(tkinter.TclError, self.nb.tab, -1)
self.assertRaises(tkinter.TclError, self.nb.tab, 'notab')
self.assertRaises(tkinter.TclError, self.nb.tab, None)
self.assertIsInstance(self.nb.tab(self.child1), dict)
self.assertEqual(self.nb.tab(self.child1, text=None), 'a')
# newer form for querying a single option
self.assertEqual(self.nb.tab(self.child1, 'text'), 'a')
self.nb.tab(self.child1, text='abc')
self.assertEqual(self.nb.tab(self.child1, text=None), 'abc')
self.assertEqual(self.nb.tab(self.child1, 'text'), 'abc')
def test_tabs(self):
self.assertEqual(len(self.nb.tabs()), 2)
self.nb.forget(self.child1)
self.nb.forget(self.child2)
self.assertEqual(self.nb.tabs(), ())
def test_traversal(self):
self.nb.pack()
self.nb.wait_visibility()
self.nb.select(0)
simulate_mouse_click(self.nb, 5, 5)
self.nb.focus_force()
self.nb.event_generate('<Control-Tab>')
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.focus_force()
self.nb.event_generate('<Shift-Control-Tab>')
self.assertEqual(self.nb.select(), str(self.child1))
self.nb.focus_force()
self.nb.event_generate('<Shift-Control-Tab>')
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.tab(self.child1, text='a', underline=0)
self.nb.enable_traversal()
self.nb.focus_force()
simulate_mouse_click(self.nb, 5, 5)
if sys.platform == 'darwin':
self.nb.event_generate('<Option-a>')
else:
self.nb.event_generate('<Alt-a>')
self.assertEqual(self.nb.select(), str(self.child1))
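# A minimal illustrative sketch of the tab management and keyboard traversal
# exercised above, not collected by the test runner: tabs are addressed by the
# child widget or an index, hide() keeps a tab in tabs() while forget() removes
# it, and enable_traversal() installs the Ctrl-Tab / Alt-<underline> bindings
# that test_traversal simulates.  Assumes a display; the page widgets are
# hypothetical.
def _sketch_notebook_tabs():
    import tkinter
    from tkinter import ttk
    root = tkinter.Tk()
    try:
        nb = ttk.Notebook(root)
        page1 = ttk.Frame(nb)
        page2 = ttk.Frame(nb)
        nb.add(page1, text='First', underline=0)
        nb.add(page2, text='Second')
        nb.enable_traversal()                    # keyboard traversal between tabs
        nb.select(page2)
        assert nb.index('current') == 1
        nb.hide(page1)                           # hidden, but still listed
        assert str(page1) in nb.tabs()
    finally:
        root.destroy()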
@add_standard_options(StandardTtkOptionsTests)
class TreeviewTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'columns', 'cursor', 'displaycolumns',
'height', 'padding', 'selectmode', 'show',
'style', 'takefocus', 'xscrollcommand', 'yscrollcommand',
)
def setUp(self):
super().setUp()
self.tv = self.create(padding=0)
def create(self, **kwargs):
return ttk.Treeview(self.root, **kwargs)
def test_columns(self):
widget = self.create()
self.checkParam(widget, 'columns', 'a b c',
expected=('a', 'b', 'c'))
self.checkParam(widget, 'columns', ('a', 'b', 'c'))
self.checkParam(widget, 'columns', ())
def test_displaycolumns(self):
widget = self.create()
widget['columns'] = ('a', 'b', 'c')
self.checkParam(widget, 'displaycolumns', 'b a c',
expected=('b', 'a', 'c'))
self.checkParam(widget, 'displaycolumns', ('b', 'a', 'c'))
self.checkParam(widget, 'displaycolumns', '#all',
expected=('#all',))
self.checkParam(widget, 'displaycolumns', (2, 1, 0))
self.checkInvalidParam(widget, 'displaycolumns', ('a', 'b', 'd'),
errmsg='Invalid column index d')
self.checkInvalidParam(widget, 'displaycolumns', (1, 2, 3),
errmsg='Column index 3 out of bounds')
self.checkInvalidParam(widget, 'displaycolumns', (1, -2),
errmsg='Column index -2 out of bounds')
def test_height(self):
widget = self.create()
self.checkPixelsParam(widget, 'height', 100, -100, 0, '3c', conv=False)
self.checkPixelsParam(widget, 'height', 101.2, 102.6, conv=noconv)
def test_selectmode(self):
widget = self.create()
self.checkEnumParam(widget, 'selectmode',
'none', 'browse', 'extended')
def test_show(self):
widget = self.create()
self.checkParam(widget, 'show', 'tree headings',
expected=('tree', 'headings'))
self.checkParam(widget, 'show', ('tree', 'headings'))
self.checkParam(widget, 'show', ('headings', 'tree'))
self.checkParam(widget, 'show', 'tree', expected=('tree',))
self.checkParam(widget, 'show', 'headings', expected=('headings',))
def test_bbox(self):
self.tv.pack()
self.assertEqual(self.tv.bbox(''), '')
self.tv.wait_visibility()
self.tv.update()
item_id = self.tv.insert('', 'end')
children = self.tv.get_children()
self.assertTrue(children)
bbox = self.tv.bbox(children[0])
self.assertIsBoundingBox(bbox)
# compare width in bboxes
self.tv['columns'] = ['test']
self.tv.column('test', width=50)
bbox_column0 = self.tv.bbox(children[0], 0)
root_width = self.tv.column('#0', width=None)
if not self.wantobjects:
root_width = int(root_width)
self.assertEqual(bbox_column0[0], bbox[0] + root_width)
# verify that bbox of a closed item is the empty string
child1 = self.tv.insert(item_id, 'end')
self.assertEqual(self.tv.bbox(child1), '')
def test_children(self):
# no children yet, should get an empty tuple
self.assertEqual(self.tv.get_children(), ())
item_id = self.tv.insert('', 'end')
self.assertIsInstance(self.tv.get_children(), tuple)
self.assertEqual(self.tv.get_children()[0], item_id)
# add item_id and child3 as children of child2
child2 = self.tv.insert('', 'end')
child3 = self.tv.insert('', 'end')
self.tv.set_children(child2, item_id, child3)
self.assertEqual(self.tv.get_children(child2), (item_id, child3))
        # child3 has child2 as parent, thus trying to set child2 as a child
        # of child3 should result in an error
self.assertRaises(tkinter.TclError,
self.tv.set_children, child3, child2)
# remove child2 children
self.tv.set_children(child2)
self.assertEqual(self.tv.get_children(child2), ())
# remove root's children
self.tv.set_children('')
self.assertEqual(self.tv.get_children(), ())
def test_column(self):
# return a dict with all options/values
self.assertIsInstance(self.tv.column('#0'), dict)
# return a single value of the given option
if self.wantobjects:
self.assertIsInstance(self.tv.column('#0', width=None), int)
# set a new value for an option
self.tv.column('#0', width=10)
# testing new way to get option value
self.assertEqual(self.tv.column('#0', 'width'),
10 if self.wantobjects else '10')
self.assertEqual(self.tv.column('#0', width=None),
10 if self.wantobjects else '10')
# check read-only option
self.assertRaises(tkinter.TclError, self.tv.column, '#0', id='X')
self.assertRaises(tkinter.TclError, self.tv.column, 'invalid')
invalid_kws = [
{'unknown_option': 'some value'}, {'stretch': 'wrong'},
{'anchor': 'wrong'}, {'width': 'wrong'}, {'minwidth': 'wrong'}
]
for kw in invalid_kws:
self.assertRaises(tkinter.TclError, self.tv.column, '#0',
**kw)
def test_delete(self):
self.assertRaises(tkinter.TclError, self.tv.delete, '#0')
item_id = self.tv.insert('', 'end')
item2 = self.tv.insert(item_id, 'end')
self.assertEqual(self.tv.get_children(), (item_id, ))
self.assertEqual(self.tv.get_children(item_id), (item2, ))
self.tv.delete(item_id)
self.assertFalse(self.tv.get_children())
# reattach should fail
self.assertRaises(tkinter.TclError,
self.tv.reattach, item_id, '', 'end')
# test multiple item delete
item1 = self.tv.insert('', 'end')
item2 = self.tv.insert('', 'end')
self.assertEqual(self.tv.get_children(), (item1, item2))
self.tv.delete(item1, item2)
self.assertFalse(self.tv.get_children())
def test_detach_reattach(self):
item_id = self.tv.insert('', 'end')
item2 = self.tv.insert(item_id, 'end')
# calling detach without items is valid, although it does nothing
prev = self.tv.get_children()
self.tv.detach() # this should do nothing
self.assertEqual(prev, self.tv.get_children())
self.assertEqual(self.tv.get_children(), (item_id, ))
self.assertEqual(self.tv.get_children(item_id), (item2, ))
# detach item with children
self.tv.detach(item_id)
self.assertFalse(self.tv.get_children())
# reattach item with children
self.tv.reattach(item_id, '', 'end')
self.assertEqual(self.tv.get_children(), (item_id, ))
self.assertEqual(self.tv.get_children(item_id), (item2, ))
        # move a child to the root
self.tv.move(item2, '', 'end')
self.assertEqual(self.tv.get_children(), (item_id, item2))
self.assertEqual(self.tv.get_children(item_id), ())
# bad values
self.assertRaises(tkinter.TclError,
self.tv.reattach, 'nonexistent', '', 'end')
self.assertRaises(tkinter.TclError,
self.tv.detach, 'nonexistent')
self.assertRaises(tkinter.TclError,
self.tv.reattach, item2, 'otherparent', 'end')
self.assertRaises(tkinter.TclError,
self.tv.reattach, item2, '', 'invalid')
# multiple detach
self.tv.detach(item_id, item2)
self.assertEqual(self.tv.get_children(), ())
self.assertEqual(self.tv.get_children(item_id), ())
def test_exists(self):
self.assertEqual(self.tv.exists('something'), False)
self.assertEqual(self.tv.exists(''), True)
self.assertEqual(self.tv.exists({}), False)
# the following will make a tk.call equivalent to
# tk.call(treeview, "exists") which should result in an error
# in the tcl interpreter since tk requires an item.
self.assertRaises(tkinter.TclError, self.tv.exists, None)
def test_focus(self):
# nothing is focused right now
self.assertEqual(self.tv.focus(), '')
item1 = self.tv.insert('', 'end')
self.tv.focus(item1)
self.assertEqual(self.tv.focus(), item1)
self.tv.delete(item1)
self.assertEqual(self.tv.focus(), '')
        # try focusing a nonexistent item
self.assertRaises(tkinter.TclError, self.tv.focus, 'hi')
def test_heading(self):
# check a dict is returned
self.assertIsInstance(self.tv.heading('#0'), dict)
# check a value is returned
self.tv.heading('#0', text='hi')
self.assertEqual(self.tv.heading('#0', 'text'), 'hi')
self.assertEqual(self.tv.heading('#0', text=None), 'hi')
# invalid option
self.assertRaises(tkinter.TclError, self.tv.heading, '#0',
background=None)
# invalid value
self.assertRaises(tkinter.TclError, self.tv.heading, '#0',
anchor=1)
def test_heading_callback(self):
def simulate_heading_click(x, y):
simulate_mouse_click(self.tv, x, y)
self.tv.update()
success = [] # no success for now
self.tv.pack()
self.tv.wait_visibility()
self.tv.heading('#0', command=lambda: success.append(True))
self.tv.column('#0', width=100)
self.tv.update()
# assuming that the coords (5, 5) fall into heading #0
simulate_heading_click(5, 5)
if not success:
self.fail("The command associated to the treeview heading wasn't "
"invoked.")
success = []
commands = self.tv.master._tclCommands
self.tv.heading('#0', command=str(self.tv.heading('#0', command=None)))
self.assertEqual(commands, self.tv.master._tclCommands)
simulate_heading_click(5, 5)
if not success:
self.fail("The command associated to the treeview heading wasn't "
"invoked.")
# XXX The following raises an error in a tcl interpreter, but not in
# Python
#self.tv.heading('#0', command='I dont exist')
#simulate_heading_click(5, 5)
def test_index(self):
# item 'what' doesn't exist
self.assertRaises(tkinter.TclError, self.tv.index, 'what')
self.assertEqual(self.tv.index(''), 0)
item1 = self.tv.insert('', 'end')
item2 = self.tv.insert('', 'end')
c1 = self.tv.insert(item1, 'end')
c2 = self.tv.insert(item1, 'end')
self.assertEqual(self.tv.index(item1), 0)
self.assertEqual(self.tv.index(c1), 0)
self.assertEqual(self.tv.index(c2), 1)
self.assertEqual(self.tv.index(item2), 1)
self.tv.move(item2, '', 0)
self.assertEqual(self.tv.index(item2), 0)
self.assertEqual(self.tv.index(item1), 1)
# check that index still works even after its parent and siblings
# have been detached
self.tv.detach(item1)
self.assertEqual(self.tv.index(c2), 1)
self.tv.detach(c1)
self.assertEqual(self.tv.index(c2), 0)
# but it fails after item has been deleted
self.tv.delete(item1)
self.assertRaises(tkinter.TclError, self.tv.index, c2)
def test_insert_item(self):
# parent 'none' doesn't exist
self.assertRaises(tkinter.TclError, self.tv.insert, 'none', 'end')
# open values
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
open='')
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
open='please')
self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=True)))
self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=False)))
# invalid index
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'middle')
# trying to duplicate item id is invalid
itemid = self.tv.insert('', 'end', 'first-item')
self.assertEqual(itemid, 'first-item')
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
'first-item')
self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
MockTclObj('first-item'))
# unicode values
value = '\xe1ba'
item = self.tv.insert('', 'end', values=(value, ))
self.assertEqual(self.tv.item(item, 'values'),
(value,) if self.wantobjects else value)
self.assertEqual(self.tv.item(item, values=None),
(value,) if self.wantobjects else value)
self.tv.item(item, values=self.root.splitlist(self.tv.item(item, values=None)))
self.assertEqual(self.tv.item(item, values=None),
(value,) if self.wantobjects else value)
self.assertIsInstance(self.tv.item(item), dict)
# erase item values
self.tv.item(item, values='')
self.assertFalse(self.tv.item(item, values=None))
# item tags
item = self.tv.insert('', 'end', tags=[1, 2, value])
self.assertEqual(self.tv.item(item, tags=None),
('1', '2', value) if self.wantobjects else
'1 2 %s' % value)
self.tv.item(item, tags=[])
self.assertFalse(self.tv.item(item, tags=None))
self.tv.item(item, tags=(1, 2))
self.assertEqual(self.tv.item(item, tags=None),
('1', '2') if self.wantobjects else '1 2')
# values with spaces
item = self.tv.insert('', 'end', values=('a b c',
'%s %s' % (value, value)))
self.assertEqual(self.tv.item(item, values=None),
('a b c', '%s %s' % (value, value)) if self.wantobjects else
'{a b c} {%s %s}' % (value, value))
# text
self.assertEqual(self.tv.item(
self.tv.insert('', 'end', text="Label here"), text=None),
"Label here")
self.assertEqual(self.tv.item(
self.tv.insert('', 'end', text=value), text=None),
value)
def test_set(self):
self.tv['columns'] = ['A', 'B']
item = self.tv.insert('', 'end', values=['a', 'b'])
self.assertEqual(self.tv.set(item), {'A': 'a', 'B': 'b'})
self.tv.set(item, 'B', 'a')
self.assertEqual(self.tv.item(item, values=None),
('a', 'a') if self.wantobjects else 'a a')
self.tv['columns'] = ['B']
self.assertEqual(self.tv.set(item), {'B': 'a'})
self.tv.set(item, 'B', 'b')
self.assertEqual(self.tv.set(item, column='B'), 'b')
self.assertEqual(self.tv.item(item, values=None),
('b', 'a') if self.wantobjects else 'b a')
self.tv.set(item, 'B', 123)
self.assertEqual(self.tv.set(item, 'B'),
123 if self.wantobjects else '123')
self.assertEqual(self.tv.item(item, values=None),
(123, 'a') if self.wantobjects else '123 a')
self.assertEqual(self.tv.set(item),
{'B': 123} if self.wantobjects else {'B': '123'})
        # nonexistent column
self.assertRaises(tkinter.TclError, self.tv.set, item, 'A')
self.assertRaises(tkinter.TclError, self.tv.set, item, 'A', 'b')
        # nonexistent item
self.assertRaises(tkinter.TclError, self.tv.set, 'notme')
def test_tag_bind(self):
events = []
item1 = self.tv.insert('', 'end', tags=['call'])
item2 = self.tv.insert('', 'end', tags=['call'])
self.tv.tag_bind('call', '<ButtonPress-1>',
lambda evt: events.append(1))
self.tv.tag_bind('call', '<ButtonRelease-1>',
lambda evt: events.append(2))
self.tv.pack()
self.tv.wait_visibility()
self.tv.update()
pos_y = set()
found = set()
for i in range(0, 100, 10):
if len(found) == 2: # item1 and item2 already found
break
item_id = self.tv.identify_row(i)
if item_id and item_id not in found:
pos_y.add(i)
found.add(item_id)
self.assertEqual(len(pos_y), 2) # item1 and item2 y pos
for y in pos_y:
simulate_mouse_click(self.tv, 0, y)
# by now there should be 4 things in the events list, since each
# item had a bind for two events that were simulated above
self.assertEqual(len(events), 4)
for evt in zip(events[::2], events[1::2]):
self.assertEqual(evt, (1, 2))
def test_tag_configure(self):
# Just testing parameter passing for now
self.assertRaises(TypeError, self.tv.tag_configure)
self.assertRaises(tkinter.TclError, self.tv.tag_configure,
'test', sky='blue')
self.tv.tag_configure('test', foreground='blue')
self.assertEqual(str(self.tv.tag_configure('test', 'foreground')),
'blue')
self.assertEqual(str(self.tv.tag_configure('test', foreground=None)),
'blue')
self.assertIsInstance(self.tv.tag_configure('test'), dict)
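# A minimal illustrative sketch of the item model exercised by
# test_insert_item/test_set/test_tag_configure above, not collected by the test
# runner: items are addressed by generated (or explicit) ids, carry per-column
# values plus tags, and set() reads or writes a single cell.  Assumes a
# display; the column and item names are hypothetical.
def _sketch_treeview_items():
    import tkinter
    from tkinter import ttk
    root = tkinter.Tk()
    try:
        tv = ttk.Treeview(root, columns=('size',))
        tv.heading('size', text='Size')
        parent = tv.insert('', 'end', text='folder', open=True)
        child = tv.insert(parent, 'end', text='file', values=('42',),
                          tags=('small',))
        tv.tag_configure('small', foreground='blue')
        assert tv.get_children(parent) == (child,)
        assert str(tv.set(child, 'size')) == '42'
        tv.set(child, 'size', '43')              # update a single cell
        assert tv.item(child, 'text') == 'file'
    finally:
        root.destroy()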
@add_standard_options(StandardTtkOptionsTests)
class SeparatorTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'orient', 'style', 'takefocus',
# 'state'?
)
default_orient = 'horizontal'
def create(self, **kwargs):
return ttk.Separator(self.root, **kwargs)
@add_standard_options(StandardTtkOptionsTests)
class SizegripTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'style', 'takefocus',
# 'state'?
)
def create(self, **kwargs):
return ttk.Sizegrip(self.root, **kwargs)
tests_gui = (
ButtonTest, CheckbuttonTest, ComboboxTest, EntryTest,
FrameTest, LabelFrameTest, LabelTest, MenubuttonTest,
NotebookTest, PanedWindowTest, ProgressbarTest,
RadiobuttonTest, ScaleTest, ScrollbarTest, SeparatorTest,
SizegripTest, TreeviewTest, WidgetTest,
)
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
berendkleinhaneveld/VTK | ThirdParty/Twisted/twisted/internet/protocol.py | 31 | 26263 | # -*- test-case-name: twisted.test.test_factories,twisted.internet.test.test_protocol -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Standard implementations of Twisted protocol-related interfaces.
Start here if you are looking to write a new protocol implementation for
Twisted. The Protocol class contains some introductory material.
"""
from __future__ import division, absolute_import
import random
from zope.interface import implementer
from twisted.python import log, failure, components
from twisted.internet import interfaces, error, defer
@implementer(interfaces.IProtocolFactory, interfaces.ILoggingContext)
class Factory:
"""
This is a factory which produces protocols.
By default, buildProtocol will create a protocol of the class given in
self.protocol.
"""
# put a subclass of Protocol here:
protocol = None
numPorts = 0
noisy = True
@classmethod
def forProtocol(cls, protocol, *args, **kwargs):
"""
Create a factory for the given protocol.
It sets the C{protocol} attribute and returns the constructed factory
instance.
@param protocol: A L{Protocol} subclass
@param args: Positional arguments for the factory.
@param kwargs: Keyword arguments for the factory.
@return: A L{Factory} instance wired up to C{protocol}.
"""
factory = cls(*args, **kwargs)
factory.protocol = protocol
return factory
def logPrefix(self):
"""
Describe this factory for log messages.
"""
return self.__class__.__name__
def doStart(self):
"""Make sure startFactory is called.
Users should not call this function themselves!
"""
if not self.numPorts:
if self.noisy:
log.msg("Starting factory %r" % self)
self.startFactory()
self.numPorts = self.numPorts + 1
def doStop(self):
"""Make sure stopFactory is called.
Users should not call this function themselves!
"""
if self.numPorts == 0:
# this shouldn't happen, but does sometimes and this is better
# than blowing up in assert as we did previously.
return
self.numPorts = self.numPorts - 1
if not self.numPorts:
if self.noisy:
log.msg("Stopping factory %r" % self)
self.stopFactory()
def startFactory(self):
"""This will be called before I begin listening on a Port or Connector.
It will only be called once, even if the factory is connected
to multiple ports.
This can be used to perform 'unserialization' tasks that
are best put off until things are actually running, such
as connecting to a database, opening files, etcetera.
"""
def stopFactory(self):
"""This will be called before I stop listening on all Ports/Connectors.
This can be overridden to perform 'shutdown' tasks such as disconnecting
database connections, closing files, etc.
It will be called, for example, before an application shuts down,
if it was connected to a port. User code should not call this function
directly.
"""
def buildProtocol(self, addr):
"""Create an instance of a subclass of Protocol.
The returned instance will handle input on an incoming server
connection, and an attribute \"factory\" pointing to the creating
factory.
Override this method to alter how Protocol instances get created.
@param addr: an object implementing L{twisted.internet.interfaces.IAddress}
"""
p = self.protocol()
p.factory = self
return p
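# Illustrative sketch, not used elsewhere in this module: how a caller would
# typically use Factory.forProtocol -- build a factory whose buildProtocol()
# instantiates the given Protocol subclass and hand it to a reactor listen or
# connect call.  The greeter protocol and the port number are hypothetical.
def _example_forProtocol(reactor):
    class _Greeter(Protocol):
        def connectionMade(self):
            self.transport.write(b"hello\r\n")
            self.transport.loseConnection()
    factory = Factory.forProtocol(_Greeter)
    return reactor.listenTCP(8125, factory)  # hypothetical port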
class ClientFactory(Factory):
"""A Protocol factory for clients.
This can be used together with the various connectXXX methods in
reactors.
"""
def startedConnecting(self, connector):
"""Called when a connection has been started.
You can call connector.stopConnecting() to stop the connection attempt.
@param connector: a Connector object.
"""
def clientConnectionFailed(self, connector, reason):
"""Called when a connection has failed to connect.
It may be useful to call connector.connect() - this will reconnect.
@type reason: L{twisted.python.failure.Failure}
"""
def clientConnectionLost(self, connector, reason):
"""Called when an established connection is lost.
It may be useful to call connector.connect() - this will reconnect.
@type reason: L{twisted.python.failure.Failure}
"""
class _InstanceFactory(ClientFactory):
"""
Factory used by ClientCreator.
@ivar deferred: The L{Deferred} which represents this connection attempt and
which will be fired when it succeeds or fails.
@ivar pending: After a connection attempt succeeds or fails, a delayed call
which will fire the L{Deferred} representing this connection attempt.
"""
noisy = False
pending = None
def __init__(self, reactor, instance, deferred):
self.reactor = reactor
self.instance = instance
self.deferred = deferred
def __repr__(self):
return "<ClientCreator factory: %r>" % (self.instance, )
def buildProtocol(self, addr):
"""
Return the pre-constructed protocol instance and arrange to fire the
waiting L{Deferred} to indicate success establishing the connection.
"""
self.pending = self.reactor.callLater(
0, self.fire, self.deferred.callback, self.instance)
self.deferred = None
return self.instance
def clientConnectionFailed(self, connector, reason):
"""
Arrange to fire the waiting L{Deferred} with the given failure to
indicate the connection could not be established.
"""
self.pending = self.reactor.callLater(
0, self.fire, self.deferred.errback, reason)
self.deferred = None
def fire(self, func, value):
"""
Clear C{self.pending} to avoid a reference cycle and then invoke func
with the value.
"""
self.pending = None
func(value)
class ClientCreator:
"""
Client connections that do not require a factory.
The various connect* methods create a protocol instance using the given
protocol class and arguments, and connect it, returning a Deferred of the
resulting protocol instance.
Useful for cases when we don't really need a factory. Mainly this
is when there is no shared state between protocol instances, and no need
to reconnect.
The C{connectTCP}, C{connectUNIX}, and C{connectSSL} methods each return a
L{Deferred} which will fire with an instance of the protocol class passed to
L{ClientCreator.__init__}. These Deferred can be cancelled to abort the
connection attempt (in a very unlikely case, cancelling the Deferred may not
prevent the protocol from being instantiated and connected to a transport;
if this happens, it will be disconnected immediately afterwards and the
Deferred will still errback with L{CancelledError}).
"""
def __init__(self, reactor, protocolClass, *args, **kwargs):
self.reactor = reactor
self.protocolClass = protocolClass
self.args = args
self.kwargs = kwargs
def _connect(self, method, *args, **kwargs):
"""
Initiate a connection attempt.
@param method: A callable which will actually start the connection
attempt. For example, C{reactor.connectTCP}.
@param *args: Positional arguments to pass to C{method}, excluding the
factory.
@param **kwargs: Keyword arguments to pass to C{method}.
@return: A L{Deferred} which fires with an instance of the protocol
class passed to this L{ClientCreator}'s initializer or fails if the
connection cannot be set up for some reason.
"""
def cancelConnect(deferred):
connector.disconnect()
if f.pending is not None:
f.pending.cancel()
d = defer.Deferred(cancelConnect)
f = _InstanceFactory(
self.reactor, self.protocolClass(*self.args, **self.kwargs), d)
connector = method(factory=f, *args, **kwargs)
return d
def connectTCP(self, host, port, timeout=30, bindAddress=None):
"""
Connect to a TCP server.
The parameters are all the same as to L{IReactorTCP.connectTCP} except
that the factory parameter is omitted.
@return: A L{Deferred} which fires with an instance of the protocol
class passed to this L{ClientCreator}'s initializer or fails if the
connection cannot be set up for some reason.
"""
return self._connect(
self.reactor.connectTCP, host, port, timeout=timeout,
bindAddress=bindAddress)
def connectUNIX(self, address, timeout=30, checkPID=False):
"""
Connect to a Unix socket.
The parameters are all the same as to L{IReactorUNIX.connectUNIX} except
that the factory parameter is omitted.
@return: A L{Deferred} which fires with an instance of the protocol
class passed to this L{ClientCreator}'s initializer or fails if the
connection cannot be set up for some reason.
"""
return self._connect(
self.reactor.connectUNIX, address, timeout=timeout,
checkPID=checkPID)
def connectSSL(self, host, port, contextFactory, timeout=30, bindAddress=None):
"""
Connect to an SSL server.
The parameters are all the same as to L{IReactorSSL.connectSSL} except
that the factory parameter is omitted.
@return: A L{Deferred} which fires with an instance of the protocol
class passed to this L{ClientCreator}'s initializer or fails if the
connection cannot be set up for some reason.
"""
return self._connect(
self.reactor.connectSSL, host, port,
contextFactory=contextFactory, timeout=timeout,
bindAddress=bindAddress)
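# Illustrative sketch, not used elsewhere in this module: the typical
# ClientCreator pattern when no factory state is needed -- connect, receive a
# Deferred that fires with the connected protocol instance, then talk through
# its transport.  The host, port and payload are hypothetical.
def _example_clientCreator(reactor):
    creator = ClientCreator(reactor, Protocol)
    d = creator.connectTCP("localhost", 8125)  # hypothetical endpoint
    def connected(proto):
        proto.transport.write(b"ping\r\n")
        proto.transport.loseConnection()
        return proto
    d.addCallback(connected)
    return d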
class ReconnectingClientFactory(ClientFactory):
"""
Factory which auto-reconnects clients with an exponential back-off.
Note that clients should call my resetDelay method after they have
connected successfully.
@ivar maxDelay: Maximum number of seconds between connection attempts.
@ivar initialDelay: Delay for the first reconnection attempt.
    @ivar factor: A multiplicative factor by which the delay grows.
@ivar jitter: Percentage of randomness to introduce into the delay length
to prevent stampeding.
@ivar clock: The clock used to schedule reconnection. It's mainly useful to
be parametrized in tests. If the factory is serialized, this attribute
will not be serialized, and the default value (the reactor) will be
restored when deserialized.
@type clock: L{IReactorTime}
@ivar maxRetries: Maximum number of consecutive unsuccessful connection
attempts, after which no further connection attempts will be made. If
this is not explicitly set, no maximum is applied.
"""
maxDelay = 3600
initialDelay = 1.0
# Note: These highly sensitive factors have been precisely measured by
# the National Institute of Science and Technology. Take extreme care
# in altering them, or you may damage your Internet!
# (Seriously: <http://physics.nist.gov/cuu/Constants/index.html>)
factor = 2.7182818284590451 # (math.e)
# Phi = 1.6180339887498948 # (Phi is acceptable for use as a
# factor if e is too large for your application.)
jitter = 0.11962656472 # molar Planck constant times c, joule meter/mole
delay = initialDelay
retries = 0
maxRetries = None
_callID = None
connector = None
clock = None
continueTrying = 1
def clientConnectionFailed(self, connector, reason):
if self.continueTrying:
self.connector = connector
self.retry()
def clientConnectionLost(self, connector, unused_reason):
if self.continueTrying:
self.connector = connector
self.retry()
def retry(self, connector=None):
"""
Have this connector connect again, after a suitable delay.
"""
if not self.continueTrying:
if self.noisy:
log.msg("Abandoning %s on explicit request" % (connector,))
return
if connector is None:
if self.connector is None:
raise ValueError("no connector to retry")
else:
connector = self.connector
self.retries += 1
if self.maxRetries is not None and (self.retries > self.maxRetries):
if self.noisy:
log.msg("Abandoning %s after %d retries." %
(connector, self.retries))
return
self.delay = min(self.delay * self.factor, self.maxDelay)
if self.jitter:
self.delay = random.normalvariate(self.delay,
self.delay * self.jitter)
if self.noisy:
log.msg("%s will retry in %d seconds" % (connector, self.delay,))
def reconnector():
self._callID = None
connector.connect()
if self.clock is None:
from twisted.internet import reactor
self.clock = reactor
self._callID = self.clock.callLater(self.delay, reconnector)
def stopTrying(self):
"""
Put a stop to any attempt to reconnect in progress.
"""
# ??? Is this function really stopFactory?
if self._callID:
self._callID.cancel()
self._callID = None
self.continueTrying = 0
if self.connector:
try:
self.connector.stopConnecting()
except error.NotConnectingError:
pass
def resetDelay(self):
"""
Call this method after a successful connection: it resets the delay and
the retry counter.
"""
self.delay = self.initialDelay
self.retries = 0
self._callID = None
self.continueTrying = 1
def __getstate__(self):
"""
Remove all of the state which is mutated by connection attempts and
failures, returning just the state which describes how reconnections
should be attempted. This will make the unserialized instance
behave just as this one did when it was first instantiated.
"""
state = self.__dict__.copy()
for key in ['connector', 'retries', 'delay',
'continueTrying', '_callID', 'clock']:
if key in state:
del state[key]
return state
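# Illustrative sketch, not used elsewhere in this module: a minimal
# ReconnectingClientFactory subclass.  As the docstring above asks, the factory
# calls resetDelay() (here from buildProtocol) once a connection succeeds so the
# back-off starts over; the tuning values are hypothetical.
def _example_reconnecting_factory():
    class _ExampleFactory(ReconnectingClientFactory):
        protocol = Protocol      # a real client would plug in its own subclass
        maxDelay = 60            # hypothetical cap on the back-off
        initialDelay = 1.0
        def buildProtocol(self, addr):
            self.resetDelay()    # connection succeeded: restart the back-off
            return ReconnectingClientFactory.buildProtocol(self, addr)
    return _ExampleFactory()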
class ServerFactory(Factory):
"""Subclass this to indicate that your protocol.Factory is only usable for servers.
"""
class BaseProtocol:
"""
This is the abstract superclass of all protocols.
Some methods have helpful default implementations here so that they can
easily be shared, but otherwise the direct subclasses of this class are more
interesting, L{Protocol} and L{ProcessProtocol}.
"""
connected = 0
transport = None
def makeConnection(self, transport):
"""Make a connection to a transport and a server.
This sets the 'transport' attribute of this Protocol, and calls the
connectionMade() callback.
"""
self.connected = 1
self.transport = transport
self.connectionMade()
def connectionMade(self):
"""Called when a connection is made.
This may be considered the initializer of the protocol, because
it is called when the connection is completed. For clients,
this is called once the connection to the server has been
established; for servers, this is called after an accept() call
stops blocking and a socket has been received. If you need to
send any greeting or initial message, do it here.
"""
connectionDone=failure.Failure(error.ConnectionDone())
connectionDone.cleanFailure()
@implementer(interfaces.IProtocol, interfaces.ILoggingContext)
class Protocol(BaseProtocol):
"""
This is the base class for streaming connection-oriented protocols.
If you are going to write a new connection-oriented protocol for Twisted,
start here. Any protocol implementation, either client or server, should
be a subclass of this class.
The API is quite simple. Implement L{dataReceived} to handle both
event-based and synchronous input; output can be sent through the
'transport' attribute, which is to be an instance that implements
L{twisted.internet.interfaces.ITransport}. Override C{connectionLost} to be
notified when the connection ends.
Some subclasses exist already to help you write common types of protocols:
see the L{twisted.protocols.basic} module for a few of them.
"""
def logPrefix(self):
"""
Return a prefix matching the class name, to identify log messages
related to this protocol instance.
"""
return self.__class__.__name__
def dataReceived(self, data):
"""Called whenever data is received.
Use this method to translate to a higher-level message. Usually, some
callback will be made upon the receipt of each complete protocol
message.
@param data: a string of indeterminate length. Please keep in mind
that you will probably need to buffer some data, as partial
(or multiple) protocol messages may be received! I recommend
that unit tests for protocols call through to this method with
differing chunk sizes, down to one byte at a time.
"""
def connectionLost(self, reason=connectionDone):
"""Called when the connection is shut down.
Clear any circular references here, and any external references
to this Protocol. The connection has been closed.
@type reason: L{twisted.python.failure.Failure}
"""
@implementer(interfaces.IConsumer)
class ProtocolToConsumerAdapter(components.Adapter):
def write(self, data):
self.original.dataReceived(data)
def registerProducer(self, producer, streaming):
pass
def unregisterProducer(self):
pass
components.registerAdapter(ProtocolToConsumerAdapter, interfaces.IProtocol,
interfaces.IConsumer)
@implementer(interfaces.IProtocol)
class ConsumerToProtocolAdapter(components.Adapter):
def dataReceived(self, data):
self.original.write(data)
def connectionLost(self, reason):
pass
def makeConnection(self, transport):
pass
def connectionMade(self):
pass
components.registerAdapter(ConsumerToProtocolAdapter, interfaces.IConsumer,
interfaces.IProtocol)
@implementer(interfaces.IProcessProtocol)
class ProcessProtocol(BaseProtocol):
"""
Base process protocol implementation which does simple dispatching for
stdin, stdout, and stderr file descriptors.
"""
def childDataReceived(self, childFD, data):
if childFD == 1:
self.outReceived(data)
elif childFD == 2:
self.errReceived(data)
def outReceived(self, data):
"""
Some data was received from stdout.
"""
def errReceived(self, data):
"""
Some data was received from stderr.
"""
def childConnectionLost(self, childFD):
if childFD == 0:
self.inConnectionLost()
elif childFD == 1:
self.outConnectionLost()
elif childFD == 2:
self.errConnectionLost()
def inConnectionLost(self):
"""
This will be called when stdin is closed.
"""
def outConnectionLost(self):
"""
This will be called when stdout is closed.
"""
def errConnectionLost(self):
"""
This will be called when stderr is closed.
"""
def processExited(self, reason):
"""
This will be called when the subprocess exits.
@type reason: L{twisted.python.failure.Failure}
"""
def processEnded(self, reason):
"""
Called when the child process exits and all file descriptors
associated with it have been closed.
@type reason: L{twisted.python.failure.Failure}
"""
class AbstractDatagramProtocol:
"""
Abstract protocol for datagram-oriented transports, e.g. IP, ICMP, ARP, UDP.
"""
transport = None
numPorts = 0
noisy = True
def __getstate__(self):
d = self.__dict__.copy()
d['transport'] = None
return d
def doStart(self):
"""Make sure startProtocol is called.
This will be called by makeConnection(), users should not call it.
"""
if not self.numPorts:
if self.noisy:
log.msg("Starting protocol %s" % self)
self.startProtocol()
self.numPorts = self.numPorts + 1
def doStop(self):
"""Make sure stopProtocol is called.
This will be called by the port, users should not call it.
"""
assert self.numPorts > 0
self.numPorts = self.numPorts - 1
self.transport = None
if not self.numPorts:
if self.noisy:
log.msg("Stopping protocol %s" % self)
self.stopProtocol()
def startProtocol(self):
"""Called when a transport is connected to this protocol.
Will only be called once, even if multiple ports are connected.
"""
def stopProtocol(self):
"""Called when the transport is disconnected.
Will only be called once, after all ports are disconnected.
"""
def makeConnection(self, transport):
"""Make a connection to a transport and a server.
This sets the 'transport' attribute of this DatagramProtocol, and calls the
doStart() callback.
"""
assert self.transport == None
self.transport = transport
self.doStart()
def datagramReceived(self, datagram, addr):
"""Called when a datagram is received.
@param datagram: the string received from the transport.
@param addr: tuple of source of datagram.
"""
@implementer(interfaces.ILoggingContext)
class DatagramProtocol(AbstractDatagramProtocol):
"""
Protocol for datagram-oriented transport, e.g. UDP.
@type transport: C{NoneType} or
L{IUDPTransport<twisted.internet.interfaces.IUDPTransport>} provider
@ivar transport: The transport with which this protocol is associated,
if it is associated with one.
"""
def logPrefix(self):
"""
Return a prefix matching the class name, to identify log messages
related to this protocol instance.
"""
return self.__class__.__name__
def connectionRefused(self):
"""Called due to error from write in connected mode.
Note this is a result of ICMP message generated by *previous*
write.
"""
class ConnectedDatagramProtocol(DatagramProtocol):
"""Protocol for connected datagram-oriented transport.
No longer necessary for UDP.
"""
def datagramReceived(self, datagram):
"""Called when a datagram is received.
@param datagram: the string received from the transport.
"""
def connectionFailed(self, failure):
"""Called if connecting failed.
Usually this will be due to a DNS lookup failure.
"""
@implementer(interfaces.ITransport)
class FileWrapper:
"""A wrapper around a file-like object to make it behave as a Transport.
This doesn't actually stream the file to the attached protocol,
and is thus useful mainly as a utility for debugging protocols.
"""
closed = 0
disconnecting = 0
producer = None
streamingProducer = 0
def __init__(self, file):
self.file = file
def write(self, data):
try:
self.file.write(data)
except:
self.handleException()
# self._checkProducer()
def _checkProducer(self):
# Cheating; this is called at "idle" times to allow producers to be
# found and dealt with
if self.producer:
self.producer.resumeProducing()
def registerProducer(self, producer, streaming):
"""From abstract.FileDescriptor
"""
self.producer = producer
self.streamingProducer = streaming
if not streaming:
producer.resumeProducing()
def unregisterProducer(self):
self.producer = None
def stopConsuming(self):
self.unregisterProducer()
self.loseConnection()
def writeSequence(self, iovec):
self.write("".join(iovec))
def loseConnection(self):
self.closed = 1
try:
self.file.close()
except (IOError, OSError):
self.handleException()
def getPeer(self):
# XXX: According to ITransport, this should return an IAddress!
return 'file', 'file'
def getHost(self):
# XXX: According to ITransport, this should return an IAddress!
return 'file'
def handleException(self):
pass
def resumeProducing(self):
# Never sends data anyways
pass
def pauseProducing(self):
# Never sends data anyways
pass
def stopProducing(self):
self.loseConnection()
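# Illustrative sketch (not part of Twisted): FileWrapper is mainly useful for
# exercising a protocol against an in-memory file during debugging. The helper
# name and the BytesIO sink are assumptions of this example.
def _example_drive_protocol_with_filewrapper(protocol, payload):
    import io
    sink = io.BytesIO()
    transport = FileWrapper(sink)
    # makeConnection() assigns the transport and fires connectionMade().
    protocol.makeConnection(transport)
    protocol.dataReceived(payload)
    # Whatever the protocol wrote to its transport is now in the sink.
    return sink.getvalue()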
__all__ = ["Factory", "ClientFactory", "ReconnectingClientFactory", "connectionDone",
"Protocol", "ProcessProtocol", "FileWrapper", "ServerFactory",
"AbstractDatagramProtocol", "DatagramProtocol", "ConnectedDatagramProtocol",
"ClientCreator"]
| bsd-3-clause |
gnowledge/ncert_nroer | gstudio/templatetags/gstudio_tags.py | 1 | 27697 |
# Copyright (c) 2011, 2012 Free Software Foundation
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Template tags and filters for Gstudio"""
from hashlib import md5
from random import sample
from urllib import urlencode
from datetime import datetime
from django.db.models import Q
from django.db import connection
from django.template import Node as nd
from django.template import Library
from django.template import TemplateSyntaxError
from django.contrib.comments.models import CommentFlag
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import smart_unicode
from django.contrib.comments import get_model as get_comment_model
from unidecode import unidecode
from tagging.models import Tag
from tagging.utils import calculate_cloud
from gstudio.models import Nodetype
from gstudio.models import Author
from gstudio.models import Metatype
from gstudio.gnowql import get_node
from gstudio.managers import tags_published
from gstudio.comparison import VectorBuilder
from gstudio.comparison import pearson_score
from gstudio.templatetags.zcalendar import GstudioCalendar
from gstudio.templatetags.zbreadcrumbs import retrieve_breadcrumbs
from django.http import HttpResponseRedirect
from gstudio.CNL import *
from gstudio.methods import check_release_or_not
import os
from settings import STATIC_URL,ADMIN_MEDIA_PREFIX
import tarfile
from gstudio.methods import *
import contextlib
register = Library()
VECTORS = None
VECTORS_FACTORY = lambda: VectorBuilder(Nodetype.published.all(),
['title', 'excerpt', 'content'])
CACHE_NODETYPES_RELATED = {}
@contextlib.contextmanager
def cd_change(tmp_location):
cd = os.getcwd()
os.chdir(tmp_location)
try:
yield
finally:
os.chdir(cd)
@register.assignment_tag
def get_each_activity(each):
try:
spl=each.split(",")
print spl
if spl[1]:
strn=spl[1].split("!")
else:
return "empty"
print strn
strg=strn[0]
cnt=strg.find("Edit")
print strg,cnt
if cnt > -1:
return "Edit"
cnt=strg.find("Upload")
if cnt > -1:
return "Upload"
cnt=strg.find("Add")
if cnt > -1:
return "Add"
cnt=strg.find("Delete")
if cnt > -1:
return "Delete"
return "empty"
except:
return "empty"
@register.assignment_tag
def get_each_title(each):
try:
spl=each.split("-")
tit=spl[1].split("http")
return tit[0]
except:
return ""
@register.assignment_tag
def get_each_url(each):
try:
spl=each.split("http")
rets="http"+spl[1]
return rets
except:
return ""
@register.assignment_tag
def get_slug_of_video(videoid):
print "videoid",videoid
slug=""
vid=Gbobject.objects.filter(id=videoid)
if vid:
        vid=Gbobject.objects.get(id=videoid)
slug=vid.slug
print "videoslug",vid.slug
return slug
@register.assignment_tag
def get_image_object(objectid):
obj=Gbobject.objects.get(id=objectid)
return obj
@register.assignment_tag
def get_related_images(imageid):
try:
gbobject=Gbobject.objects.get(id=imageid)
tag = Tag.objects.get_for_object(gbobject)
otherRelatedimages = []
for each in tag:
print "alliteS",each.items.all()
for each1 in each.items.all():
tagItem = each1.object
print "tagitem",tagItem
check = tagItem.objecttypes.all()
if check.filter(title__contains="Image"):
if not tagItem.id == gbobject.id:
print tagItem,"tagit"
otherRelatedimages.append(tagItem)
except:
pass
return otherRelatedimages
@register.assignment_tag
def get_related_docus(docid):
try:
gbobject=Gbobject.objects.get(id=docid)
tag = Tag.objects.get_for_object(gbobject)
otherRelateddocs = []
for each in tag:
print "alliteS",each.items.all()
for each1 in each.items.all():
tagItem = each1.object
print "tagitem",tagItem
check = tagItem.objecttypes.all()
if check.filter(title__contains="Document"):
if not tagItem.id == gbobject.id:
print tagItem,"tagit"
otherRelateddocs.append(tagItem)
except:
pass
return otherRelateddocs
@register.assignment_tag
def get_first_object(imgcolln):
col=imgcolln[0]
return col
@register.assignment_tag
def split_images(imglst):
split=[]
lnimg=len(imglst)
j=0
while j < lnimg:
i=0
ins=[]
while i < 3 and j < lnimg :
ins.append(imglst[j])
i=i+1
j=j+1
split.append(ins)
return split
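# Illustrative example (not part of the original module): split_images(range(1, 6))
# returns [[1, 2, 3], [4, 5]], i.e. the image list chunked into rows of at most
# three for the template.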
@register.assignment_tag
def get_doc_download(docid):
try:
sys=System.objects.get(id=docid)
filn="static/img/" + sys.title + ".tar.gz"
os.system("rm -rf /tmp/nroer/docdownload/")
os.system("mkdir /tmp/nroer/docdownload/")
strn="rm "+filn
print "delest",strn
os.system(strn)
tar=tarfile.open(filn,"w:gz")
mems=get_gbobjects(docid)
print "mems",mems
for each in mems:
fna="img/"+str(each.altnames)
fname=os.path.join("static/",fna)
strn="cp "+ fname +" /tmp/nroer/docdownload/"
print strn,"cpystr"
os.system(strn)
with cd_change("/tmp/nroer/docdownload/"):
for files in os.listdir('.'):
tar.add(files)
print "adding"
tar.close()
print "filname",filn
except:
pass
return filn
@register.inclusion_tag('gstudio/editdoccollns.html')
def show_edit_doc_collection(doccolid,user):
template='gstudio/editdoccollns.html'
print template,"t"
listcolls={}
syst=Objecttype.objects.get(title='Document')
a=syst.get_nbh['contains_members']
for each in a:
listcolls[each.id]=each.title
sys=System.objects.get(id=doccolid)
testlst=get_gbobjects(doccolid)
return {'template':template,'test':testlst,'user':user,'test1':listcolls,'doc':sys}
@register.inclusion_tag('gstudio/editcollection.html')
def show_edit_collection(imgcolid,user):
template='gstudio/editcollection.html'
listcolls={}
syst=Objecttype.objects.get(title='Image')
a=syst.get_nbh['contains_members']
for each in a:
listcolls[each.id]=each.title
sys=System.objects.get(id=imgcolid)
testlst=get_gbobjects(imgcolid)
print "editlist",testlst
return {'template':template,'test':testlst,'user':user,'test1':listcolls,'image':sys}
@register.assignment_tag
def check_if_collection(sysid):
a=Systemtype.objects.get(title='Imagecollection')
b=Systemtype.objects.get(title='Documentcollection')
fl=0
for each in a.member_systems.all():
if each.id == sysid:
fl=1
for each1 in b.member_systems.all():
if each1.id == sysid:
fl=1
return fl
@register.assignment_tag
def show_image_collections(imgcolid):
listcol=get_gbobjects(imgcolid)
return listcol
@register.assignment_tag
def show_doc_collections(doccolid):
listcol=get_gbobjects(doccolid)
return listcol
@register.assignment_tag
def get_document_collections():
print "inside getdoccoll"
listcolls={}
syst=Systemtype.objects.get(title='Documentcollection')
a=syst.member_systems.all()
for each in a:
listcolls[each.id]=each.title
return listcolls
@register.assignment_tag
def get_image_collections():
listcolls={}
syst=Systemtype.objects.get(title='Imagecollection')
a=syst.member_systems.all()
for each in a:
listcolls[each.id]=each.title
return listcolls
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_metatypes(template='gstudio/tags/metatypes.html'):
"""Return the metatypes"""
return {'template': template,
'metatypes': Metatype.tree.all()}
#@register.inclusion_tag('gstudio/tags/dummy.html')
#def get_subtypes(template='gstudio/tags/nodetypes.html'):
# """Return the subtypes"""
# return {'template': template,
# 'subtypes': Nodetype.tree.all()}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_authors(number=5, template='gstudio/tags/authors.html'):
"""Return the published authors"""
return {'template': template,
'authors': Author.published.all()[:number]}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_recent_nodetypes(number=5, template='gstudio/tags/recent_nodetypes.html'):
"""Return the most recent nodetypes"""
return {'template': template,
'nodetypes': Nodetype.published.all()[:number]}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_featured_nodetypes(number=5,
template='gstudio/tags/featured_nodetypes.html'):
"""Return the featured nodetypes"""
return {'template': template,
'nodetypes': Nodetype.published.filter(featured=True)[:number]}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_random_nodetypes(number=5, template='gstudio/tags/random_nodetypes.html'):
"""Return random nodetypes"""
nodetypes = Nodetype.published.all()
if number > len(nodetypes):
number = len(nodetypes)
return {'template': template,
'nodetypes': sample(nodetypes, number)}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_popular_nodetypes(number=5, template='gstudio/tags/popular_nodetypes.html'):
"""Return popular nodetypes"""
ctype = ContentType.objects.get_for_model(Nodetype)
query = """SELECT object_pk, COUNT(*) AS score
FROM %s
WHERE content_type_id = %%s
AND is_public = '1'
GROUP BY object_pk
ORDER BY score DESC""" % get_comment_model()._meta.db_table
cursor = connection.cursor()
cursor.execute(query, [ctype.id])
object_ids = [int(row[0]) for row in cursor.fetchall()]
# Use ``in_bulk`` here instead of an ``id__in`` filter, because ``id__in``
# would clobber the ordering.
object_dict = Nodetype.published.in_bulk(object_ids)
return {'template': template,
'nodetypes': [object_dict[object_id]
for object_id in object_ids
if object_id in object_dict][:number]}
@register.inclusion_tag('gstudio/tags/dummy.html', takes_context=True)
def get_similar_nodetypes(context, number=5,
template='gstudio/tags/similar_nodetypes.html',
flush=False):
"""Return similar nodetypes"""
global VECTORS
global CACHE_NODETYPES_RELATED
if VECTORS is None or flush:
VECTORS = VECTORS_FACTORY()
CACHE_NODETYPES_RELATED = {}
def compute_related(object_id, dataset):
"""Compute related nodetypes to a nodetype with a dataset"""
object_vector = None
for nodetype, e_vector in dataset.items():
if nodetype.pk == object_id:
object_vector = e_vector
if not object_vector:
return []
nodetype_related = {}
for nodetype, e_vector in dataset.items():
if nodetype.pk != object_id:
score = pearson_score(object_vector, e_vector)
if score:
nodetype_related[nodetype] = score
related = sorted(nodetype_related.items(), key=lambda(k, v): (v, k))
return [rel[0] for rel in related]
object_id = context['object'].pk
columns, dataset = VECTORS()
key = '%s-%s' % (object_id, VECTORS.key)
if not key in CACHE_NODETYPES_RELATED.keys():
CACHE_NODETYPES_RELATED[key] = compute_related(object_id, dataset)
nodetypes = CACHE_NODETYPES_RELATED[key][:number]
return {'template': template,
'nodetypes': nodetypes}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_archives_nodetypes(template='gstudio/tags/archives_nodetypes.html'):
"""Return archives nodetypes"""
return {'template': template,
'archives': Nodetype.published.dates('creation_date', 'month',
order='DESC')}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_archives_nodetypes_tree(
template='gstudio/tags/archives_nodetypes_tree.html'):
"""Return archives nodetypes as a Tree"""
return {'template': template,
'archives': Nodetype.published.dates('creation_date', 'day',
order='ASC')}
@register.inclusion_tag('gstudio/tags/dummy.html', takes_context=True)
def get_calendar_nodetypes(context, year=None, month=None,
template='gstudio/tags/calendar.html'):
"""Return an HTML calendar of nodetypes"""
if not year or not month:
date_month = context.get('month') or context.get('day') or \
getattr(context.get('object'), 'creation_date', None) or \
datetime.today()
year, month = date_month.timetuple()[:2]
calendar = GstudioCalendar()
current_month = datetime(year, month, 1)
dates = list(Nodetype.published.dates('creation_date', 'month'))
if not current_month in dates:
dates.append(current_month)
dates.sort()
index = dates.index(current_month)
previous_month = index > 0 and dates[index - 1] or None
next_month = index != len(dates) - 1 and dates[index + 1] or None
return {'template': template,
'next_month': next_month,
'previous_month': previous_month,
'calendar': calendar.formatmonth(year, month)}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_recent_comments(number=5, template='gstudio/tags/recent_comments.html'):
"""Return the most recent comments"""
# Using map(smart_unicode... fix bug related to issue #8554
#Modified comments to include CNL
nodetype_published_pks = map(smart_unicode,
Nodetype.published.values_list('id', flat=True))
content_type = ContentType.objects.get_for_model(Nodetype)
comments = get_comment_model().objects.filter(
Q(flags=None) | Q(flags__flag=CommentFlag.MODERATOR_APPROVAL),
content_type=content_type, object_pk__in=nodetype_published_pks,
is_public=True).order_by('-submit_date')[:number]
return {'template': template,
'comments': comments}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_recent_oldcomments(number=5, template='gstudio/tags/recent_comments.html'):
"""Return the most recent comments"""
# Using map(smart_unicode... fix bug related to issue #8554
nodetype_published_pks = map(smart_unicode,
Nodetype.published.values_list('id', flat=True))
content_type = ContentType.objects.get_for_model(Nodetype)
comments = get_comment_model().objects.filter(
Q(flags=None) | Q(flags__flag=CommentFlag.MODERATOR_APPROVAL),
content_type=content_type, object_pk__in=nodetype_published_pks,
is_public=True).order_by('-submit_date')[:number]
return {'template': template,
'comments': comments}
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_recent_linkbacks(number=5,
template='gstudio/tags/recent_linkbacks.html'):
"""Return the most recent linkbacks"""
nodetype_published_pks = map(smart_unicode,
Nodetype.published.values_list('id', flat=True))
content_type = ContentType.objects.get_for_model(Nodetype)
linkbacks = get_comment_model().objects.filter(
content_type=content_type,
object_pk__in=nodetype_published_pks,
flags__flag__in=['pingback', 'trackback'],
is_public=True).order_by(
'-submit_date')[:number]
return {'template': template,
'linkbacks': linkbacks}
@register.inclusion_tag('gstudio/tags/dummy.html', takes_context=True)
def gstudio_pagination(context, page, begin_pages=3, end_pages=3,
before_pages=2, after_pages=2,
template='gstudio/tags/pagination.html'):
"""Return a Digg-like pagination, by splitting long list of page
into 3 blocks of pages"""
GET_string = ''
for key, value in context['request'].GET.items():
if key != 'page':
GET_string += '&%s=%s' % (key, value)
begin = page.paginator.page_range[:begin_pages]
end = page.paginator.page_range[-end_pages:]
middle = page.paginator.page_range[max(page.number - before_pages - 1, 0):
page.number + after_pages]
if set(begin) & set(end): # [1, 2, 3], [...], [2, 3, 4]
begin = sorted(set(begin + end)) # [1, 2, 3, 4]
middle, end = [], []
elif begin[-1] + 1 == end[0]: # [1, 2, 3], [...], [4, 5, 6]
begin += end # [1, 2, 3, 4, 5, 6]
middle, end = [], []
elif set(begin) & set(middle): # [1, 2, 3], [2, 3, 4], [...]
begin = sorted(set(begin + middle)) # [1, 2, 3, 4]
middle = []
elif begin[-1] + 1 == middle[0]: # [1, 2, 3], [4, 5, 6], [...]
begin += middle # [1, 2, 3, 4, 5, 6]
middle = []
elif middle[-1] + 1 == end[0]: # [...], [15, 16, 17], [18, 19, 20]
end = middle + end # [15, 16, 17, 18, 19, 20]
middle = []
elif set(middle) & set(end): # [...], [17, 18, 19], [18, 19, 20]
end = sorted(set(middle + end)) # [17, 18, 19, 20]
middle = []
return {'template': template, 'page': page, 'GET_string': GET_string,
'begin': begin, 'middle': middle, 'end': end}
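# Illustrative example (not part of the original module): with the default
# arguments, page 10 of 20 yields begin=[1, 2, 3], middle=[8, 9, 10, 11, 12]
# and end=[18, 19, 20], the three page blocks rendered by the pagination template.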
@register.inclusion_tag('gstudio/tags/dummy.html', takes_context=True)
def gstudio_breadcrumbs(context, separator='>>', root_name='Home',
template='gstudio/tags/breadcrumbs.html',):
"""Return a breadcrumb for the application"""
path = context['request'].path
page_object = context.get('object') or context.get('metatype') or \
context.get('tag') or context.get('author') or context.get('image') or context.get('video')or context.get('doc') or context.get('meet_ob')
breadcrumbs = retrieve_breadcrumbs(path, page_object, root_name)
print breadcrumbs,"brcrbs",path,page_object,root_name
return {'template': template,
'separator': separator,
'breadcrumbs': breadcrumbs}
@register.simple_tag
def get_gravatar(email, size=80, rating='g', default=None):
"""Return url for a Gravatar"""
url = 'http://www.gravatar.com/avatar/%s.jpg' % \
md5(email.strip().lower()).hexdigest()
options = {'s': size, 'r': rating}
if default:
options['d'] = default
url = '%s?%s' % (url, urlencode(options))
    return url.replace('&', '&amp;')
@register.simple_tag
def get_type(name):
"""Return the type of node"""
return get_node(name)
class TagsNode(nd):
def __init__(self, context_var):
self.context_var = context_var
def render(self, context):
context[self.context_var] = tags_published()
return ''
#define get_CNL function
@register.assignment_tag
def get_CNL(no, takes_context = True):
l = get_CNL_list(no)
return l
@register.tag
def get_tags(parser, token):
"""{% get_tags as var %}"""
bits = token.split_contents()
if len(bits) != 3:
raise TemplateSyntaxError(
'get_tags tag takes exactly two arguments')
if bits[1] != 'as':
raise TemplateSyntaxError(
"first argument to get_tags tag must be 'as'")
return TagsNode(bits[2])
@register.simple_tag
def redirect(username):
link = "/"
return HttpResponseRedirect(link)
@register.inclusion_tag('gstudio/tags/dummy.html')
def get_tag_cloud(steps=6, template='gstudio/tags/tag_cloud.html'):
"""Return a cloud of published tags"""
tags = Tag.objects.usage_for_queryset(
Nodetype.published.all(), counts=True)
return {'template': template,
'tags': calculate_cloud(tags, steps)}
@register.inclusion_tag('gstudio/tags/comment.html')
def show_comment(comment,idusr,flag,admin_id,attob):
return {'comment':comment , 'idusr' : idusr, "flag" : flag, "admin_id" : admin_id , "attribute" : attob}
@register.inclusion_tag('gstudio/tags/commentpage.html')
def show_commentpage(comment,idusr,flag,admin_id,attob):
return {'comment':comment , 'idusr' : idusr, "flag" : flag, "admin_id" : admin_id , "attribute" : attob}
@register.simple_tag
def show_nodesystem(object_id):
search=object_id
nbh=""
url=""
for each in System.objects.all():
sysid=each.id
for eachsys in each.systemtypes.all():
if eachsys.title=="Meeting":
url="group/gnowsys-grp/"
objecttitle = "TWIST"
elif eachsys.title=="Wikipage":
url="page/gnowsys-page/"
objecttitle = "WIKIPAGE"
for eachob in each.system_set.all():
if eachob.gbobject_set.all():
for eachgbob in eachob.gbobject_set.all():
if search==eachgbob.id:
nbh=url+str(sysid)
if search==sysid:
nbh=url+str(sysid)
return nbh
@register.assignment_tag
def check_release(meeting):
var = check_release_or_not(meeting)
return var
@register.assignment_tag
def check_subscribe(meeting,user):
var = check_subscribe_or_not(meeting,user)
return var
@register.assignment_tag
def check_user_admin(userid):
var=check_usr_admin(userid)
return var
@register.assignment_tag
def get_static_url():
var = os.path.join(os.path.dirname(__file__),STATIC_URL)
return var
@register.assignment_tag
def get_factory_looms():
fs = []
fs = get_factory_loom_OTs()
return fs
@register.inclusion_tag('gstudio/puttagsearch.html')
def put_tag_search():
template='gstudio/puttagsearch.html'
return {'template':template}
@register.assignment_tag
def put_home_content():
var = get_home_content()
return var
@register.assignment_tag
def put_more_content():
var = get_more_content()
return var
@register.assignment_tag
def put_home_title():
var = get_home_title()
return var
@register.inclusion_tag('gstudio/addreln.html')
def add_res_relation(meetingob,user):
template='gstudio/addreln.html'
return {'template':template,'meetingob':meetingob,'user':user}
@register.simple_tag
def get_available_level():
listlev=[]
lev=System.objects.get(id=19021)
s=unidecode(lev.title)
listlev.append(str(s))
lev=System.objects.get(id=18968)
s=unidecode(lev.title)
listlev.append(str(s))
lev=System.objects.get(id=39965)
s=unidecode(lev.title)
listlev.append(str(s))
return listlev
@register.simple_tag
def get_available_subjs():
listsubjs=[]
wikis=Systemtype.objects.get(title='Collection')
wiki=wikis.member_systems.all()
for each in wiki:
#unicodedata.normalize('NFKD', a.title).encode('ascii','ignore')
# s=unicodedata.normalize('NFKD', each.title).encode('ascii','ignore')+" - with inverse - "+unicodedata.normalize('NFKD',each.inverse).encode('ascii','ignore')
s=unidecode(each.title)
listsubjs.append(str(s))
return listsubjs
@register.simple_tag
def get_available_rts():
listrts={}
for each in Relationtype.objects.all():
#unicodedata.normalize('NFKD', a.title).encode('ascii','ignore')
# s=unicodedata.normalize('NFKD', each.title).encode('ascii','ignore')+" - with inverse - "+unicodedata.normalize('NFKD',each.inverse).encode('ascii','ignore')
s=unidecode(each.title)+" - with inverse - "
listrts[str(s)]=each.id
return listrts
@register.simple_tag
def get_available_objects():
listsubjs={}
# obtype=""
# vid=Nodetype.objects.get(title='Video')
# img=Nodetype.objects.get(title='Image')
# doc=Nodetype.objects.get(title='Document')
# col=Systemtype.objects.get(title='Collection')
# wiki=Systemtype.objects.get(title='Wikipage')
# meet=Systemtype.objects.get(title='Meeting')
for each in Gbobject.objects.all():
obtypes=each.objecttypes.all()
if not ('page box of' in each.title or 'message box of' in each.title):
# if vid in obtypes:
# obtype="is a video"
# if img in obtypes:
# obtype="is an image"
# if doc in obtypes:
# obtype="is a document"
# checksys=System.objects.filter(id=each.id).count()
# if checksys > 0:
# sysob=System.objects.get(id=each.id)
# systype=sysob.systemtypes.all()
# if col in systype:
# obtype="is a collection"
# elif wiki in systype:
# obtype="is a text document"
# elif meet in systype:
# obtype="is a Thread"
s=each.id
listsubjs[each.id]=s
return str(listsubjs)
@register.inclusion_tag('gstudio/edittitle.html')
def edit_title(objectid,objecttitle):
gbobject = Gbobject.objects.get(id=objectid)
    template='gstudio/edittitle.html'
return {'template':template,'objectid':objectid,'objecttitle':objecttitle,'gbobject':gbobject}
@register.simple_tag
def get_add_tag():
listtag = []
tag = Tag.objects.all()
for each in tag:
listtag.append(each.__str__())
return str(listtag)
@register.simple_tag
def get_page_drawer():
pagedrawer = []
#wikiset = Systemtype.objects.all()
drawerset = Systemtype.objects.get(title="Wikipage")
drawer= drawerset.member_systems.all()
for each in drawer:
pagedrawer.append(each.__str__())
return str(pagedrawer)
@register.inclusion_tag('gstudio/priorpost.html')
def addpriorpost(objectid,user):
template='gstudio/priorpost.html'
gbobject = Gbobject.objects.get(id=objectid)
priorgbobject = gbobject.prior_nodes.all()
posteriorgbobject = gbobject.posterior_nodes.all()
return {'template':template,'objectid':objectid,'priorgbobject':priorgbobject,'posteriorgbobject':posteriorgbobject,'user':user}
@register.inclusion_tag('gstudio/addingtag.html')
def addtag(viewtag,objectid,user):
gbobject = Gbobject.objects.get(id=objectid)
template='gstudio/addingtag.html'
return {'viewtag':viewtag,'objectid':objectid,'user':user,'gbobject':gbobject}
@register.simple_tag
def get_pri_post_page():
listobject = []
gbobject = Gbobject.objects.all()
for each in gbobject:
if not ('page box of' in each.title or 'message box of' in each.title):
listobject.append(each.__str__())
return str(listobject)
@register.inclusion_tag('gstudio/publicprivate.html')
def public_private(objectid,status):
template = 'gstudio/publicprivate.html'
return {'objectid':objectid,'status':status}
| agpl-3.0 |
doormon/doormon-server | server/lib/flask/helpers.py | 776 | 33793 | # -*- coding: utf-8 -*-
"""
flask.helpers
~~~~~~~~~~~~~
Implements various helpers.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import pkgutil
import posixpath
import mimetypes
from time import time
from zlib import adler32
from threading import RLock
from werkzeug.routing import BuildError
from functools import update_wrapper
try:
from werkzeug.urls import url_quote
except ImportError:
from urlparse import quote as url_quote
from werkzeug.datastructures import Headers
from werkzeug.exceptions import NotFound
# this was moved in 0.7
try:
from werkzeug.wsgi import wrap_file
except ImportError:
from werkzeug.utils import wrap_file
from jinja2 import FileSystemLoader
from .signals import message_flashed
from .globals import session, _request_ctx_stack, _app_ctx_stack, \
current_app, request
from ._compat import string_types, text_type
# sentinel
_missing = object()
# what separators does this operating system provide that are not a slash?
# this is used by the send_from_directory function to ensure that nobody is
# able to access files from outside the filesystem.
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
if sep not in (None, '/'))
def _endpoint_from_view_func(view_func):
"""Internal helper that returns the default endpoint for a given
function. This always is the function name.
"""
assert view_func is not None, 'expected view func if endpoint ' \
'is not provided.'
return view_func.__name__
def stream_with_context(generator_or_function):
"""Request contexts disappear when the response is started on the server.
This is done for efficiency reasons and to make it less likely to encounter
memory leaks with badly written WSGI middlewares. The downside is that if
you are using streamed responses, the generator cannot access request bound
information any more.
This function however can help you keep the context around for longer::
from flask import stream_with_context, request, Response
@app.route('/stream')
def streamed_response():
@stream_with_context
def generate():
yield 'Hello '
yield request.args['name']
yield '!'
return Response(generate())
Alternatively it can also be used around a specific generator::
from flask import stream_with_context, request, Response
@app.route('/stream')
def streamed_response():
def generate():
yield 'Hello '
yield request.args['name']
yield '!'
return Response(stream_with_context(generate()))
.. versionadded:: 0.9
"""
try:
gen = iter(generator_or_function)
except TypeError:
def decorator(*args, **kwargs):
gen = generator_or_function()
return stream_with_context(gen)
return update_wrapper(decorator, generator_or_function)
def generator():
ctx = _request_ctx_stack.top
if ctx is None:
raise RuntimeError('Attempted to stream with context but '
'there was no context in the first place to keep around.')
with ctx:
# Dummy sentinel. Has to be inside the context block or we're
# not actually keeping the context around.
yield None
# The try/finally is here so that if someone passes a WSGI level
# iterator in we're still running the cleanup logic. Generators
# don't need that because they are closed on their destruction
# automatically.
try:
for item in gen:
yield item
finally:
if hasattr(gen, 'close'):
gen.close()
# The trick is to start the generator. Then the code execution runs until
# the first dummy None is yielded at which point the context was already
# pushed. This item is discarded. Then when the iteration continues the
# real generator is executed.
wrapped_g = generator()
next(wrapped_g)
return wrapped_g
def make_response(*args):
"""Sometimes it is necessary to set additional headers in a view. Because
views do not have to return response objects but can return a value that
is converted into a response object by Flask itself, it becomes tricky to
add headers to it. This function can be called instead of using a return
and you will get a response object which you can use to attach headers.
If view looked like this and you want to add a new header::
def index():
return render_template('index.html', foo=42)
You can now do something like this::
def index():
response = make_response(render_template('index.html', foo=42))
response.headers['X-Parachutes'] = 'parachutes are cool'
return response
This function accepts the very same arguments you can return from a
view function. This for example creates a response with a 404 error
code::
response = make_response(render_template('not_found.html'), 404)
The other use case of this function is to force the return value of a
view function into a response which is helpful with view
decorators::
response = make_response(view_function())
response.headers['X-Parachutes'] = 'parachutes are cool'
Internally this function does the following things:
- if no arguments are passed, it creates a new response argument
- if one argument is passed, :meth:`flask.Flask.make_response`
is invoked with it.
- if more than one argument is passed, the arguments are passed
to the :meth:`flask.Flask.make_response` function as tuple.
.. versionadded:: 0.6
"""
if not args:
return current_app.response_class()
if len(args) == 1:
args = args[0]
return current_app.make_response(args)
def url_for(endpoint, **values):
"""Generates a URL to the given endpoint with the method provided.
Variable arguments that are unknown to the target endpoint are appended
to the generated URL as query arguments. If the value of a query argument
is `None`, the whole pair is skipped. In case blueprints are active
you can shortcut references to the same blueprint by prefixing the
local endpoint with a dot (``.``).
This will reference the index function local to the current blueprint::
url_for('.index')
For more information, head over to the :ref:`Quickstart <url-building>`.
To integrate applications, :class:`Flask` has a hook to intercept URL build
errors through :attr:`Flask.build_error_handler`. The `url_for` function
results in a :exc:`~werkzeug.routing.BuildError` when the current app does
not have a URL for the given endpoint and values. When it does, the
:data:`~flask.current_app` calls its :attr:`~Flask.build_error_handler` if
it is not `None`, which can return a string to use as the result of
`url_for` (instead of `url_for`'s default to raise the
:exc:`~werkzeug.routing.BuildError` exception) or re-raise the exception.
An example::
def external_url_handler(error, endpoint, **values):
"Looks up an external URL when `url_for` cannot build a URL."
# This is an example of hooking the build_error_handler.
# Here, lookup_url is some utility function you've built
# which looks up the endpoint in some external URL registry.
url = lookup_url(endpoint, **values)
if url is None:
# External lookup did not have a URL.
# Re-raise the BuildError, in context of original traceback.
exc_type, exc_value, tb = sys.exc_info()
if exc_value is error:
raise exc_type, exc_value, tb
else:
raise error
# url_for will use this result, instead of raising BuildError.
return url
app.build_error_handler = external_url_handler
Here, `error` is the instance of :exc:`~werkzeug.routing.BuildError`, and
`endpoint` and `**values` are the arguments passed into `url_for`. Note
that this is for building URLs outside the current application, and not for
handling 404 NotFound errors.
.. versionadded:: 0.10
The `_scheme` parameter was added.
.. versionadded:: 0.9
The `_anchor` and `_method` parameters were added.
.. versionadded:: 0.9
Calls :meth:`Flask.handle_build_error` on
:exc:`~werkzeug.routing.BuildError`.
:param endpoint: the endpoint of the URL (name of the function)
:param values: the variable arguments of the URL rule
:param _external: if set to `True`, an absolute URL is generated. Server
address can be changed via `SERVER_NAME` configuration variable which
defaults to `localhost`.
:param _scheme: a string specifying the desired URL scheme. The `_external`
parameter must be set to `True` or a `ValueError` is raised.
:param _anchor: if provided this is added as anchor to the URL.
:param _method: if provided this explicitly specifies an HTTP method.
"""
appctx = _app_ctx_stack.top
reqctx = _request_ctx_stack.top
if appctx is None:
raise RuntimeError('Attempted to generate a URL without the '
'application context being pushed. This has to be '
'executed when application context is available.')
# If request specific information is available we have some extra
# features that support "relative" urls.
if reqctx is not None:
url_adapter = reqctx.url_adapter
blueprint_name = request.blueprint
if not reqctx.request._is_old_module:
if endpoint[:1] == '.':
if blueprint_name is not None:
endpoint = blueprint_name + endpoint
else:
endpoint = endpoint[1:]
else:
# TODO: get rid of this deprecated functionality in 1.0
if '.' not in endpoint:
if blueprint_name is not None:
endpoint = blueprint_name + '.' + endpoint
elif endpoint.startswith('.'):
endpoint = endpoint[1:]
external = values.pop('_external', False)
# Otherwise go with the url adapter from the appctx and make
# the urls external by default.
else:
url_adapter = appctx.url_adapter
if url_adapter is None:
raise RuntimeError('Application was not able to create a URL '
'adapter for request independent URL generation. '
'You might be able to fix this by setting '
'the SERVER_NAME config variable.')
external = values.pop('_external', True)
anchor = values.pop('_anchor', None)
method = values.pop('_method', None)
scheme = values.pop('_scheme', None)
appctx.app.inject_url_defaults(endpoint, values)
if scheme is not None:
if not external:
raise ValueError('When specifying _scheme, _external must be True')
url_adapter.url_scheme = scheme
try:
rv = url_adapter.build(endpoint, values, method=method,
force_external=external)
except BuildError as error:
# We need to inject the values again so that the app callback can
# deal with that sort of stuff.
values['_external'] = external
values['_anchor'] = anchor
values['_method'] = method
return appctx.app.handle_url_build_error(error, endpoint, values)
if anchor is not None:
rv += '#' + url_quote(anchor)
return rv
def get_template_attribute(template_name, attribute):
"""Loads a macro (or variable) a template exports. This can be used to
invoke a macro from within Python code. If you for example have a
template named `_cider.html` with the following contents:
.. sourcecode:: html+jinja
{% macro hello(name) %}Hello {{ name }}!{% endmacro %}
You can access this from Python code like this::
hello = get_template_attribute('_cider.html', 'hello')
return hello('World')
.. versionadded:: 0.2
:param template_name: the name of the template
:param attribute: the name of the variable of macro to access
"""
return getattr(current_app.jinja_env.get_template(template_name).module,
attribute)
def flash(message, category='message'):
"""Flashes a message to the next request. In order to remove the
flashed message from the session and to display it to the user,
the template has to call :func:`get_flashed_messages`.
.. versionchanged:: 0.3
`category` parameter added.
:param message: the message to be flashed.
:param category: the category for the message. The following values
are recommended: ``'message'`` for any kind of message,
``'error'`` for errors, ``'info'`` for information
messages and ``'warning'`` for warnings. However any
kind of string can be used as category.
"""
# Original implementation:
#
# session.setdefault('_flashes', []).append((category, message))
#
# This assumed that changes made to mutable structures in the session are
    # always in sync with the session object, which is not true for session
# implementations that use external storage for keeping their keys/values.
flashes = session.get('_flashes', [])
flashes.append((category, message))
session['_flashes'] = flashes
message_flashed.send(current_app._get_current_object(),
message=message, category=category)
def get_flashed_messages(with_categories=False, category_filter=[]):
"""Pulls all flashed messages from the session and returns them.
Further calls in the same request to the function will return
the same messages. By default just the messages are returned,
but when `with_categories` is set to `True`, the return value will
be a list of tuples in the form ``(category, message)`` instead.
Filter the flashed messages to one or more categories by providing those
categories in `category_filter`. This allows rendering categories in
separate html blocks. The `with_categories` and `category_filter`
arguments are distinct:
* `with_categories` controls whether categories are returned with message
text (`True` gives a tuple, where `False` gives just the message text).
* `category_filter` filters the messages down to only those matching the
provided categories.
See :ref:`message-flashing-pattern` for examples.
.. versionchanged:: 0.3
`with_categories` parameter added.
.. versionchanged:: 0.9
`category_filter` parameter added.
:param with_categories: set to `True` to also receive categories.
:param category_filter: whitelist of categories to limit return values
"""
flashes = _request_ctx_stack.top.flashes
if flashes is None:
_request_ctx_stack.top.flashes = flashes = session.pop('_flashes') \
if '_flashes' in session else []
if category_filter:
flashes = list(filter(lambda f: f[0] in category_filter, flashes))
if not with_categories:
return [x[1] for x in flashes]
return flashes
def send_file(filename_or_fp, mimetype=None, as_attachment=False,
attachment_filename=None, add_etags=True,
cache_timeout=None, conditional=False):
"""Sends the contents of a file to the client. This will use the
most efficient method available and configured. By default it will
try to use the WSGI server's file_wrapper support. Alternatively
you can set the application's :attr:`~Flask.use_x_sendfile` attribute
to ``True`` to directly emit an `X-Sendfile` header. This however
requires support of the underlying webserver for `X-Sendfile`.
By default it will try to guess the mimetype for you, but you can
also explicitly provide one. For extra security you probably want
to send certain files as attachment (HTML for instance). The mimetype
guessing requires a `filename` or an `attachment_filename` to be
provided.
Please never pass filenames to this function from user sources without
checking them first. Something like this is usually sufficient to
avoid security problems::
if '..' in filename or filename.startswith('/'):
abort(404)
.. versionadded:: 0.2
.. versionadded:: 0.5
The `add_etags`, `cache_timeout` and `conditional` parameters were
added. The default behavior is now to attach etags.
.. versionchanged:: 0.7
mimetype guessing and etag support for file objects was
deprecated because it was unreliable. Pass a filename if you are
able to, otherwise attach an etag yourself. This functionality
will be removed in Flask 1.0
.. versionchanged:: 0.9
cache_timeout pulls its default from application config, when None.
:param filename_or_fp: the filename of the file to send. This is
relative to the :attr:`~Flask.root_path` if a
relative path is specified.
Alternatively a file object might be provided
in which case `X-Sendfile` might not work and
fall back to the traditional method. Make sure
that the file pointer is positioned at the start
of data to send before calling :func:`send_file`.
:param mimetype: the mimetype of the file if provided, otherwise
auto detection happens.
:param as_attachment: set to `True` if you want to send this file with
a ``Content-Disposition: attachment`` header.
:param attachment_filename: the filename for the attachment if it
differs from the file's filename.
:param add_etags: set to `False` to disable attaching of etags.
:param conditional: set to `True` to enable conditional responses.
:param cache_timeout: the timeout in seconds for the headers. When `None`
(default), this value is set by
:meth:`~Flask.get_send_file_max_age` of
:data:`~flask.current_app`.
"""
mtime = None
if isinstance(filename_or_fp, string_types):
filename = filename_or_fp
file = None
else:
from warnings import warn
file = filename_or_fp
filename = getattr(file, 'name', None)
# XXX: this behavior is now deprecated because it was unreliable.
# removed in Flask 1.0
if not attachment_filename and not mimetype \
and isinstance(filename, string_types):
warn(DeprecationWarning('The filename support for file objects '
'passed to send_file is now deprecated. Pass an '
'attach_filename if you want mimetypes to be guessed.'),
stacklevel=2)
if add_etags:
warn(DeprecationWarning('In future flask releases etags will no '
'longer be generated for file objects passed to the send_file '
'function because this behavior was unreliable. Pass '
'filenames instead if possible, otherwise attach an etag '
'yourself based on another value'), stacklevel=2)
if filename is not None:
if not os.path.isabs(filename):
filename = os.path.join(current_app.root_path, filename)
if mimetype is None and (filename or attachment_filename):
mimetype = mimetypes.guess_type(filename or attachment_filename)[0]
if mimetype is None:
mimetype = 'application/octet-stream'
headers = Headers()
if as_attachment:
if attachment_filename is None:
if filename is None:
raise TypeError('filename unavailable, required for '
'sending as attachment')
attachment_filename = os.path.basename(filename)
headers.add('Content-Disposition', 'attachment',
filename=attachment_filename)
if current_app.use_x_sendfile and filename:
if file is not None:
file.close()
headers['X-Sendfile'] = filename
headers['Content-Length'] = os.path.getsize(filename)
data = None
else:
if file is None:
file = open(filename, 'rb')
mtime = os.path.getmtime(filename)
headers['Content-Length'] = os.path.getsize(filename)
data = wrap_file(request.environ, file)
rv = current_app.response_class(data, mimetype=mimetype, headers=headers,
direct_passthrough=True)
# if we know the file modification date, we can store it as the
# the time of the last modification.
if mtime is not None:
rv.last_modified = int(mtime)
rv.cache_control.public = True
if cache_timeout is None:
cache_timeout = current_app.get_send_file_max_age(filename)
if cache_timeout is not None:
rv.cache_control.max_age = cache_timeout
rv.expires = int(time() + cache_timeout)
if add_etags and filename is not None:
rv.set_etag('flask-%s-%s-%s' % (
os.path.getmtime(filename),
os.path.getsize(filename),
adler32(
filename.encode('utf-8') if isinstance(filename, text_type)
else filename
) & 0xffffffff
))
if conditional:
rv = rv.make_conditional(request)
# make sure we don't send x-sendfile for servers that
# ignore the 304 status code for x-sendfile.
if rv.status_code == 304:
rv.headers.pop('x-sendfile', None)
return rv
def safe_join(directory, filename):
"""Safely join `directory` and `filename`.
Example usage::
@app.route('/wiki/<path:filename>')
def wiki_page(filename):
filename = safe_join(app.config['WIKI_FOLDER'], filename)
with open(filename, 'rb') as fd:
content = fd.read() # Read and process the file content...
:param directory: the base directory.
:param filename: the untrusted filename relative to that directory.
:raises: :class:`~werkzeug.exceptions.NotFound` if the resulting path
would fall out of `directory`.
"""
filename = posixpath.normpath(filename)
for sep in _os_alt_seps:
if sep in filename:
raise NotFound()
if os.path.isabs(filename) or \
filename == '..' or \
filename.startswith('../'):
raise NotFound()
return os.path.join(directory, filename)
def send_from_directory(directory, filename, **options):
"""Send a file from a given directory with :func:`send_file`. This
is a secure way to quickly expose static files from an upload folder
or something similar.
Example usage::
@app.route('/uploads/<path:filename>')
def download_file(filename):
return send_from_directory(app.config['UPLOAD_FOLDER'],
filename, as_attachment=True)
.. admonition:: Sending files and Performance
It is strongly recommended to activate either `X-Sendfile` support in
your webserver or (if no authentication happens) to tell the webserver
to serve files for the given path on its own without calling into the
web application for improved performance.
.. versionadded:: 0.5
:param directory: the directory where all the files are stored.
:param filename: the filename relative to that directory to
download.
:param options: optional keyword arguments that are directly
forwarded to :func:`send_file`.
"""
filename = safe_join(directory, filename)
if not os.path.isfile(filename):
raise NotFound()
options.setdefault('conditional', True)
return send_file(filename, **options)
def get_root_path(import_name):
"""Returns the path to a package or cwd if that cannot be found. This
returns the path of a package or the folder that contains a module.
Not to be confused with the package path returned by :func:`find_package`.
"""
# Module already imported and has a file attribute. Use that first.
mod = sys.modules.get(import_name)
if mod is not None and hasattr(mod, '__file__'):
return os.path.dirname(os.path.abspath(mod.__file__))
# Next attempt: check the loader.
loader = pkgutil.get_loader(import_name)
# Loader does not exist or we're referring to an unloaded main module
# or a main module without path (interactive sessions), go with the
# current working directory.
if loader is None or import_name == '__main__':
return os.getcwd()
# For .egg, zipimporter does not have get_filename until Python 2.7.
# Some other loaders might exhibit the same behavior.
if hasattr(loader, 'get_filename'):
filepath = loader.get_filename(import_name)
else:
# Fall back to imports.
__import__(import_name)
filepath = sys.modules[import_name].__file__
# filepath is import_name.py for a module, or __init__.py for a package.
return os.path.dirname(os.path.abspath(filepath))
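# Illustrative example (not part of Flask): get_root_path('flask') returns the
# directory that contains the installed flask package, while an unknown or
# interactive __main__ module falls back to the current working directory.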
def find_package(import_name):
"""Finds a package and returns the prefix (or None if the package is
not installed) as well as the folder that contains the package or
module as a tuple. The package path returned is the module that would
have to be added to the pythonpath in order to make it possible to
import the module. The prefix is the path below which a UNIX like
folder structure exists (lib, share etc.).
"""
root_mod_name = import_name.split('.')[0]
loader = pkgutil.get_loader(root_mod_name)
if loader is None or import_name == '__main__':
# import name is not found, or interactive/main module
package_path = os.getcwd()
else:
# For .egg, zipimporter does not have get_filename until Python 2.7.
if hasattr(loader, 'get_filename'):
filename = loader.get_filename(root_mod_name)
elif hasattr(loader, 'archive'):
# zipimporter's loader.archive points to the .egg or .zip
# archive filename is dropped in call to dirname below.
filename = loader.archive
else:
# At least one loader is missing both get_filename and archive:
# Google App Engine's HardenedModulesHook
#
# Fall back to imports.
__import__(import_name)
filename = sys.modules[import_name].__file__
package_path = os.path.abspath(os.path.dirname(filename))
# package_path ends with __init__.py for a package
if loader.is_package(root_mod_name):
package_path = os.path.dirname(package_path)
site_parent, site_folder = os.path.split(package_path)
py_prefix = os.path.abspath(sys.prefix)
if package_path.startswith(py_prefix):
return py_prefix, package_path
elif site_folder.lower() == 'site-packages':
parent, folder = os.path.split(site_parent)
# Windows like installations
if folder.lower() == 'lib':
base_dir = parent
# UNIX like installations
elif os.path.basename(parent).lower() == 'lib':
base_dir = os.path.dirname(parent)
else:
base_dir = site_parent
return base_dir, package_path
return None, package_path
class locked_cached_property(object):
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value. Works like the one in Werkzeug but has a lock for
thread safety.
"""
def __init__(self, func, name=None, doc=None):
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
self.lock = RLock()
def __get__(self, obj, type=None):
if obj is None:
return self
with self.lock:
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
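# Illustrative sketch (not part of Flask): how locked_cached_property is meant
# to be used. The class and attribute names are assumptions of this example.
class _ExampleLazyConfig(object):
    @locked_cached_property
    def expensive_default(self):
        # Computed once, under the lock, on first access; later reads return
        # the value cached on the instance.
        return sum(range(1000))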
class _PackageBoundObject(object):
def __init__(self, import_name, template_folder=None):
#: The name of the package or module. Do not change this once
#: it was set by the constructor.
self.import_name = import_name
#: location of the templates. `None` if templates should not be
#: exposed.
self.template_folder = template_folder
#: Where is the app root located?
self.root_path = get_root_path(self.import_name)
self._static_folder = None
self._static_url_path = None
def _get_static_folder(self):
if self._static_folder is not None:
return os.path.join(self.root_path, self._static_folder)
def _set_static_folder(self, value):
self._static_folder = value
static_folder = property(_get_static_folder, _set_static_folder)
del _get_static_folder, _set_static_folder
def _get_static_url_path(self):
if self._static_url_path is None:
if self.static_folder is None:
return None
return '/' + os.path.basename(self.static_folder)
return self._static_url_path
def _set_static_url_path(self, value):
self._static_url_path = value
static_url_path = property(_get_static_url_path, _set_static_url_path)
del _get_static_url_path, _set_static_url_path
@property
def has_static_folder(self):
"""This is `True` if the package bound object's container has a
folder named ``'static'``.
.. versionadded:: 0.5
"""
return self.static_folder is not None
@locked_cached_property
def jinja_loader(self):
"""The Jinja loader for this package bound object.
.. versionadded:: 0.5
"""
if self.template_folder is not None:
return FileSystemLoader(os.path.join(self.root_path,
self.template_folder))
def get_send_file_max_age(self, filename):
"""Provides default cache_timeout for the :func:`send_file` functions.
By default, this function returns ``SEND_FILE_MAX_AGE_DEFAULT`` from
the configuration of :data:`~flask.current_app`.
Static file functions such as :func:`send_from_directory` use this
function, and :func:`send_file` calls this function on
:data:`~flask.current_app` when the given cache_timeout is `None`. If a
cache_timeout is given in :func:`send_file`, that timeout is used;
otherwise, this method is called.
This allows subclasses to change the behavior when sending files based
on the filename. For example, to set the cache timeout for .js files
to 60 seconds::
class MyFlask(flask.Flask):
def get_send_file_max_age(self, name):
if name.lower().endswith('.js'):
return 60
return flask.Flask.get_send_file_max_age(self, name)
.. versionadded:: 0.9
"""
return current_app.config['SEND_FILE_MAX_AGE_DEFAULT']
def send_static_file(self, filename):
"""Function used internally to send static files from the static
folder to the browser.
.. versionadded:: 0.5
"""
if not self.has_static_folder:
raise RuntimeError('No static folder for this object')
# Ensure get_send_file_max_age is called in all cases.
# Here, we ensure get_send_file_max_age is called for Blueprints.
cache_timeout = self.get_send_file_max_age(filename)
return send_from_directory(self.static_folder, filename,
cache_timeout=cache_timeout)
def open_resource(self, resource, mode='rb'):
"""Opens a resource from the application's resource folder. To see
how this works, consider the following folder structure::
/myapplication.py
/schema.sql
/static
/style.css
/templates
/layout.html
/index.html
If you want to open the `schema.sql` file you would do the
following::
with app.open_resource('schema.sql') as f:
contents = f.read()
do_something_with(contents)
:param resource: the name of the resource. To access resources within
subfolders use forward slashes as separator.
:param mode: resource file opening mode, default is 'rb'.
"""
if mode not in ('r', 'rb'):
raise ValueError('Resources can only be opened for reading')
return open(os.path.join(self.root_path, resource), mode)
| gpl-2.0 |
gemfire/py-gemfire-rest | tests/PerformanceTests.py | 2 | 2432 | '''
Copyright (c) 2014 Pivotal Software, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software distributed under the License
is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
'''
from GemfireClient import *
import time
from Customer import *
class PerformanceTests:
def __init__(self):
hostname = raw_input("Enter hostname: ")
port = raw_input("Enter port: ")
self.client = GemfireClient(hostname,port)
self.myRegion = self.client.get_region(raw_input("Enter Region Name: "))
def warmup(self):
for x in range(0,10):
self.myRegion.put(x,{"random":"random"})
self.myRegion.get(x)
self.myRegion.clear()
def put(self, num):
self.warmup()
start = time.clock()
for x in range(0,num):
self.myRegion.put(x,Customer("John Doe", 42))
end = time.clock()
return (end-start)
def put_all(self, num):
self.warmup()
item = {}
for x in range(0,num):
item[x] = Customer("New Person", 1)
start = time.clock()
self.myRegion.put_all(item)
end = time.clock()
return (end-start)
def get(self, num):
self.warmup()
for x in range(0,num):
self.myRegion.put(x,Customer("John Doe", 42))
start = time.clock()
for x in range(0,num):
self.myRegion.get(x)
end = time.clock()
return (end-start)
def run_test(self,testname):
filename = raw_input("Enter filename to store run data: ")
file = open(filename, "w")
op_num = input("Number of operations per run: ")
runs = input("Number of runs: ")
name = getattr(PerformanceTests,testname)
total = 0
for x in range(0,runs):
y=name(self,op_num)
file.write(str(y)+"\n")
total+=y
file.close()
print "The average run time for " + str(op_num) + " " + testname + "s was " + str(total/runs) + " seconds"
| apache-2.0 |
UdK-VPT/Open_eQuarter | mole3/stat_corr/rb_present_wall_uvalue_AVG_by_building_age_lookup.py | 2 | 2425 | # coding: utf8
# OeQ autogenerated lookup function for 'U-Values of Walls in correlation to year of construction, based on the source data of the survey for the "German Building Typology" developed by the "Institut für Wohnen und Umwelt", Darmstadt/Germany, 2011-2013'
import math
import numpy as np
from . import oeqLookuptable as oeq
def get(*xin):
l_lookup = oeq.lookuptable(
[0,1.85,
1849,1.85,
1850,1.85,
1851,1.85,
1852,1.85,
1853,1.85,
1854,1.85,
1855,1.85,
1856,1.848,
1857,1.846,
1858,1.843,
1859,1.84,
1860,1.837,
1861,1.834,
1862,1.832,
1863,1.83,
1864,1.829,
1865,1.829,
1866,1.83,
1867,1.83,
1868,1.83,
1869,1.83,
1870,1.83,
1871,1.83,
1872,1.83,
1873,1.83,
1874,1.83,
1875,1.83,
1876,1.83,
1877,1.83,
1878,1.83,
1879,1.83,
1880,1.83,
1881,1.83,
1882,1.83,
1883,1.83,
1884,1.83,
1885,1.83,
1886,1.83,
1887,1.83,
1888,1.83,
1889,1.83,
1890,1.83,
1891,1.83,
1892,1.83,
1893,1.83,
1894,1.83,
1895,1.83,
1896,1.83,
1897,1.83,
1898,1.83,
1899,1.83,
1900,1.83,
1901,1.83,
1902,1.83,
1903,1.83,
1904,1.83,
1905,1.831,
1906,1.831,
1907,1.83,
1908,1.829,
1909,1.828,
1910,1.828,
1911,1.83,
1912,1.834,
1913,1.837,
1914,1.837,
1915,1.83,
1916,1.814,
1917,1.79,
1918,1.761,
1919,1.73,
1920,1.699,
1921,1.67,
1922,1.646,
1923,1.63,
1924,1.623,
1925,1.623,
1926,1.626,
1927,1.63,
1928,1.632,
1929,1.632,
1930,1.631,
1931,1.63,
1932,1.63,
1933,1.63,
1934,1.63,
1935,1.63,
1936,1.63,
1937,1.629,
1938,1.629,
1939,1.63,
1940,1.632,
1941,1.634,
1942,1.634,
1943,1.63,
1944,1.62,
1945,1.604,
1946,1.579,
1947,1.546,
1948,1.503,
1949,1.451,
1950,1.393,
1951,1.337,
1952,1.287,
1953,1.25,
1954,1.229,
1955,1.221,
1956,1.218,
1957,1.215,
1958,1.207,
1959,1.196,
1960,1.186,
1961,1.18,
1962,1.181,
1963,1.185,
1964,1.187,
1965,1.18,
1966,1.161,
1967,1.131,
1968,1.095,
1969,1.055,
1970,1.015,
1971,0.979,
1972,0.949,
1973,0.93,
1974,0.923,
1975,0.924,
1976,0.928,
1977,0.93,
1978,0.924,
1979,0.907,
1980,0.877,
1981,0.83,
1982,0.766,
1983,0.695,
1984,0.629,
1985,0.58,
1986,0.557,
1987,0.555,
1988,0.566,
1989,0.58,
1990,0.59,
1991,0.595,
1992,0.592,
1993,0.58,
1994,0.559,
1995,0.533,
1996,0.505,
1997,0.48,
1998,0.461,
1999,0.446,
2000,0.434,
2001,0.42,
2002,0.404,
2003,0.387,
2004,0.371,
2005,0.36,
2006,0.36,
2007,0.36,
2008,0.36,
2009,0.36,
2010,0.361,
2011,0.361,
2012,0.361,
2013,0.36,
2014,0.36,
2015,0.36,
2016,0.36,
2017,0.36,
2018,0.36,
2019,0.36,
2020,0.36,
2021,0.36,
10000,0.2])
return(l_lookup.lookup(xin))
| gpl-2.0 |
bdh1011/wau | venv/lib/python2.7/site-packages/nbformat/v4/tests/test_validate.py | 3 | 2506 | """Tests for nbformat validation"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import io
import os
import nose.tools as nt
from nbformat.validator import validate, ValidationError
from ..nbjson import reads
from ..nbbase import (
nbformat,
new_code_cell, new_markdown_cell, new_notebook,
new_output, new_raw_cell,
)
def validate4(obj, ref=None):
return validate(obj, ref, version=nbformat)
def test_valid_code_cell():
cell = new_code_cell()
validate4(cell, 'code_cell')
def test_invalid_code_cell():
cell = new_code_cell()
cell['source'] = 5
with nt.assert_raises(ValidationError):
validate4(cell, 'code_cell')
cell = new_code_cell()
del cell['metadata']
with nt.assert_raises(ValidationError):
validate4(cell, 'code_cell')
cell = new_code_cell()
del cell['source']
with nt.assert_raises(ValidationError):
validate4(cell, 'code_cell')
cell = new_code_cell()
del cell['cell_type']
with nt.assert_raises(ValidationError):
validate4(cell, 'code_cell')
def test_invalid_markdown_cell():
cell = new_markdown_cell()
cell['source'] = 5
with nt.assert_raises(ValidationError):
validate4(cell, 'markdown_cell')
cell = new_markdown_cell()
del cell['metadata']
with nt.assert_raises(ValidationError):
validate4(cell, 'markdown_cell')
cell = new_markdown_cell()
del cell['source']
with nt.assert_raises(ValidationError):
validate4(cell, 'markdown_cell')
cell = new_markdown_cell()
del cell['cell_type']
with nt.assert_raises(ValidationError):
validate4(cell, 'markdown_cell')
def test_invalid_raw_cell():
cell = new_raw_cell()
cell['source'] = 5
with nt.assert_raises(ValidationError):
validate4(cell, 'raw_cell')
cell = new_raw_cell()
del cell['metadata']
with nt.assert_raises(ValidationError):
validate4(cell, 'raw_cell')
cell = new_raw_cell()
del cell['source']
with nt.assert_raises(ValidationError):
validate4(cell, 'raw_cell')
cell = new_raw_cell()
del cell['cell_type']
with nt.assert_raises(ValidationError):
validate4(cell, 'raw_cell')
def test_sample_notebook():
here = os.path.dirname(__file__)
with io.open(os.path.join(here, os.pardir, os.pardir, 'tests', "test4.ipynb"), encoding='utf-8') as f:
nb = reads(f.read())
validate4(nb)
| mit |
bstadie/cgt | examples/demo_char_rnn.py | 13 | 12470 | """
A nearly direct translation of Andrej's code
https://github.com/karpathy/char-rnn
"""
from __future__ import division
import cgt
from cgt import nn, utils, profiler
import numpy as np, numpy.random as nr
import os.path as osp
import argparse
from time import time
from StringIO import StringIO
from param_collection import ParamCollection
# via https://github.com/karpathy/char-rnn/blob/master/model/GRU.lua
# via http://arxiv.org/pdf/1412.3555v1.pdf
def make_deep_gru(size_input, size_mem, n_layers, size_output, size_batch):
inputs = [cgt.matrix() for i_layer in xrange(n_layers+1)]
outputs = []
for i_layer in xrange(n_layers):
prev_h = inputs[i_layer+1] # note that inputs[0] is the external input, so we add 1
x = inputs[0] if i_layer==0 else outputs[i_layer-1]
size_x = size_input if i_layer==0 else size_mem
update_gate = cgt.sigmoid(
nn.Affine(size_x, size_mem,name="i2u")(x)
+ nn.Affine(size_mem, size_mem, name="h2u")(prev_h))
reset_gate = cgt.sigmoid(
nn.Affine(size_x, size_mem,name="i2r")(x)
+ nn.Affine(size_mem, size_mem, name="h2r")(prev_h))
gated_hidden = reset_gate * prev_h
p2 = nn.Affine(size_mem, size_mem)(gated_hidden)
p1 = nn.Affine(size_x, size_mem)(x)
hidden_target = cgt.tanh(p1+p2)
next_h = (1.0-update_gate)*prev_h + update_gate*hidden_target
outputs.append(next_h)
category_activations = nn.Affine(size_mem, size_output,name="pred")(outputs[-1])
logprobs = nn.logsoftmax(category_activations)
outputs.append(logprobs)
return nn.Module(inputs, outputs)
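# Added note (not in the original script): per layer, the loop above is the
# standard GRU cell,
#   u_t = sigmoid(W_u x_t + U_u h_{t-1})            # update gate
#   r_t = sigmoid(W_r x_t + U_r h_{t-1})            # reset gate
#   htilde_t = tanh(W_h x_t + U_h (r_t * h_{t-1}))  # candidate state
#   h_t = (1 - u_t) * h_{t-1} + u_t * htilde_t
# where each W/U pair corresponds to one of the nn.Affine layers created above.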
def make_deep_lstm(size_input, size_mem, n_layers, size_output, size_batch):
inputs = [cgt.matrix(fixed_shape=(size_batch, size_input))]
for _ in xrange(2*n_layers):
inputs.append(cgt.matrix(fixed_shape=(size_batch, size_mem)))
outputs = []
for i_layer in xrange(n_layers):
prev_h = inputs[i_layer*2]
prev_c = inputs[i_layer*2+1]
if i_layer==0:
x = inputs[0]
size_x = size_input
else:
x = outputs[(i_layer-1)*2]
size_x = size_mem
input_sums = nn.Affine(size_x, 4*size_mem)(x) + nn.Affine(size_x, 4*size_mem)(prev_h)
sigmoid_chunk = cgt.sigmoid(input_sums[:,0:3*size_mem])
in_gate = sigmoid_chunk[:,0:size_mem]
forget_gate = sigmoid_chunk[:,size_mem:2*size_mem]
out_gate = sigmoid_chunk[:,2*size_mem:3*size_mem]
in_transform = cgt.tanh(input_sums[:,3*size_mem:4*size_mem])
next_c = forget_gate*prev_c + in_gate * in_transform
next_h = out_gate*cgt.tanh(next_c)
outputs.append(next_c)
outputs.append(next_h)
category_activations = nn.Affine(size_mem, size_output)(outputs[-1])
logprobs = nn.logsoftmax(category_activations)
outputs.append(logprobs)
return nn.Module(inputs, outputs)
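# Added note (not in the original script): the slicing above computes the
# standard LSTM cell from a single fused affine product,
#   i_t, f_t, o_t = sigmoid(first 3*size_mem columns of input_sums)
#   ctilde_t      = tanh(last size_mem columns of input_sums)
#   c_t           = f_t * c_{t-1} + i_t * ctilde_t
#   h_t           = o_t * tanh(c_t)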
def flatcat(xs):
return cgt.concatenate([x.flatten() for x in xs])
def cat_sample(ps):
"""
sample from categorical distribution
ps is a 2D array whose rows are vectors of probabilities
"""
r = nr.rand(len(ps))
out = np.zeros(len(ps),dtype='i4')
cumsums = np.cumsum(ps, axis=1)
for (irow,csrow) in enumerate(cumsums):
for (icol, csel) in enumerate(csrow):
if csel > r[irow]:
out[irow] = icol
break
return out
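# Illustrative usage (added for clarity, not in the original script): each row
# of `ps` is an independent categorical distribution and one index is drawn
# per row, e.g.
#   cat_sample(np.array([[0.1, 0.9],
#                        [0.5, 0.5]]))   # -> e.g. array([1, 0], dtype=int32)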
def rmsprop_update(grad, state):
state.sqgrad[:] *= state.decay_rate
state.count *= state.decay_rate
np.square(grad, out=state.scratch) # scratch=g^2
state.sqgrad += state.scratch
state.count += 1
np.sqrt(state.sqgrad, out=state.scratch) # scratch = sqrt of decayed sum of squared grads
np.divide(state.scratch, np.sqrt(state.count), out=state.scratch) # scratch = rms
np.divide(grad, state.scratch, out=state.scratch) # scratch = grad/rms
np.multiply(state.scratch, state.step_size, out=state.scratch)
state.theta[:] -= state.scratch
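# Added note (not in the original script): written out, the in-place updates
# above implement
#   s_t     = decay * s_{t-1} + g_t**2
#   n_t     = decay * n_{t-1} + 1
#   theta_t = theta_{t-1} - step_size * g_t / sqrt(s_t / n_t)
# i.e. RMSProp with a decayed count in the denominator.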
def make_loss_and_grad_and_step(arch, size_input, size_output, size_mem, size_batch, n_layers, n_unroll):
# symbolic variables
x_tnk = cgt.tensor3()
targ_tnk = cgt.tensor3()
make_network = make_deep_lstm if arch=="lstm" else make_deep_gru
network = make_network(size_input, size_mem, n_layers, size_output, size_batch)
init_hiddens = [cgt.matrix() for _ in xrange(get_num_hiddens(arch, n_layers))]
# TODO fixed sizes
cur_hiddens = init_hiddens
loss = 0
for t in xrange(n_unroll):
outputs = network([x_tnk[t]] + cur_hiddens)
cur_hiddens, prediction_logprobs = outputs[:-1], outputs[-1]
# loss = loss + nn.categorical_negloglik(prediction_probs, targ_tnk[t]).sum()
loss = loss - (prediction_logprobs*targ_tnk[t]).sum()
cur_hiddens = outputs[:-1]
final_hiddens = cur_hiddens
loss = loss / (n_unroll * size_batch)
params = network.get_parameters()
gradloss = cgt.grad(loss, params)
flatgrad = flatcat(gradloss)
with utils.Message("compiling loss+grad"):
f_loss_and_grad = cgt.function([x_tnk, targ_tnk] + init_hiddens, [loss, flatgrad] + final_hiddens)
f_loss = cgt.function([x_tnk, targ_tnk] + init_hiddens, loss)
assert len(init_hiddens) == len(final_hiddens)
x_nk = cgt.matrix('x')
outputs = network([x_nk] + init_hiddens)
f_step = cgt.function([x_nk]+init_hiddens, outputs)
# print "node count", cgt.count_nodes(flatgrad)
return network, f_loss, f_loss_and_grad, f_step
class Table(dict):
"dictionary-like object that exposes its keys as attributes"
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
def make_rmsprop_state(theta, step_size, decay_rate):
return Table(theta=theta, sqgrad=np.zeros_like(theta)+1e-6, scratch=np.empty_like(theta),
step_size=step_size, decay_rate=decay_rate, count=0)
class Loader(object):
def __init__(self, data_dir, size_batch, n_unroll, split_fractions):
input_file = osp.join(data_dir,"input.txt")
preproc_file = osp.join(data_dir, "preproc.npz")
run_preproc = not osp.exists(preproc_file) or osp.getmtime(input_file) > osp.getmtime(preproc_file)
if run_preproc:
text_to_tensor(input_file, preproc_file)
data_file = np.load(preproc_file)
self.char2ind = {char:ind for (ind,char) in enumerate(data_file["chars"])}
data = data_file["inds"]
data = data[:data.shape[0] - (data.shape[0] % size_batch)].reshape(size_batch, -1).T # inds_tn
n_batches = (data.shape[0]-1) // n_unroll
data = data[:n_batches*n_unroll+1] # now t-1 is divisble by batch size
self.n_unroll = n_unroll
self.data = data
self.n_train_batches = int(n_batches*split_fractions[0])
self.n_test_batches = int(n_batches*split_fractions[1])
self.n_val_batches = n_batches - self.n_train_batches - self.n_test_batches
print "%i train batches, %i test batches, %i val batches"%(self.n_train_batches, self.n_test_batches, self.n_val_batches)
@property
def size_vocab(self):
return len(self.char2ind)
def train_batches_iter(self):
for i in xrange(self.n_train_batches):
start = i*self.n_unroll
stop = (i+1)*self.n_unroll
yield ind2onehot(self.data[start:stop], self.size_vocab), ind2onehot(self.data[start+1:stop+1], self.size_vocab) # XXX
# XXX move elsewhere
def ind2onehot(inds, n_cls):
inds = np.asarray(inds)
out = np.zeros(inds.shape+(n_cls,),cgt.floatX)
out.flat[np.arange(inds.size)*n_cls + inds.ravel()] = 1
return out
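# Illustrative example (added, not in the original script):
#   ind2onehot([2, 0], 3) -> [[0., 0., 1.],
#                             [1., 0., 0.]]
# For a (T, N) array of indices the result has shape (T, N, n_cls).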
def text_to_tensor(text_file, preproc_file):
with open(text_file,"r") as fh:
text = fh.read()
char2ind = {}
inds = []
for char in text:
ind = char2ind.get(char, -1)
if ind == -1:
ind = len(char2ind)
char2ind[char] = ind
inds.append(ind)
np.savez(preproc_file, inds = inds, chars = sorted(char2ind, key = lambda char : char2ind[char]))
def get_num_hiddens(arch, n_layers):
return {"lstm" : 2 * n_layers, "gru" : n_layers}[arch]
def sample(f_step, init_hiddens, char2ind, n_steps, temperature, seed_text = ""):
vocab_size = len(char2ind)
ind2char = {ind:char for (char,ind) in char2ind.iteritems()}
cur_hiddens = init_hiddens
t = StringIO()
t.write(seed_text)
for char in seed_text:
x_1k = ind2onehot([char2ind[char]], vocab_size)
net_outputs = f_step(x_1k, *cur_hiddens)  # hidden states are passed as separate arguments
cur_hiddens, logprobs_1k = net_outputs[:-1], net_outputs[-1]
if len(seed_text)==0:
logprobs_1k = np.zeros((1,vocab_size))
for _ in xrange(n_steps):
logprobs_1k /= temperature
probs_1k = np.exp(logprobs_1k*2)
probs_1k /= probs_1k.sum()
index = cat_sample(probs_1k)[0]
char = ind2char[index]
x_1k = ind2onehot([index], vocab_size)
net_outputs = f_step(x_1k, *cur_hiddens)
cur_hiddens, logprobs_1k = net_outputs[:-1], net_outputs[-1]
t.write(char)
cgt.utils.colorprint(cgt.utils.Color.YELLOW, t.getvalue() + "\n")
def main():
nr.seed(0)
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", type=str, default="alice")
parser.add_argument("--size_mem", type=int,default=64)
parser.add_argument("--size_batch", type=int,default=64)
parser.add_argument("--n_layers",type=int,default=2)
parser.add_argument("--n_unroll",type=int,default=16)
parser.add_argument("--step_size",type=float,default=.01)
parser.add_argument("--decay_rate",type=float,default=0.95)
parser.add_argument("--n_epochs",type=int,default=20)
parser.add_argument("--arch",choices=["lstm","gru"],default="lstm")
parser.add_argument("--grad_check",action="store_true")
parser.add_argument("--profile",action="store_true")
parser.add_argument("--unittest",action="store_true")
parser.add_argument("--temperature",type=float,default=1)
args = parser.parse_args()
cgt.set_precision("quad" if args.grad_check else "single")
assert args.n_unroll > 1
loader = Loader(args.data_dir,args.size_batch, args.n_unroll, (1.0,0,0))
network, f_loss, f_loss_and_grad, f_step = make_loss_and_grad_and_step(args.arch, loader.size_vocab,
loader.size_vocab, args.size_mem, args.size_batch, args.n_layers, args.n_unroll)
if args.profile: profiler.start()
params = network.get_parameters()
pc = ParamCollection(params)
pc.set_value_flat(nr.uniform(-.1, .1, size=(pc.get_total_size(),)))
def initialize_hiddens(n):
return [np.zeros((n, args.size_mem), cgt.floatX) for _ in xrange(get_num_hiddens(args.arch, args.n_layers))]
if args.grad_check:
x,y = loader.train_batches_iter().next()
prev_hiddens = initialize_hiddens(args.size_batch)
def f(thnew):
thold = pc.get_value_flat()
pc.set_value_flat(thnew)
loss = f_loss(x,y, *prev_hiddens)
pc.set_value_flat(thold)
return loss
from cgt.numeric_diff import numeric_grad
g_num = numeric_grad(f, pc.get_value_flat(),eps=1e-10)
result = f_loss_and_grad(x,y,*prev_hiddens)
g_anal = result[1]
assert np.allclose(g_num, g_anal, atol=1e-4)
print "Gradient check succeeded!"
return
optim_state = make_rmsprop_state(theta=pc.get_value_flat(), step_size = args.step_size,
decay_rate = args.decay_rate)
for iepoch in xrange(args.n_epochs):
losses = []
tstart = time()
print "starting epoch",iepoch
cur_hiddens = initialize_hiddens(args.size_batch)
for (x,y) in loader.train_batches_iter():
out = f_loss_and_grad(x,y, *cur_hiddens)
loss = out[0]
grad = out[1]
cur_hiddens = out[2:]
rmsprop_update(grad, optim_state)
pc.set_value_flat(optim_state.theta)
losses.append(loss)
if args.unittest: return
print "%.3f s/batch. avg loss = %.3f"%((time()-tstart)/len(losses), np.mean(losses))
optim_state.step_size *= .98 #pylint: disable=E1101
sample(f_step, initialize_hiddens(1), char2ind=loader.char2ind, n_steps=1000, temperature=args.temperature, seed_text = "")
if args.profile: profiler.print_stats()
if __name__ == "__main__":
main()
| mit |
jacshfr/mozilla-bedrock | vendor-local/packages/chardet/chardet/mbcsgroupprober.py | 236 | 1889 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from charsetgroupprober import CharSetGroupProber
from utf8prober import UTF8Prober
from sjisprober import SJISProber
from eucjpprober import EUCJPProber
from gb2312prober import GB2312Prober
from euckrprober import EUCKRProber
from big5prober import Big5Prober
from euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
def __init__(self):
CharSetGroupProber.__init__(self)
self._mProbers = [ \
UTF8Prober(),
SJISProber(),
EUCJPProber(),
GB2312Prober(),
EUCKRProber(),
Big5Prober(),
EUCTWProber()]
self.reset()
| mpl-2.0 |
ToonTownInfiniteRepo/ToontownInfinite | Panda3D-1.9.0/python/Lib/site-packages/setuptools/tests/test_upload_docs.py | 522 | 2139 | """upload_docs tests
"""
import sys, os, shutil, tempfile, unittest, site, zipfile
from setuptools.command.upload_docs import upload_docs
from setuptools.dist import Distribution
SETUP_PY = """\
from setuptools import setup
setup(name='foo')
"""
class TestUploadDocsTest(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
setup = os.path.join(self.dir, 'setup.py')
f = open(setup, 'w')
f.write(SETUP_PY)
f.close()
self.old_cwd = os.getcwd()
os.chdir(self.dir)
self.upload_dir = os.path.join(self.dir, 'build')
os.mkdir(self.upload_dir)
# A test document.
f = open(os.path.join(self.upload_dir, 'index.html'), 'w')
f.write("Hello world.")
f.close()
# An empty folder.
os.mkdir(os.path.join(self.upload_dir, 'empty'))
if sys.version >= "2.6":
self.old_base = site.USER_BASE
site.USER_BASE = upload_docs.USER_BASE = tempfile.mkdtemp()
self.old_site = site.USER_SITE
site.USER_SITE = upload_docs.USER_SITE = tempfile.mkdtemp()
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.dir)
if sys.version >= "2.6":
shutil.rmtree(site.USER_BASE)
shutil.rmtree(site.USER_SITE)
site.USER_BASE = self.old_base
site.USER_SITE = self.old_site
def test_create_zipfile(self):
# Test to make sure zipfile creation handles common cases.
# This explicitly includes a folder containing an empty folder.
dist = Distribution()
cmd = upload_docs(dist)
cmd.upload_dir = self.upload_dir
cmd.target_dir = self.upload_dir
tmp_dir = tempfile.mkdtemp()
tmp_file = os.path.join(tmp_dir, 'foo.zip')
try:
zip_file = cmd.create_zipfile(tmp_file)
assert zipfile.is_zipfile(tmp_file)
zip_file = zipfile.ZipFile(tmp_file) # woh...
assert zip_file.namelist() == ['index.html']
zip_file.close()
finally:
shutil.rmtree(tmp_dir)
| mit |
mrquim/repository.mrquim | script.module.pycryptodome/lib/Crypto/SelfTest/Cipher/test_CFB.py | 5 | 16124 | # ===================================================================
#
# Copyright (c) 2014, Legrandin <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ===================================================================
import unittest
from Crypto.SelfTest.loader import load_tests
from Crypto.SelfTest.st_common import list_test_cases
from Crypto.Util.py3compat import tobytes, b, unhexlify
from Crypto.Cipher import AES, DES3, DES
from Crypto.Hash import SHAKE128
def get_tag_random(tag, length):
return SHAKE128.new(data=tobytes(tag)).read(length)
from Crypto.SelfTest.Cipher.test_CBC import BlockChainingTests
class CfbTests(BlockChainingTests):
aes_mode = AES.MODE_CFB
des3_mode = DES3.MODE_CFB
# Redefine test_unaligned_data_128/64
def test_unaligned_data_128(self):
plaintexts = [ b("7777777") ] * 100
cipher = AES.new(self.key_128, AES.MODE_CFB, self.iv_128, segment_size=8)
ciphertexts = [ cipher.encrypt(x) for x in plaintexts ]
cipher = AES.new(self.key_128, AES.MODE_CFB, self.iv_128, segment_size=8)
self.assertEqual(b("").join(ciphertexts), cipher.encrypt(b("").join(plaintexts)))
cipher = AES.new(self.key_128, AES.MODE_CFB, self.iv_128, segment_size=128)
ciphertexts = [ cipher.encrypt(x) for x in plaintexts ]
cipher = AES.new(self.key_128, AES.MODE_CFB, self.iv_128, segment_size=128)
self.assertEqual(b("").join(ciphertexts), cipher.encrypt(b("").join(plaintexts)))
def test_unaligned_data_64(self):
plaintexts = [ b("7777777") ] * 100
cipher = DES3.new(self.key_192, DES3.MODE_CFB, self.iv_64, segment_size=8)
ciphertexts = [ cipher.encrypt(x) for x in plaintexts ]
cipher = DES3.new(self.key_192, DES3.MODE_CFB, self.iv_64, segment_size=8)
self.assertEqual(b("").join(ciphertexts), cipher.encrypt(b("").join(plaintexts)))
cipher = DES3.new(self.key_192, DES3.MODE_CFB, self.iv_64, segment_size=64)
ciphertexts = [ cipher.encrypt(x) for x in plaintexts ]
cipher = DES3.new(self.key_192, DES3.MODE_CFB, self.iv_64, segment_size=64)
self.assertEqual(b("").join(ciphertexts), cipher.encrypt(b("").join(plaintexts)))
# Extra
def test_segment_size_128(self):
for bits in xrange(8, 129, 8):
cipher = AES.new(self.key_128, AES.MODE_CFB, self.iv_128,
segment_size=bits)
for bits in 0, 7, 9, 127, 129:
self.assertRaises(ValueError, AES.new, self.key_128, AES.MODE_CFB,
self.iv_128,
segment_size=bits)
def test_segment_size_64(self):
for bits in xrange(8, 65, 8):
cipher = DES3.new(self.key_192, DES3.MODE_CFB, self.iv_64,
segment_size=bits)
for bits in 0, 7, 9, 63, 65:
self.assertRaises(ValueError, DES3.new, self.key_192, AES.MODE_CFB,
self.iv_64,
segment_size=bits)
class NistCfbVectors(unittest.TestCase):
def _do_kat_aes_test(self, file_name, segment_size):
test_vectors = load_tests(("Crypto", "SelfTest", "Cipher", "test_vectors", "AES"),
file_name,
"AES CFB%d KAT" % segment_size,
{ "count" : lambda x: int(x) } )
assert(test_vectors)
direction = None
for tv in test_vectors:
# The test vector file contains some directive lines
if isinstance(tv, basestring):
direction = tv
continue
self.description = tv.desc
cipher = AES.new(tv.key, AES.MODE_CFB, tv.iv,
segment_size=segment_size)
if direction == "[ENCRYPT]":
self.assertEqual(cipher.encrypt(tv.plaintext), tv.ciphertext)
elif direction == "[DECRYPT]":
self.assertEqual(cipher.decrypt(tv.ciphertext), tv.plaintext)
else:
assert False
# See Section 6.4.5 in AESAVS
def _do_mct_aes_test(self, file_name, segment_size):
test_vectors = load_tests(("Crypto", "SelfTest", "Cipher", "test_vectors", "AES"),
file_name,
"AES CFB%d Montecarlo" % segment_size,
{ "count" : lambda x: int(x) } )
assert(test_vectors)
assert(segment_size in (8, 128))
direction = None
for tv in test_vectors:
# The test vector file contains some directive lines
if isinstance(tv, basestring):
direction = tv
continue
self.description = tv.desc
cipher = AES.new(tv.key, AES.MODE_CFB, tv.iv,
segment_size=segment_size)
def get_input(input_text, output_seq, j):
# CFB128
if segment_size == 128:
if j >= 2:
return output_seq[-2]
return [input_text, tv.iv][j]
# CFB8
if j == 0:
return input_text
elif j <= 16:
return tv.iv[j - 1:j]
return output_seq[j - 17]
if direction == '[ENCRYPT]':
cts = []
for j in xrange(1000):
plaintext = get_input(tv.plaintext, cts, j)
cts.append(cipher.encrypt(plaintext))
self.assertEqual(cts[-1], tv.ciphertext)
elif direction == '[DECRYPT]':
pts = []
for j in xrange(1000):
ciphertext = get_input(tv.ciphertext, pts, j)
pts.append(cipher.decrypt(ciphertext))
self.assertEqual(pts[-1], tv.plaintext)
else:
assert False
def _do_tdes_test(self, file_name, segment_size):
test_vectors = load_tests(("Crypto", "SelfTest", "Cipher", "test_vectors", "TDES"),
file_name,
"AES CFB%d KAT" % segment_size,
{ "count" : lambda x: int(x) } )
assert(test_vectors)
direction = None
for tv in test_vectors:
# The test vector file contains some directive lines
if isinstance(tv, basestring):
direction = tv
continue
self.description = tv.desc
if hasattr(tv, "keys"):
cipher = DES.new(tv.keys, DES.MODE_CFB, tv.iv,
segment_size=segment_size)
else:
if tv.key1 != tv.key3:
key = tv.key1 + tv.key2 + tv.key3 # Option 3
else:
key = tv.key1 + tv.key2 # Option 2
cipher = DES3.new(key, DES3.MODE_CFB, tv.iv,
segment_size=segment_size)
if direction == "[ENCRYPT]":
self.assertEqual(cipher.encrypt(tv.plaintext), tv.ciphertext)
elif direction == "[DECRYPT]":
self.assertEqual(cipher.decrypt(tv.ciphertext), tv.plaintext)
else:
assert False
# Create one test method per file
nist_aes_kat_mmt_files = (
# KAT
"CFB?GFSbox128.rsp",
"CFB?GFSbox192.rsp",
"CFB?GFSbox256.rsp",
"CFB?KeySbox128.rsp",
"CFB?KeySbox192.rsp",
"CFB?KeySbox256.rsp",
"CFB?VarKey128.rsp",
"CFB?VarKey192.rsp",
"CFB?VarKey256.rsp",
"CFB?VarTxt128.rsp",
"CFB?VarTxt192.rsp",
"CFB?VarTxt256.rsp",
# MMT
"CFB?MMT128.rsp",
"CFB?MMT192.rsp",
"CFB?MMT256.rsp",
)
nist_aes_mct_files = (
"CFB?MCT128.rsp",
"CFB?MCT192.rsp",
"CFB?MCT256.rsp",
)
for file_gen_name in nist_aes_kat_mmt_files:
for bits in "8", "128":
file_name = file_gen_name.replace("?", bits)
def new_func(self, file_name=file_name, bits=bits):
self._do_kat_aes_test(file_name, int(bits))
setattr(NistCfbVectors, "test_AES_" + file_name, new_func)
for file_gen_name in nist_aes_mct_files:
for bits in "8", "128":
file_name = file_gen_name.replace("?", bits)
def new_func(self, file_name=file_name, bits=bits):
self._do_mct_aes_test(file_name, int(bits))
setattr(NistCfbVectors, "test_AES_" + file_name, new_func)
del file_name, new_func
nist_tdes_files = (
"TCFB?MMT2.rsp", # 2TDES
"TCFB?MMT3.rsp", # 3TDES
"TCFB?invperm.rsp", # Single DES
"TCFB?permop.rsp",
"TCFB?subtab.rsp",
"TCFB?varkey.rsp",
"TCFB?vartext.rsp",
)
for file_gen_name in nist_tdes_files:
for bits in "8", "64":
file_name = file_gen_name.replace("?", bits)
def new_func(self, file_name=file_name, bits=bits):
self._do_tdes_test(file_name, int(bits))
setattr(NistCfbVectors, "test_TDES_" + file_name, new_func)
# END OF NIST CFB TEST VECTORS
class SP800TestVectors(unittest.TestCase):
"""Class exercising the CFB test vectors found in Section F.3
of NIST SP 800-38A"""
def test_aes_128_cfb8(self):
plaintext = '6bc1bee22e409f96e93d7e117393172aae2d'
ciphertext = '3b79424c9c0dd436bace9e0ed4586a4f32b9'
key = '2b7e151628aed2a6abf7158809cf4f3c'
iv = '000102030405060708090a0b0c0d0e0f'
key = unhexlify(key)
iv = unhexlify(iv)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=8)
self.assertEqual(cipher.encrypt(plaintext), ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=8)
self.assertEqual(cipher.decrypt(ciphertext), plaintext)
def test_aes_192_cfb8(self):
plaintext = '6bc1bee22e409f96e93d7e117393172aae2d'
ciphertext = 'cda2521ef0a905ca44cd057cbf0d47a0678a'
key = '8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b'
iv = '000102030405060708090a0b0c0d0e0f'
key = unhexlify(key)
iv = unhexlify(iv)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=8)
self.assertEqual(cipher.encrypt(plaintext), ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=8)
self.assertEqual(cipher.decrypt(ciphertext), plaintext)
def test_aes_256_cfb8(self):
plaintext = '6bc1bee22e409f96e93d7e117393172aae2d'
ciphertext = 'dc1f1a8520a64db55fcc8ac554844e889700'
key = '603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4'
iv = '000102030405060708090a0b0c0d0e0f'
key = unhexlify(key)
iv = unhexlify(iv)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=8)
self.assertEqual(cipher.encrypt(plaintext), ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=8)
self.assertEqual(cipher.decrypt(ciphertext), plaintext)
def test_aes_128_cfb128(self):
plaintext = '6bc1bee22e409f96e93d7e117393172a' +\
'ae2d8a571e03ac9c9eb76fac45af8e51' +\
'30c81c46a35ce411e5fbc1191a0a52ef' +\
'f69f2445df4f9b17ad2b417be66c3710'
ciphertext = '3b3fd92eb72dad20333449f8e83cfb4a' +\
'c8a64537a0b3a93fcde3cdad9f1ce58b' +\
'26751f67a3cbb140b1808cf187a4f4df' +\
'c04b05357c5d1c0eeac4c66f9ff7f2e6'
key = '2b7e151628aed2a6abf7158809cf4f3c'
iv = '000102030405060708090a0b0c0d0e0f'
key = unhexlify(key)
iv = unhexlify(iv)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
self.assertEqual(cipher.encrypt(plaintext), ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
self.assertEqual(cipher.decrypt(ciphertext), plaintext)
def test_aes_192_cfb128(self):
plaintext = '6bc1bee22e409f96e93d7e117393172a' +\
'ae2d8a571e03ac9c9eb76fac45af8e51' +\
'30c81c46a35ce411e5fbc1191a0a52ef' +\
'f69f2445df4f9b17ad2b417be66c3710'
ciphertext = 'cdc80d6fddf18cab34c25909c99a4174' +\
'67ce7f7f81173621961a2b70171d3d7a' +\
'2e1e8a1dd59b88b1c8e60fed1efac4c9' +\
'c05f9f9ca9834fa042ae8fba584b09ff'
key = '8e73b0f7da0e6452c810f32b809079e562f8ead2522c6b7b'
iv = '000102030405060708090a0b0c0d0e0f'
key = unhexlify(key)
iv = unhexlify(iv)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
self.assertEqual(cipher.encrypt(plaintext), ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
self.assertEqual(cipher.decrypt(ciphertext), plaintext)
def test_aes_256_cfb128(self):
plaintext = '6bc1bee22e409f96e93d7e117393172a' +\
'ae2d8a571e03ac9c9eb76fac45af8e51' +\
'30c81c46a35ce411e5fbc1191a0a52ef' +\
'f69f2445df4f9b17ad2b417be66c3710'
ciphertext = 'dc7e84bfda79164b7ecd8486985d3860' +\
'39ffed143b28b1c832113c6331e5407b' +\
'df10132415e54b92a13ed0a8267ae2f9' +\
'75a385741ab9cef82031623d55b1e471'
key = '603deb1015ca71be2b73aef0857d77811f352c073b6108d72d9810a30914dff4'
iv = '000102030405060708090a0b0c0d0e0f'
key = unhexlify(key)
iv = unhexlify(iv)
plaintext = unhexlify(plaintext)
ciphertext = unhexlify(ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
self.assertEqual(cipher.encrypt(plaintext), ciphertext)
cipher = AES.new(key, AES.MODE_CFB, iv, segment_size=128)
self.assertEqual(cipher.decrypt(ciphertext), plaintext)
def get_tests(config={}):
tests = []
tests += list_test_cases(CfbTests)
tests += list_test_cases(NistCfbVectors)
tests += list_test_cases(SP800TestVectors)
return tests
if __name__ == '__main__':
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
| gpl-2.0 |
lukleh/TwistedBot | twistedbot/plugins/base.py | 1 | 2342 |
import os
import pkgutil
import abc
from functools import wraps
class PluginMeta(abc.ABCMeta):
def __new__(meta, name, bases, dct):
cls = super(PluginMeta, meta).__new__(meta, name, bases, dct)
cls.handlers = []
for name, obj in cls.__dict__.iteritems():
if hasattr(obj, "__call__") and name.startswith("on_"):
cls.handlers.append(name)
return cls
class PluginBase(object):
__metaclass__ = PluginMeta
def __init__(self, world):
self.world = world
class PluginChatBase(PluginBase):
def send_chat_message(self, msg):
self.world.chat.send_chat_message(msg)
@abc.abstractproperty
def command_verb(self):
pass
@property
def aliases(self):
return []
@abc.abstractproperty
def help(self):
pass
@abc.abstractmethod
def command(self, sender, command, args):
pass
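# Minimal illustrative chat plugin (a sketch added for clarity, not part of the
# original module; real plugins must also expose a module-level `plugin`
# attribute, which load() below checks for):
#
# class Echo(PluginChatBase):
#     @property
#     def command_verb(self):
#         return "echo"
#     @property
#     def help(self):
#         return "repeat the given text back in chat"
#     def command(self, sender, command, args):
#         self.send_chat_message(" ".join(args))
#
# plugin = Echo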
class PluginEventHandlerBase(PluginBase):
pass
class PluginPlannerBase(object):
pass
def load(log, call_file, group):
plugs = []
path = [os.path.dirname(os.path.realpath(call_file))]
for loader, name, _ in list(pkgutil.iter_modules(path=path)):
try:
mpath = ".".join([__package__, group, name])
module = loader.find_module(mpath).load_module(mpath)
if not getattr(module, "plugin", False):
log.msg("module %s does not include plugin" % module.__name__)
continue
plugin_class = module.plugin
plugin_path = "%s.%s" % (module.__name__, plugin_class.__name__)
if issubclass(plugin_class, PluginChatBase):
log.msg("loaded chat plugin %s" % plugin_path)
plugs.append(plugin_class)
elif issubclass(plugin_class, PluginEventHandlerBase):
log.msg("loaded event plugin %s" % plugin_path)
plugs.append(plugin_class)
elif issubclass(plugin_class, PluginPlannerBase):
log.msg("loaded planner plugin %s" % plugin_path)
plugs.append(plugin_class)
else:
log.msg("class %s is not plugin" % plugin_path)
except Exception as e:
log.err(_stuff=e, _why="could not load plugin %s.py" % os.path.join(path[0], name))
return plugs
| mit |
Amechi101/concepteur-market-app | venv/lib/python2.7/site-packages/gunicorn/arbiter.py | 24 | 17047 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
from __future__ import with_statement
import errno
import os
import random
import select
import signal
import sys
import time
import traceback
from gunicorn.errors import HaltServer, AppImportError
from gunicorn.pidfile import Pidfile
from gunicorn.sock import create_sockets
from gunicorn import util
from gunicorn import __version__, SERVER_SOFTWARE
class Arbiter(object):
"""
Arbiter keeps the worker processes alive. It launches or
kills them if needed. It also manages application reloading
via SIGHUP/USR2.
"""
# A flag indicating if a worker failed to
# boot. If a worker process exits with
# this error code, the arbiter will terminate.
WORKER_BOOT_ERROR = 3
# A flag indicating if an application failed to be loaded
APP_LOAD_ERROR = 4
START_CTX = {}
LISTENERS = []
WORKERS = {}
PIPE = []
# I love dynamic languages
SIG_QUEUE = []
SIGNALS = [getattr(signal, "SIG%s" % x) \
for x in "HUP QUIT INT TERM TTIN TTOU USR1 USR2 WINCH".split()]
SIG_NAMES = dict(
(getattr(signal, name), name[3:].lower()) for name in dir(signal)
if name[:3] == "SIG" and name[3] != "_"
)
def __init__(self, app):
os.environ["SERVER_SOFTWARE"] = SERVER_SOFTWARE
self._num_workers = None
self.setup(app)
self.pidfile = None
self.worker_age = 0
self.reexec_pid = 0
self.master_name = "Master"
cwd = util.getcwd()
args = sys.argv[:]
args.insert(0, sys.executable)
# init start context
self.START_CTX = {
"args": args,
"cwd": cwd,
0: sys.executable
}
def _get_num_workers(self):
return self._num_workers
def _set_num_workers(self, value):
old_value = self._num_workers
self._num_workers = value
self.cfg.nworkers_changed(self, value, old_value)
num_workers = property(_get_num_workers, _set_num_workers)
def setup(self, app):
self.app = app
self.cfg = app.cfg
self.log = self.cfg.logger_class(app.cfg)
# reopen files
if 'GUNICORN_FD' in os.environ:
self.log.reopen_files()
self.address = self.cfg.address
self.num_workers = self.cfg.workers
self.debug = self.cfg.debug
self.timeout = self.cfg.timeout
self.proc_name = self.cfg.proc_name
self.worker_class = self.cfg.worker_class
if self.cfg.debug:
self.log.debug("Current configuration:")
for config, value in sorted(self.cfg.settings.items(),
key=lambda setting: setting[1]):
self.log.debug(" %s: %s", config, value.value)
if self.cfg.preload_app:
if not self.cfg.debug:
self.app.wsgi()
else:
self.log.warning("debug mode: app isn't preloaded.")
def start(self):
"""\
Initialize the arbiter. Start listening and set pidfile if needed.
"""
self.log.info("Starting gunicorn %s", __version__)
self.pid = os.getpid()
if self.cfg.pidfile is not None:
self.pidfile = Pidfile(self.cfg.pidfile)
self.pidfile.create(self.pid)
self.cfg.on_starting(self)
# set environment variables
if self.cfg.env:
for k, v in self.cfg.env.items():
os.environ[k] = v
self.init_signals()
if not self.LISTENERS:
self.LISTENERS = create_sockets(self.cfg, self.log)
listeners_str = ",".join([str(l) for l in self.LISTENERS])
self.log.debug("Arbiter booted")
self.log.info("Listening at: %s (%s)", listeners_str, self.pid)
self.log.info("Using worker: %s",
self.cfg.settings['worker_class'].get())
self.cfg.when_ready(self)
def init_signals(self):
"""\
Initialize master signal handling. Most of the signals
are queued. Child signals only wake up the master.
"""
# close old PIPE
if self.PIPE:
[os.close(p) for p in self.PIPE]
# initialize the pipe
self.PIPE = pair = os.pipe()
for p in pair:
util.set_non_blocking(p)
util.close_on_exec(p)
self.log.close_on_exec()
# initialize all signals
[signal.signal(s, self.signal) for s in self.SIGNALS]
signal.signal(signal.SIGCHLD, self.handle_chld)
def signal(self, sig, frame):
if len(self.SIG_QUEUE) < 5:
self.SIG_QUEUE.append(sig)
self.wakeup()
def run(self):
"Main master loop."
self.start()
util._setproctitle("master [%s]" % self.proc_name)
self.manage_workers()
while True:
try:
self.reap_workers()
sig = self.SIG_QUEUE.pop(0) if len(self.SIG_QUEUE) else None
if sig is None:
self.sleep()
self.murder_workers()
self.manage_workers()
continue
if sig not in self.SIG_NAMES:
self.log.info("Ignoring unknown signal: %s", sig)
continue
signame = self.SIG_NAMES.get(sig)
handler = getattr(self, "handle_%s" % signame, None)
if not handler:
self.log.error("Unhandled signal: %s", signame)
continue
self.log.info("Handling signal: %s", signame)
handler()
self.wakeup()
except StopIteration:
self.halt()
except KeyboardInterrupt:
self.halt()
except HaltServer as inst:
self.halt(reason=inst.reason, exit_status=inst.exit_status)
except SystemExit:
raise
except Exception:
self.log.info("Unhandled exception in main loop:\n%s",
traceback.format_exc())
self.stop(False)
if self.pidfile is not None:
self.pidfile.unlink()
sys.exit(-1)
def handle_chld(self, sig, frame):
"SIGCHLD handling"
self.wakeup()
def handle_hup(self):
"""\
HUP handling.
- Reload configuration
- Start the new worker processes with a new configuration
- Gracefully shutdown the old worker processes
"""
self.log.info("Hang up: %s", self.master_name)
self.reload()
def handle_quit(self):
"SIGQUIT handling"
raise StopIteration
def handle_int(self):
"SIGINT handling"
self.stop(False)
raise StopIteration
def handle_term(self):
"SIGTERM handling"
self.stop(False)
raise StopIteration
def handle_ttin(self):
"""\
SIGTTIN handling.
Increases the number of workers by one.
"""
self.num_workers += 1
self.manage_workers()
def handle_ttou(self):
"""\
SIGTTOU handling.
Decreases the number of workers by one.
"""
if self.num_workers <= 1:
return
self.num_workers -= 1
self.manage_workers()
def handle_usr1(self):
"""\
SIGUSR1 handling.
Kill all workers by sending them a SIGUSR1
"""
self.kill_workers(signal.SIGUSR1)
self.log.reopen_files()
def handle_usr2(self):
"""\
SIGUSR2 handling.
Creates a new master/worker set as a slave of the current
master without affecting old workers. Use this to do live
deployment with the ability to back out a change.
"""
self.reexec()
def handle_winch(self):
"SIGWINCH handling"
if self.cfg.daemon:
self.log.info("graceful stop of workers")
self.num_workers = 0
self.kill_workers(signal.SIGQUIT)
else:
self.log.info("SIGWINCH ignored. Not daemonized")
def wakeup(self):
"""\
Wake up the arbiter by writing to the PIPE
"""
try:
os.write(self.PIPE[1], b'.')
except IOError as e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise
def halt(self, reason=None, exit_status=0):
""" halt arbiter """
self.stop()
self.log.info("Shutting down: %s", self.master_name)
if reason is not None:
self.log.info("Reason: %s", reason)
if self.pidfile is not None:
self.pidfile.unlink()
sys.exit(exit_status)
def sleep(self):
"""\
Sleep until PIPE is readable or we timeout.
A readable PIPE means a signal occurred.
"""
try:
ready = select.select([self.PIPE[0]], [], [], 1.0)
if not ready[0]:
return
while os.read(self.PIPE[0], 1):
pass
except select.error as e:
if e.args[0] not in [errno.EAGAIN, errno.EINTR]:
raise
except OSError as e:
if e.errno not in [errno.EAGAIN, errno.EINTR]:
raise
except KeyboardInterrupt:
sys.exit()
def stop(self, graceful=True):
"""\
Stop workers
:attr graceful: boolean, If True (the default) workers will be
killed gracefully (ie. trying to wait for the current connection)
"""
self.LISTENERS = []
sig = signal.SIGQUIT
if not graceful:
sig = signal.SIGTERM
limit = time.time() + self.cfg.graceful_timeout
while self.WORKERS and time.time() < limit:
self.kill_workers(sig)
time.sleep(0.1)
self.reap_workers()
self.kill_workers(signal.SIGKILL)
def reexec(self):
"""\
Relaunch the master and workers.
"""
if self.pidfile is not None:
self.pidfile.rename("%s.oldbin" % self.pidfile.fname)
self.reexec_pid = os.fork()
if self.reexec_pid != 0:
self.master_name = "Old Master"
return
environ = self.cfg.env_orig.copy()
fds = [l.fileno() for l in self.LISTENERS]
environ['GUNICORN_FD'] = ",".join([str(fd) for fd in fds])
os.chdir(self.START_CTX['cwd'])
self.cfg.pre_exec(self)
# exec the process using the original environment
os.execvpe(self.START_CTX[0], self.START_CTX['args'], environ)
def reload(self):
old_address = self.cfg.address
# reset the old environment
for k in self.cfg.env:
if k in self.cfg.env_orig:
# reset the key to the value it had before
# we launched gunicorn
os.environ[k] = self.cfg.env_orig[k]
else:
# delete the value set by gunicorn
try:
del os.environ[k]
except KeyError:
pass
# reload conf
self.app.reload()
self.setup(self.app)
# reopen log files
self.log.reopen_files()
# do we need to change listener ?
if old_address != self.cfg.address:
# close all listeners
[l.close() for l in self.LISTENERS]
# init new listeners
self.LISTENERS = create_sockets(self.cfg, self.log)
self.log.info("Listening at: %s", ",".join(str(self.LISTENERS)))
# do some actions on reload
self.cfg.on_reload(self)
# unlink pidfile
if self.pidfile is not None:
self.pidfile.unlink()
# create new pidfile
if self.cfg.pidfile is not None:
self.pidfile = Pidfile(self.cfg.pidfile)
self.pidfile.create(self.pid)
# set new proc_name
util._setproctitle("master [%s]" % self.proc_name)
# spawn new workers
for i in range(self.cfg.workers):
self.spawn_worker()
# manage workers
self.manage_workers()
def murder_workers(self):
"""\
Kill unused/idle workers
"""
if not self.timeout:
return
for (pid, worker) in self.WORKERS.items():
try:
if time.time() - worker.tmp.last_update() <= self.timeout:
continue
except ValueError:
continue
self.log.critical("WORKER TIMEOUT (pid:%s)", pid)
self.kill_worker(pid, signal.SIGKILL)
def reap_workers(self):
"""\
Reap workers to avoid zombie processes
"""
try:
while True:
wpid, status = os.waitpid(-1, os.WNOHANG)
if not wpid:
break
if self.reexec_pid == wpid:
self.reexec_pid = 0
else:
# A worker said it cannot boot. We'll shut down
# to avoid infinite start/stop cycles.
exitcode = status >> 8
if exitcode == self.WORKER_BOOT_ERROR:
reason = "Worker failed to boot."
raise HaltServer(reason, self.WORKER_BOOT_ERROR)
if exitcode == self.APP_LOAD_ERROR:
reason = "App failed to load."
raise HaltServer(reason, self.APP_LOAD_ERROR)
worker = self.WORKERS.pop(wpid, None)
if not worker:
continue
worker.tmp.close()
except OSError as e:
if e.errno == errno.ECHILD:
pass
def manage_workers(self):
"""\
Maintain the number of workers by spawning or killing
as required.
"""
if len(self.WORKERS.keys()) < self.num_workers:
self.spawn_workers()
workers = self.WORKERS.items()
workers = sorted(workers, key=lambda w: w[1].age)
while len(workers) > self.num_workers:
(pid, _) = workers.pop(0)
self.kill_worker(pid, signal.SIGQUIT)
def spawn_worker(self):
self.worker_age += 1
worker = self.worker_class(self.worker_age, self.pid, self.LISTENERS,
self.app, self.timeout / 2.0,
self.cfg, self.log)
self.cfg.pre_fork(self, worker)
pid = os.fork()
if pid != 0:
self.WORKERS[pid] = worker
return pid
# Process Child
worker_pid = os.getpid()
try:
util._setproctitle("worker [%s]" % self.proc_name)
self.log.info("Booting worker with pid: %s", worker_pid)
self.cfg.post_fork(self, worker)
worker.init_process()
sys.exit(0)
except SystemExit:
raise
except AppImportError as e:
self.log.debug("Exception while loading the application: \n%s",
traceback.format_exc())
sys.stderr.write("%s\n" % e)
sys.stderr.flush()
sys.exit(self.APP_LOAD_ERROR)
except:
self.log.exception("Exception in worker process:\n%s",
traceback.format_exc())
if not worker.booted:
sys.exit(self.WORKER_BOOT_ERROR)
sys.exit(-1)
finally:
self.log.info("Worker exiting (pid: %s)", worker_pid)
try:
worker.tmp.close()
self.cfg.worker_exit(self, worker)
except:
pass
def spawn_workers(self):
"""\
Spawn new workers as needed.
This is where a worker process leaves the main loop
of the master process.
"""
for i in range(self.num_workers - len(self.WORKERS.keys())):
self.spawn_worker()
time.sleep(0.1 * random.random())
def kill_workers(self, sig):
"""\
Kill all workers with the signal `sig`
:attr sig: `signal.SIG*` value
"""
for pid in self.WORKERS.keys():
self.kill_worker(pid, sig)
def kill_worker(self, pid, sig):
"""\
Kill a worker
:attr pid: int, worker pid
:attr sig: `signal.SIG*` value
"""
try:
os.kill(pid, sig)
except OSError as e:
if e.errno == errno.ESRCH:
try:
worker = self.WORKERS.pop(pid)
worker.tmp.close()
self.cfg.worker_exit(self, worker)
return
except (KeyError, OSError):
return
raise
| mit |
nikesh-mahalka/cinder | cinder/api/views/cgsnapshots.py | 23 | 2446 | # Copyright (C) 2012 - 2014 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from cinder.api import common
LOG = logging.getLogger(__name__)
class ViewBuilder(common.ViewBuilder):
"""Model cgsnapshot API responses as a python dictionary."""
_collection_name = "cgsnapshots"
def __init__(self):
"""Initialize view builder."""
super(ViewBuilder, self).__init__()
def summary_list(self, request, cgsnapshots):
"""Show a list of cgsnapshots without many details."""
return self._list_view(self.summary, request, cgsnapshots)
def detail_list(self, request, cgsnapshots):
"""Detailed view of a list of cgsnapshots ."""
return self._list_view(self.detail, request, cgsnapshots)
def summary(self, request, cgsnapshot):
"""Generic, non-detailed view of a cgsnapshot."""
return {
'cgsnapshot': {
'id': cgsnapshot['id'],
'name': cgsnapshot['name']
}
}
def detail(self, request, cgsnapshot):
"""Detailed view of a single cgsnapshot."""
return {
'cgsnapshot': {
'id': cgsnapshot.get('id'),
'consistencygroup_id': cgsnapshot.get('consistencygroup_id'),
'status': cgsnapshot.get('status'),
'created_at': cgsnapshot.get('created_at'),
'name': cgsnapshot.get('name'),
'description': cgsnapshot.get('description')
}
}
def _list_view(self, func, request, cgsnapshots):
"""Provide a view for a list of cgsnapshots."""
cgsnapshots_list = [func(request, cgsnapshot)['cgsnapshot']
for cgsnapshot in cgsnapshots]
cgsnapshots_dict = dict(cgsnapshots=cgsnapshots_list)
return cgsnapshots_dict
| apache-2.0 |
vivianli32/TravelConnect | flask/lib/python3.4/site-packages/openid/extension.py | 13 | 1703 | import warnings
from openid import message as message_module
class Extension(object):
"""An interface for OpenID extensions.
@ivar ns_uri: The namespace to which to add the arguments for this
extension
"""
ns_uri = None
ns_alias = None
def getExtensionArgs(self):
"""Get the string arguments that should be added to an OpenID
message for this extension.
@returns: A dictionary of completely non-namespaced arguments
to be added. For example, if the extension's alias is
'uncle', and this method returns {'meat':'Hot Rats'}, the
final message will contain {'openid.uncle.meat':'Hot Rats'}
"""
raise NotImplementedError()
def toMessage(self, message=None):
"""Add the arguments from this extension to the provided
message, or create a new message containing only those
arguments.
@returns: The message with the extension arguments added
"""
if message is None:
warnings.warn(
'Passing None to Extension.toMessage is deprecated. '
'Creating a message assuming you want OpenID 2.',
DeprecationWarning, stacklevel=2)
message = message_module.Message(message_module.OPENID2_NS)
implicit = message.isOpenID1()
try:
message.namespaces.addAlias(self.ns_uri, self.ns_alias,
implicit=implicit)
except KeyError:
if message.namespaces.getAlias(self.ns_uri) != self.ns_alias:
raise
message.updateArgs(self.ns_uri, self.getExtensionArgs())
return message
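# Illustrative sketch of a concrete extension (added for clarity; the names
# here are assumptions, real implementations such as the sreg extension live
# in openid.extensions):
#
# class HelloExtension(Extension):
#     ns_uri = 'http://example.com/hello/1.0'
#     ns_alias = 'hello'
#     def getExtensionArgs(self):
#         return {'greeting': 'hi'}
#
# Calling HelloExtension().toMessage(message) would add
# 'openid.hello.greeting' to the message, mirroring the 'openid.uncle.meat'
# example described in getExtensionArgs above.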
| mit |
the76thHunter/tmdbsimple | tmdbsimple/search.py | 8 | 6919 | # -*- coding: utf-8 -*-
"""
tmdbsimple.search
~~~~~~~~~~~~~~~~~
This module implements the Search functionality of tmdbsimple.
Created by Celia Oakley on 2013-10-31.
:copyright: (c) 2013-2014 by Celia Oakley
:license: GPLv3, see LICENSE for more details
"""
from .base import TMDB
class Search(TMDB):
"""
Search functionality
See: http://docs.themoviedb.apiary.io/#search
"""
BASE_PATH = 'search'
URLS = {
'movie': '/movie',
'collection': '/collection',
'tv': '/tv',
'person': '/person',
'list': '/list',
'company': '/company',
'keyword': '/keyword',
'multi': '/multi'
}
def movie(self, **kwargs):
"""
Search for movies by title.
Args:
query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
include_adult: (optional) Toggle the inclusion of adult titles.
Expected value is True or False.
year: (optional) Filter the results release dates to matches that
include this value.
primary_release_year: (optional) Filter the results so that only
the primary release dates have this value.
search_type: (optional) By default, the search type is 'phrase'.
This is almost guaranteed the option you will want.
It's a great all purpose search type and by far the
most tuned for every day querying. For those wanting
more of an "autocomplete" type search, set this
option to 'ngram'.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_path('movie')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
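# Illustrative usage (added for clarity; assumes an API key has already been
# configured, e.g. via tmdbsimple.API_KEY):
#   search = Search()
#   search.movie(query='The Bourne Ultimatum', year=2007)
#   for movie in search.results:
#       print(movie['title'], movie['release_date'])
# Attributes such as `results` are set by _set_attrs_to_values() above.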
def collection(self, **kwargs):
"""
Search for collections by name.
Args:
query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_path('collection')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def tv(self, **kwargs):
"""
Search for TV shows by title.
Args:
query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
first_air_date_year: (optional) Filter the results to only match
shows that have an air date with this value.
search_type: (optional) By default, the search type is 'phrase'.
This is almost guaranteed to be the option you will want.
It's a great all purpose search type and by far the
most tuned for every day querying. For those wanting
more of an "autocomplete" type search, set this
option to 'ngram'.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_path('tv')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def person(self, **kwargs):
"""
Search for people by name.
Args:
query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
include_adult: (optional) Toggle the inclusion of adult titles.
Expected value is True or False.
search_type: (optional) By default, the search type is 'phrase'.
This is almost guaranteed to be the option you will want.
It's a great all purpose search type and by far the
most tuned for every day querying. For those wanting
more of an "autocomplete" type search, set this
option to 'ngram'.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_path('person')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def list(self, **kwargs):
"""
Search for lists by name and description.
Args:
query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
include_adult: (optional) Toggle the inclusion of adult titles.
Expected value is True or False.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_path('list')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def company(self, **kwargs):
"""
Search for companies by name.
Args:
query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_path('company')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def keyword(self, **kwargs):
"""
Search for keywords by name.
Args:
query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_path('keyword')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response
def multi(self, **kwargs):
"""
Search the movie, tv show and person collections with a single query.
Args:
query: CGI escaped string.
page: (optional) Minimum value of 1. Expected value is an integer.
language: (optional) ISO 639-1 code.
include_adult: (optional) Toggle the inclusion of adult titles.
Expected value is True or False.
Returns:
A dict representation of the JSON returned from the API.
"""
path = self._get_path('multi')
response = self._GET(path, kwargs)
self._set_attrs_to_values(response)
return response | gpl-3.0 |
gripped2/xbmc | addons/service.xbmc.versioncheck/lib/common.py | 82 | 7008 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Team-XBMC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import xbmc
import xbmcaddon
import xbmcgui
import xbmcvfs
import unicodedata  # needed by normalize_string(); otherwise the bare except silently skips normalization
__addon__ = xbmcaddon.Addon()
__addonversion__ = __addon__.getAddonInfo('version')
__addonname__ = __addon__.getAddonInfo('name')
__addonpath__ = __addon__.getAddonInfo('path').decode('utf-8')
__addonprofile__ = xbmc.translatePath( __addon__.getAddonInfo('profile') ).decode('utf-8')
__icon__ = __addon__.getAddonInfo('icon')
# Fixes unicode problems
def string_unicode(text, encoding='utf-8'):
try:
text = unicode( text, encoding )
except:
pass
return text
def normalize_string(text):
try:
text = unicodedata.normalize('NFKD', string_unicode(text)).encode('ascii', 'ignore')
except:
pass
return text
def localise(id):
string = normalize_string(__addon__.getLocalizedString(id))
return string
def log(txt):
if isinstance (txt,str):
txt = txt.decode("utf-8")
message = u'%s: %s' % ("Version Check", txt)
xbmc.log(msg=message.encode("utf-8"), level=xbmc.LOGDEBUG)
def get_password_from_user():
keyboard = xbmc.Keyboard("", __addonname__ + "," +localise(32022), True)
keyboard.doModal()
if (keyboard.isConfirmed()):
pwd = keyboard.getText()
return pwd
def message_upgrade_success():
xbmc.executebuiltin("XBMC.Notification(%s, %s, %d, %s)" %(__addonname__,
localise(32013),
15000,
__icon__))
def message_restart():
if dialog_yesno(32014):
xbmc.executebuiltin("RestartApp")
def dialog_yesno(line1 = 0, line2 = 0):
return xbmcgui.Dialog().yesno(__addonname__,
localise(line1),
localise(line2))
def upgrade_message(msg, oldversion, upgrade, msg_current, msg_available):
# Don't show while watching a video
while(xbmc.Player().isPlayingVideo() and not xbmc.abortRequested):
xbmc.sleep(1000)
i = 0
while(i < 5 and not xbmc.abortRequested):
xbmc.sleep(1000)
i += 1
if __addon__.getSetting("lastnotified_version") < __addonversion__:
xbmcgui.Dialog().ok(__addonname__,
localise(msg),
localise(32001),
localise(32002))
#__addon__.setSetting("lastnotified_version", __addonversion__)
else:
log("Already notified one time for upgrading.")
def upgrade_message2( version_installed, version_available, version_stable, oldversion, upgrade,):
# shorten releasecandidate to rc
if version_installed['tag'] == 'releasecandidate':
version_installed['tag'] = 'rc'
if version_available['tag'] == 'releasecandidate':
version_available['tag'] = 'rc'
# convert json-rpc result to strings for usage
msg_current = '%i.%i %s%s' %(version_installed['major'],
version_installed['minor'],
version_installed['tag'],
version_installed.get('tagversion',''))
msg_available = version_available['major'] + '.' + version_available['minor'] + ' ' + version_available['tag'] + version_available.get('tagversion','')
msg_stable = version_stable['major'] + '.' + version_stable['minor'] + ' ' + version_stable['tag'] + version_stable.get('tagversion','')
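    # Illustration only (values hypothetical): if version_available were
    # {'major': '13', 'minor': '2', 'tag': 'beta', 'tagversion': '1'}, the
    # concatenation above yields msg_available == '13.2 beta1'. The available and
    # stable versions are expected to arrive as strings, unlike the json-rpc
    # result used for msg_current, which is formatted with %i.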
msg = localise(32034) %(msg_current, msg_available)
    # Don't show the notification while watching a video
while(xbmc.Player().isPlayingVideo() and not xbmc.abortRequested):
xbmc.sleep(1000)
i = 0
while(i < 10 and not xbmc.abortRequested):
xbmc.sleep(1000)
i += 1
# hack: convert current version number to stable string
# so users don't get notified again. remove in future
if __addon__.getSetting("lastnotified_version") == '0.1.24':
__addon__.setSetting("lastnotified_stable", msg_stable)
# Show different dialogs depending if there's a newer stable available.
# Also split them between xbmc and kodi notifications to reduce possible confusion.
# People will find out once they visit the website.
# For stable only notify once and when there's a newer stable available.
# Ignore any add-on updates as those only count for != stable
if oldversion == 'stable' and __addon__.getSetting("lastnotified_stable") != msg_stable:
if xbmcaddon.Addon('xbmc.addon').getAddonInfo('version') < "13.9.0":
xbmcgui.Dialog().ok(__addonname__,
msg,
localise(32030),
localise(32031))
else:
xbmcgui.Dialog().ok(__addonname__,
msg,
localise(32032),
localise(32033))
__addon__.setSetting("lastnotified_stable", msg_stable)
elif oldversion != 'stable' and __addon__.getSetting("lastnotified_version") != msg_available:
if xbmcaddon.Addon('xbmc.addon').getAddonInfo('version') < "13.9.0":
# point them to xbmc.org
xbmcgui.Dialog().ok(__addonname__,
msg,
localise(32035),
localise(32031))
else:
#use kodi.tv
xbmcgui.Dialog().ok(__addonname__,
msg,
localise(32035),
localise(32033))
# older skins don't support a text field in the OK dialog.
# let's use split lines for now. see code above.
'''
msg = localise(32034) %(msg_current, msg_available)
if oldversion == 'stable':
msg = msg + ' ' + localise(32030)
else:
msg = msg + ' ' + localise(32035)
msg = msg + ' ' + localise(32031)
xbmcgui.Dialog().ok(__addonname__, msg)
#__addon__.setSetting("lastnotified_version", __addonversion__)
'''
__addon__.setSetting("lastnotified_version", msg_available)
else:
log("Already notified one time for upgrading.") | gpl-2.0 |
MQQiang/kbengine | kbe/res/scripts/common/Lib/site-packages/setuptools/command/easy_install.py | 206 | 72706 | #!/usr/bin/env python
"""
Easy Install
------------
A tool for doing automatic download/extract/build of distutils-based Python
packages. For detailed documentation, see the accompanying EasyInstall.txt
file, or visit the `EasyInstall home page`__.
__ https://pythonhosted.org/setuptools/easy_install.html
"""
import sys
import os
import zipimport
import shutil
import tempfile
import zipfile
import re
import stat
import random
import platform
import textwrap
import warnings
import site
import struct
from glob import glob
from distutils import log, dir_util
import pkg_resources
from setuptools import Command, _dont_write_bytecode
from setuptools.sandbox import run_setup
from setuptools.py31compat import get_path, get_config_vars
from distutils.util import get_platform
from distutils.util import convert_path, subst_vars
from distutils.errors import DistutilsArgError, DistutilsOptionError, \
DistutilsError, DistutilsPlatformError
from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS
from setuptools.command import setopt
from setuptools.archive_util import unpack_archive
from setuptools.package_index import PackageIndex
from setuptools.package_index import URL_SCHEME
from setuptools.command import bdist_egg, egg_info
from setuptools.compat import (iteritems, maxsize, basestring, unicode,
reraise)
from pkg_resources import (
yield_lines, normalize_path, resource_string, ensure_directory,
get_distribution, find_distributions, Environment, Requirement,
Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound,
VersionConflict, DEVELOP_DIST,
)
sys_executable = os.environ.get('__VENV_LAUNCHER__',
os.path.normpath(sys.executable))
__all__ = [
'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg',
'main', 'get_exe_prefixes',
]
def is_64bit():
return struct.calcsize("P") == 8
def samefile(p1, p2):
both_exist = os.path.exists(p1) and os.path.exists(p2)
use_samefile = hasattr(os.path, 'samefile') and both_exist
if use_samefile:
return os.path.samefile(p1, p2)
norm_p1 = os.path.normpath(os.path.normcase(p1))
norm_p2 = os.path.normpath(os.path.normcase(p2))
return norm_p1 == norm_p2
if sys.version_info <= (3,):
def _to_ascii(s):
return s
def isascii(s):
try:
unicode(s, 'ascii')
return True
except UnicodeError:
return False
else:
def _to_ascii(s):
return s.encode('ascii')
def isascii(s):
try:
s.encode('ascii')
return True
except UnicodeError:
return False
class easy_install(Command):
"""Manage a download/build/install process"""
description = "Find/get/install Python packages"
command_consumes_arguments = True
user_options = [
('prefix=', None, "installation prefix"),
("zip-ok", "z", "install package as a zipfile"),
("multi-version", "m", "make apps have to require() a version"),
("upgrade", "U", "force upgrade (searches PyPI for latest versions)"),
("install-dir=", "d", "install package to DIR"),
("script-dir=", "s", "install scripts to DIR"),
("exclude-scripts", "x", "Don't install scripts"),
("always-copy", "a", "Copy all needed packages to install dir"),
("index-url=", "i", "base URL of Python Package Index"),
("find-links=", "f", "additional URL(s) to search for packages"),
("build-directory=", "b",
"download/extract/build in DIR; keep the results"),
('optimize=', 'O',
"also compile with optimization: -O1 for \"python -O\", "
"-O2 for \"python -OO\", and -O0 to disable [default: -O0]"),
('record=', None,
"filename in which to record list of installed files"),
('always-unzip', 'Z', "don't install as a zipfile, no matter what"),
('site-dirs=','S',"list of directories where .pth files work"),
('editable', 'e', "Install specified packages in editable form"),
('no-deps', 'N', "don't install dependencies"),
('allow-hosts=', 'H', "pattern(s) that hostnames must match"),
('local-snapshots-ok', 'l',
"allow building eggs from local checkouts"),
('version', None, "print version information and exit"),
('no-find-links', None,
"Don't load find-links defined in packages being installed")
]
boolean_options = [
'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy',
'editable',
'no-deps', 'local-snapshots-ok', 'version'
]
if site.ENABLE_USER_SITE:
help_msg = "install in user site-package '%s'" % site.USER_SITE
user_options.append(('user', None, help_msg))
boolean_options.append('user')
negative_opt = {'always-unzip': 'zip-ok'}
create_index = PackageIndex
def initialize_options(self):
if site.ENABLE_USER_SITE:
whereami = os.path.abspath(__file__)
self.user = whereami.startswith(site.USER_SITE)
else:
self.user = 0
self.zip_ok = self.local_snapshots_ok = None
self.install_dir = self.script_dir = self.exclude_scripts = None
self.index_url = None
self.find_links = None
self.build_directory = None
self.args = None
self.optimize = self.record = None
self.upgrade = self.always_copy = self.multi_version = None
self.editable = self.no_deps = self.allow_hosts = None
self.root = self.prefix = self.no_report = None
self.version = None
self.install_purelib = None # for pure module distributions
self.install_platlib = None # non-pure (dists w/ extensions)
self.install_headers = None # for C/C++ headers
self.install_lib = None # set to either purelib or platlib
self.install_scripts = None
self.install_data = None
self.install_base = None
self.install_platbase = None
if site.ENABLE_USER_SITE:
self.install_userbase = site.USER_BASE
self.install_usersite = site.USER_SITE
else:
self.install_userbase = None
self.install_usersite = None
self.no_find_links = None
# Options not specifiable via command line
self.package_index = None
self.pth_file = self.always_copy_from = None
self.site_dirs = None
self.installed_projects = {}
self.sitepy_installed = False
# Always read easy_install options, even if we are subclassed, or have
# an independent instance created. This ensures that defaults will
# always come from the standard configuration file(s)' "easy_install"
# section, even if this is a "develop" or "install" command, or some
# other embedding.
self._dry_run = None
self.verbose = self.distribution.verbose
self.distribution._set_command_options(
self, self.distribution.get_option_dict('easy_install')
)
def delete_blockers(self, blockers):
for filename in blockers:
if os.path.exists(filename) or os.path.islink(filename):
log.info("Deleting %s", filename)
if not self.dry_run:
if os.path.isdir(filename) and not os.path.islink(filename):
rmtree(filename)
else:
os.unlink(filename)
def finalize_options(self):
if self.version:
print('setuptools %s' % get_distribution('setuptools').version)
sys.exit()
py_version = sys.version.split()[0]
prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix')
self.config_vars = {
'dist_name': self.distribution.get_name(),
'dist_version': self.distribution.get_version(),
'dist_fullname': self.distribution.get_fullname(),
'py_version': py_version,
'py_version_short': py_version[0:3],
'py_version_nodot': py_version[0] + py_version[2],
'sys_prefix': prefix,
'prefix': prefix,
'sys_exec_prefix': exec_prefix,
'exec_prefix': exec_prefix,
# Only python 3.2+ has abiflags
'abiflags': getattr(sys, 'abiflags', ''),
}
if site.ENABLE_USER_SITE:
self.config_vars['userbase'] = self.install_userbase
self.config_vars['usersite'] = self.install_usersite
# fix the install_dir if "--user" was used
#XXX: duplicate of the code in the setup command
if self.user and site.ENABLE_USER_SITE:
self.create_home_path()
if self.install_userbase is None:
raise DistutilsPlatformError(
"User base directory is not specified")
self.install_base = self.install_platbase = self.install_userbase
if os.name == 'posix':
self.select_scheme("unix_user")
else:
self.select_scheme(os.name + "_user")
self.expand_basedirs()
self.expand_dirs()
self._expand('install_dir','script_dir','build_directory','site_dirs')
# If a non-default installation directory was specified, default the
# script directory to match it.
if self.script_dir is None:
self.script_dir = self.install_dir
if self.no_find_links is None:
self.no_find_links = False
# Let install_dir get set by install_lib command, which in turn
# gets its info from the install command, and takes into account
# --prefix and --home and all that other crud.
self.set_undefined_options('install_lib',
('install_dir','install_dir')
)
# Likewise, set default script_dir from 'install_scripts.install_dir'
self.set_undefined_options('install_scripts',
('install_dir', 'script_dir')
)
if self.user and self.install_purelib:
self.install_dir = self.install_purelib
self.script_dir = self.install_scripts
# default --record from the install command
self.set_undefined_options('install', ('record', 'record'))
# Should this be moved to the if statement below? It's not used
# elsewhere
normpath = map(normalize_path, sys.path)
self.all_site_dirs = get_site_dirs()
if self.site_dirs is not None:
site_dirs = [
os.path.expanduser(s.strip()) for s in self.site_dirs.split(',')
]
for d in site_dirs:
if not os.path.isdir(d):
log.warn("%s (in --site-dirs) does not exist", d)
elif normalize_path(d) not in normpath:
raise DistutilsOptionError(
d+" (in --site-dirs) is not on sys.path"
)
else:
self.all_site_dirs.append(normalize_path(d))
if not self.editable: self.check_site_dir()
self.index_url = self.index_url or "https://pypi.python.org/simple"
self.shadow_path = self.all_site_dirs[:]
for path_item in self.install_dir, normalize_path(self.script_dir):
if path_item not in self.shadow_path:
self.shadow_path.insert(0, path_item)
if self.allow_hosts is not None:
hosts = [s.strip() for s in self.allow_hosts.split(',')]
else:
hosts = ['*']
if self.package_index is None:
self.package_index = self.create_index(
self.index_url, search_path = self.shadow_path, hosts=hosts,
)
self.local_index = Environment(self.shadow_path+sys.path)
if self.find_links is not None:
if isinstance(self.find_links, basestring):
self.find_links = self.find_links.split()
else:
self.find_links = []
if self.local_snapshots_ok:
self.package_index.scan_egg_links(self.shadow_path+sys.path)
if not self.no_find_links:
self.package_index.add_find_links(self.find_links)
self.set_undefined_options('install_lib', ('optimize','optimize'))
if not isinstance(self.optimize,int):
try:
self.optimize = int(self.optimize)
if not (0 <= self.optimize <= 2): raise ValueError
except ValueError:
raise DistutilsOptionError("--optimize must be 0, 1, or 2")
if self.editable and not self.build_directory:
raise DistutilsArgError(
"Must specify a build directory (-b) when using --editable"
)
if not self.args:
raise DistutilsArgError(
"No urls, filenames, or requirements specified (see --help)")
self.outputs = []
def _expand_attrs(self, attrs):
for attr in attrs:
val = getattr(self, attr)
if val is not None:
if os.name == 'posix' or os.name == 'nt':
val = os.path.expanduser(val)
val = subst_vars(val, self.config_vars)
setattr(self, attr, val)
def expand_basedirs(self):
"""Calls `os.path.expanduser` on install_base, install_platbase and
root."""
self._expand_attrs(['install_base', 'install_platbase', 'root'])
def expand_dirs(self):
"""Calls `os.path.expanduser` on install dirs."""
self._expand_attrs(['install_purelib', 'install_platlib',
'install_lib', 'install_headers',
'install_scripts', 'install_data',])
def run(self):
if self.verbose != self.distribution.verbose:
log.set_verbosity(self.verbose)
try:
for spec in self.args:
self.easy_install(spec, not self.no_deps)
if self.record:
outputs = self.outputs
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
from distutils import file_util
self.execute(
file_util.write_file, (self.record, outputs),
"writing list of installed files to '%s'" %
self.record
)
self.warn_deprecated_options()
finally:
log.set_verbosity(self.distribution.verbose)
def pseudo_tempname(self):
"""Return a pseudo-tempname base in the install directory.
This code is intentionally naive; if a malicious party can write to
the target directory you're already in deep doodoo.
"""
try:
pid = os.getpid()
except:
pid = random.randint(0, maxsize)
return os.path.join(self.install_dir, "test-easy-install-%s" % pid)
def warn_deprecated_options(self):
pass
def check_site_dir(self):
"""Verify that self.install_dir is .pth-capable dir, if needed"""
instdir = normalize_path(self.install_dir)
pth_file = os.path.join(instdir,'easy-install.pth')
# Is it a configured, PYTHONPATH, implicit, or explicit site dir?
is_site_dir = instdir in self.all_site_dirs
if not is_site_dir and not self.multi_version:
# No? Then directly test whether it does .pth file processing
is_site_dir = self.check_pth_processing()
else:
# make sure we can write to target dir
testfile = self.pseudo_tempname()+'.write-test'
test_exists = os.path.exists(testfile)
try:
if test_exists: os.unlink(testfile)
open(testfile,'w').close()
os.unlink(testfile)
except (OSError,IOError):
self.cant_write_to_target()
if not is_site_dir and not self.multi_version:
# Can't install non-multi to non-site dir
raise DistutilsError(self.no_default_version_msg())
if is_site_dir:
if self.pth_file is None:
self.pth_file = PthDistributions(pth_file, self.all_site_dirs)
else:
self.pth_file = None
PYTHONPATH = os.environ.get('PYTHONPATH','').split(os.pathsep)
if instdir not in map(normalize_path, [_f for _f in PYTHONPATH if _f]):
# only PYTHONPATH dirs need a site.py, so pretend it's there
self.sitepy_installed = True
elif self.multi_version and not os.path.exists(pth_file):
self.sitepy_installed = True # don't need site.py in this case
self.pth_file = None # and don't create a .pth file
self.install_dir = instdir
def cant_write_to_target(self):
template = """can't create or remove files in install directory
The following error occurred while trying to add or remove files in the
installation directory:
%s
The installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
"""
msg = template % (sys.exc_info()[1], self.install_dir,)
if not os.path.exists(self.install_dir):
msg += """
This directory does not currently exist. Please create it and try again, or
choose a different installation directory (using the -d or --install-dir
option).
"""
else:
msg += """
Perhaps your account does not have write access to this directory? If the
installation directory is a system-owned directory, you may need to sign in
as the administrator or "root" account. If you do not have administrative
access to this machine, you may wish to choose a different installation
directory, preferably one that is listed in your PYTHONPATH environment
variable.
For information on other options, you may wish to consult the
documentation at:
https://pythonhosted.org/setuptools/easy_install.html
Please make the appropriate changes for your system and try again.
"""
raise DistutilsError(msg)
def check_pth_processing(self):
"""Empirically verify whether .pth files are supported in inst. dir"""
instdir = self.install_dir
log.info("Checking .pth file support in %s", instdir)
pth_file = self.pseudo_tempname()+".pth"
ok_file = pth_file+'.ok'
ok_exists = os.path.exists(ok_file)
try:
if ok_exists: os.unlink(ok_file)
dirname = os.path.dirname(ok_file)
if not os.path.exists(dirname):
os.makedirs(dirname)
f = open(pth_file,'w')
except (OSError,IOError):
self.cant_write_to_target()
else:
try:
f.write("import os; f = open(%r, 'w'); f.write('OK'); f.close()\n" % (ok_file,))
f.close()
f=None
executable = sys.executable
if os.name=='nt':
dirname,basename = os.path.split(executable)
alt = os.path.join(dirname,'pythonw.exe')
if basename.lower()=='python.exe' and os.path.exists(alt):
# use pythonw.exe to avoid opening a console window
executable = alt
from distutils.spawn import spawn
spawn([executable,'-E','-c','pass'],0)
if os.path.exists(ok_file):
log.info(
"TEST PASSED: %s appears to support .pth files",
instdir
)
return True
finally:
if f:
f.close()
if os.path.exists(ok_file):
os.unlink(ok_file)
if os.path.exists(pth_file):
os.unlink(pth_file)
if not self.multi_version:
log.warn("TEST FAILED: %s does NOT support .pth files", instdir)
return False
def install_egg_scripts(self, dist):
"""Write all the scripts for `dist`, unless scripts are excluded"""
if not self.exclude_scripts and dist.metadata_isdir('scripts'):
for script_name in dist.metadata_listdir('scripts'):
if dist.metadata_isdir('scripts/' + script_name):
# The "script" is a directory, likely a Python 3
# __pycache__ directory, so skip it.
continue
self.install_script(
dist, script_name,
dist.get_metadata('scripts/'+script_name)
)
self.install_wrapper_scripts(dist)
def add_output(self, path):
if os.path.isdir(path):
for base, dirs, files in os.walk(path):
for filename in files:
self.outputs.append(os.path.join(base,filename))
else:
self.outputs.append(path)
def not_editable(self, spec):
if self.editable:
raise DistutilsArgError(
"Invalid argument %r: you can't use filenames or URLs "
"with --editable (except via the --find-links option)."
% (spec,)
)
def check_editable(self,spec):
if not self.editable:
return
if os.path.exists(os.path.join(self.build_directory, spec.key)):
raise DistutilsArgError(
"%r already exists in %s; can't do a checkout there" %
(spec.key, self.build_directory)
)
def easy_install(self, spec, deps=False):
tmpdir = tempfile.mkdtemp(prefix="easy_install-")
download = None
if not self.editable: self.install_site_py()
try:
if not isinstance(spec,Requirement):
if URL_SCHEME(spec):
# It's a url, download it to tmpdir and process
self.not_editable(spec)
download = self.package_index.download(spec, tmpdir)
return self.install_item(None, download, tmpdir, deps, True)
elif os.path.exists(spec):
# Existing file or directory, just process it directly
self.not_editable(spec)
return self.install_item(None, spec, tmpdir, deps, True)
else:
spec = parse_requirement_arg(spec)
self.check_editable(spec)
dist = self.package_index.fetch_distribution(
spec, tmpdir, self.upgrade, self.editable, not self.always_copy,
self.local_index
)
if dist is None:
msg = "Could not find suitable distribution for %r" % spec
if self.always_copy:
msg+=" (--always-copy skips system and development eggs)"
raise DistutilsError(msg)
elif dist.precedence==DEVELOP_DIST:
# .egg-info dists don't need installing, just process deps
self.process_distribution(spec, dist, deps, "Using")
return dist
else:
return self.install_item(spec, dist.location, tmpdir, deps)
finally:
if os.path.exists(tmpdir):
rmtree(tmpdir)
def install_item(self, spec, download, tmpdir, deps, install_needed=False):
        # Installation is also needed if the file is in tmpdir or is not an egg
install_needed = install_needed or self.always_copy
install_needed = install_needed or os.path.dirname(download) == tmpdir
install_needed = install_needed or not download.endswith('.egg')
install_needed = install_needed or (
self.always_copy_from is not None and
os.path.dirname(normalize_path(download)) ==
normalize_path(self.always_copy_from)
)
if spec and not install_needed:
# at this point, we know it's a local .egg, we just don't know if
# it's already installed.
for dist in self.local_index[spec.project_name]:
if dist.location==download:
break
else:
install_needed = True # it's not in the local index
log.info("Processing %s", os.path.basename(download))
if install_needed:
dists = self.install_eggs(spec, download, tmpdir)
for dist in dists:
self.process_distribution(spec, dist, deps)
else:
dists = [self.egg_distribution(download)]
self.process_distribution(spec, dists[0], deps, "Using")
if spec is not None:
for dist in dists:
if dist in spec:
return dist
def select_scheme(self, name):
"""Sets the install directories by applying the install schemes."""
# it's the caller's problem if they supply a bad name!
scheme = INSTALL_SCHEMES[name]
for key in SCHEME_KEYS:
attrname = 'install_' + key
if getattr(self, attrname) is None:
setattr(self, attrname, scheme[key])
def process_distribution(self, requirement, dist, deps=True, *info):
self.update_pth(dist)
self.package_index.add(dist)
self.local_index.add(dist)
self.install_egg_scripts(dist)
self.installed_projects[dist.key] = dist
log.info(self.installation_report(requirement, dist, *info))
if (dist.has_metadata('dependency_links.txt') and
not self.no_find_links):
self.package_index.add_find_links(
dist.get_metadata_lines('dependency_links.txt')
)
if not deps and not self.always_copy:
return
elif requirement is not None and dist.key != requirement.key:
log.warn("Skipping dependencies for %s", dist)
return # XXX this is not the distribution we were looking for
elif requirement is None or dist not in requirement:
# if we wound up with a different version, resolve what we've got
distreq = dist.as_requirement()
requirement = requirement or distreq
requirement = Requirement(
distreq.project_name, distreq.specs, requirement.extras
)
log.info("Processing dependencies for %s", requirement)
try:
distros = WorkingSet([]).resolve(
[requirement], self.local_index, self.easy_install
)
except DistributionNotFound:
e = sys.exc_info()[1]
raise DistutilsError(
"Could not find required distribution %s" % e.args
)
except VersionConflict:
e = sys.exc_info()[1]
raise DistutilsError(
"Installed distribution %s conflicts with requirement %s"
% e.args
)
if self.always_copy or self.always_copy_from:
# Force all the relevant distros to be copied or activated
for dist in distros:
if dist.key not in self.installed_projects:
self.easy_install(dist.as_requirement())
log.info("Finished processing dependencies for %s", requirement)
def should_unzip(self, dist):
if self.zip_ok is not None:
return not self.zip_ok
if dist.has_metadata('not-zip-safe'):
return True
if not dist.has_metadata('zip-safe'):
return True
return False
def maybe_move(self, spec, dist_filename, setup_base):
dst = os.path.join(self.build_directory, spec.key)
if os.path.exists(dst):
msg = "%r already exists in %s; build directory %s will not be kept"
log.warn(msg, spec.key, self.build_directory, setup_base)
return setup_base
if os.path.isdir(dist_filename):
setup_base = dist_filename
else:
if os.path.dirname(dist_filename)==setup_base:
os.unlink(dist_filename) # get it out of the tmp dir
contents = os.listdir(setup_base)
if len(contents)==1:
dist_filename = os.path.join(setup_base,contents[0])
if os.path.isdir(dist_filename):
# if the only thing there is a directory, move it instead
setup_base = dist_filename
ensure_directory(dst)
shutil.move(setup_base, dst)
return dst
def install_wrapper_scripts(self, dist):
if not self.exclude_scripts:
for args in get_script_args(dist):
self.write_script(*args)
def install_script(self, dist, script_name, script_text, dev_path=None):
"""Generate a legacy script wrapper and install it"""
spec = str(dist.as_requirement())
is_script = is_python_script(script_text, script_name)
def get_template(filename):
"""
There are a couple of template scripts in the package. This
function loads one of them and prepares it for use.
These templates use triple-quotes to escape variable
            substitutions so the scripts get the 2to3 treatment when built
on Python 3. The templates cannot use triple-quotes naturally.
"""
raw_bytes = resource_string('setuptools', template_name)
template_str = raw_bytes.decode('utf-8')
clean_template = template_str.replace('"""', '')
return clean_template
if is_script:
template_name = 'script template.py'
if dev_path:
template_name = template_name.replace('.py', ' (dev).py')
script_text = (get_script_header(script_text) +
get_template(template_name) % locals())
self.write_script(script_name, _to_ascii(script_text), 'b')
def write_script(self, script_name, contents, mode="t", blockers=()):
"""Write an executable file to the scripts directory"""
self.delete_blockers( # clean up old .py/.pyw w/o a script
[os.path.join(self.script_dir,x) for x in blockers])
log.info("Installing %s script to %s", script_name, self.script_dir)
target = os.path.join(self.script_dir, script_name)
self.add_output(target)
mask = current_umask()
if not self.dry_run:
ensure_directory(target)
if os.path.exists(target):
os.unlink(target)
f = open(target,"w"+mode)
f.write(contents)
f.close()
chmod(target, 0x1FF-mask) # 0777
def install_eggs(self, spec, dist_filename, tmpdir):
# .egg dirs or files are already built, so just return them
if dist_filename.lower().endswith('.egg'):
return [self.install_egg(dist_filename, tmpdir)]
elif dist_filename.lower().endswith('.exe'):
return [self.install_exe(dist_filename, tmpdir)]
# Anything else, try to extract and build
setup_base = tmpdir
if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'):
unpack_archive(dist_filename, tmpdir, self.unpack_progress)
elif os.path.isdir(dist_filename):
setup_base = os.path.abspath(dist_filename)
if (setup_base.startswith(tmpdir) # something we downloaded
and self.build_directory and spec is not None):
setup_base = self.maybe_move(spec, dist_filename, setup_base)
# Find the setup.py file
setup_script = os.path.join(setup_base, 'setup.py')
if not os.path.exists(setup_script):
setups = glob(os.path.join(setup_base, '*', 'setup.py'))
if not setups:
raise DistutilsError(
"Couldn't find a setup script in %s" % os.path.abspath(dist_filename)
)
if len(setups)>1:
raise DistutilsError(
"Multiple setup scripts in %s" % os.path.abspath(dist_filename)
)
setup_script = setups[0]
# Now run it, and return the result
if self.editable:
log.info(self.report_editable(spec, setup_script))
return []
else:
return self.build_and_install(setup_script, setup_base)
def egg_distribution(self, egg_path):
if os.path.isdir(egg_path):
metadata = PathMetadata(egg_path,os.path.join(egg_path,'EGG-INFO'))
else:
metadata = EggMetadata(zipimport.zipimporter(egg_path))
return Distribution.from_filename(egg_path,metadata=metadata)
def install_egg(self, egg_path, tmpdir):
destination = os.path.join(self.install_dir,os.path.basename(egg_path))
destination = os.path.abspath(destination)
if not self.dry_run:
ensure_directory(destination)
dist = self.egg_distribution(egg_path)
if not samefile(egg_path, destination):
if os.path.isdir(destination) and not os.path.islink(destination):
dir_util.remove_tree(destination, dry_run=self.dry_run)
elif os.path.exists(destination):
self.execute(os.unlink,(destination,),"Removing "+destination)
uncache_zipdir(destination)
if os.path.isdir(egg_path):
if egg_path.startswith(tmpdir):
f,m = shutil.move, "Moving"
else:
f,m = shutil.copytree, "Copying"
elif self.should_unzip(dist):
self.mkpath(destination)
f,m = self.unpack_and_compile, "Extracting"
elif egg_path.startswith(tmpdir):
f,m = shutil.move, "Moving"
else:
f,m = shutil.copy2, "Copying"
self.execute(f, (egg_path, destination),
(m+" %s to %s") %
(os.path.basename(egg_path),os.path.dirname(destination)))
self.add_output(destination)
return self.egg_distribution(destination)
def install_exe(self, dist_filename, tmpdir):
# See if it's valid, get data
cfg = extract_wininst_cfg(dist_filename)
if cfg is None:
raise DistutilsError(
"%s is not a valid distutils Windows .exe" % dist_filename
)
# Create a dummy distribution object until we build the real distro
dist = Distribution(
None,
project_name=cfg.get('metadata','name'),
version=cfg.get('metadata','version'), platform=get_platform(),
)
# Convert the .exe to an unpacked egg
egg_path = dist.location = os.path.join(tmpdir, dist.egg_name()+'.egg')
egg_tmp = egg_path + '.tmp'
_egg_info = os.path.join(egg_tmp, 'EGG-INFO')
pkg_inf = os.path.join(_egg_info, 'PKG-INFO')
ensure_directory(pkg_inf) # make sure EGG-INFO dir exists
dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX
self.exe_to_egg(dist_filename, egg_tmp)
# Write EGG-INFO/PKG-INFO
if not os.path.exists(pkg_inf):
f = open(pkg_inf,'w')
f.write('Metadata-Version: 1.0\n')
for k,v in cfg.items('metadata'):
if k != 'target_version':
f.write('%s: %s\n' % (k.replace('_','-').title(), v))
f.close()
script_dir = os.path.join(_egg_info,'scripts')
self.delete_blockers( # delete entry-point scripts to avoid duping
[os.path.join(script_dir,args[0]) for args in get_script_args(dist)]
)
# Build .egg file from tmpdir
bdist_egg.make_zipfile(
egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run
)
# install the .egg
return self.install_egg(egg_path, tmpdir)
def exe_to_egg(self, dist_filename, egg_tmp):
"""Extract a bdist_wininst to the directories an egg would use"""
# Check for .pth file and set up prefix translations
prefixes = get_exe_prefixes(dist_filename)
to_compile = []
native_libs = []
top_level = {}
def process(src,dst):
s = src.lower()
for old,new in prefixes:
if s.startswith(old):
src = new+src[len(old):]
parts = src.split('/')
dst = os.path.join(egg_tmp, *parts)
dl = dst.lower()
if dl.endswith('.pyd') or dl.endswith('.dll'):
parts[-1] = bdist_egg.strip_module(parts[-1])
top_level[os.path.splitext(parts[0])[0]] = 1
native_libs.append(src)
elif dl.endswith('.py') and old!='SCRIPTS/':
top_level[os.path.splitext(parts[0])[0]] = 1
to_compile.append(dst)
return dst
if not src.endswith('.pth'):
log.warn("WARNING: can't process %s", src)
return None
# extract, tracking .pyd/.dll->native_libs and .py -> to_compile
unpack_archive(dist_filename, egg_tmp, process)
stubs = []
for res in native_libs:
if res.lower().endswith('.pyd'): # create stubs for .pyd's
parts = res.split('/')
resource = parts[-1]
parts[-1] = bdist_egg.strip_module(parts[-1])+'.py'
pyfile = os.path.join(egg_tmp, *parts)
to_compile.append(pyfile)
stubs.append(pyfile)
bdist_egg.write_stub(resource, pyfile)
self.byte_compile(to_compile) # compile .py's
bdist_egg.write_safety_flag(os.path.join(egg_tmp,'EGG-INFO'),
bdist_egg.analyze_egg(egg_tmp, stubs)) # write zip-safety flag
for name in 'top_level','native_libs':
if locals()[name]:
txt = os.path.join(egg_tmp, 'EGG-INFO', name+'.txt')
if not os.path.exists(txt):
f = open(txt,'w')
f.write('\n'.join(locals()[name])+'\n')
f.close()
def installation_report(self, req, dist, what="Installed"):
"""Helpful installation message for display to package users"""
msg = "\n%(what)s %(eggloc)s%(extras)s"
if self.multi_version and not self.no_report:
msg += """
Because this distribution was installed --multi-version, before you can
import modules from this package in an application, you will need to
'import pkg_resources' and then use a 'require()' call similar to one of
these examples, in order to select the desired version:
pkg_resources.require("%(name)s") # latest installed version
pkg_resources.require("%(name)s==%(version)s") # this exact version
pkg_resources.require("%(name)s>=%(version)s") # this version or higher
"""
if self.install_dir not in map(normalize_path,sys.path):
msg += """
Note also that the installation directory must be on sys.path at runtime for
this to work. (e.g. by being the application's script directory, by being on
PYTHONPATH, or by being added to sys.path by your code.)
"""
eggloc = dist.location
name = dist.project_name
version = dist.version
extras = '' # TODO: self.report_extras(req, dist)
return msg % locals()
def report_editable(self, spec, setup_script):
dirname = os.path.dirname(setup_script)
python = sys.executable
return """\nExtracted editable version of %(spec)s to %(dirname)s
If it uses setuptools in its setup script, you can activate it in
"development" mode by going to that directory and running::
%(python)s setup.py develop
See the setuptools documentation for the "develop" command for more info.
""" % locals()
def run_setup(self, setup_script, setup_base, args):
sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg)
sys.modules.setdefault('distutils.command.egg_info', egg_info)
args = list(args)
if self.verbose>2:
v = 'v' * (self.verbose - 1)
args.insert(0,'-'+v)
elif self.verbose<2:
args.insert(0,'-q')
if self.dry_run:
args.insert(0,'-n')
log.info(
"Running %s %s", setup_script[len(setup_base)+1:], ' '.join(args)
)
try:
run_setup(setup_script, args)
except SystemExit:
v = sys.exc_info()[1]
raise DistutilsError("Setup script exited with %s" % (v.args[0],))
def build_and_install(self, setup_script, setup_base):
args = ['bdist_egg', '--dist-dir']
dist_dir = tempfile.mkdtemp(
prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script)
)
try:
self._set_fetcher_options(os.path.dirname(setup_script))
args.append(dist_dir)
self.run_setup(setup_script, setup_base, args)
all_eggs = Environment([dist_dir])
eggs = []
for key in all_eggs:
for dist in all_eggs[key]:
eggs.append(self.install_egg(dist.location, setup_base))
if not eggs and not self.dry_run:
log.warn("No eggs found in %s (setup script problem?)",
dist_dir)
return eggs
finally:
rmtree(dist_dir)
log.set_verbosity(self.verbose) # restore our log verbosity
def _set_fetcher_options(self, base):
"""
When easy_install is about to run bdist_egg on a source dist, that
source dist might have 'setup_requires' directives, requiring
additional fetching. Ensure the fetcher options given to easy_install
are available to that command as well.
"""
# find the fetch options from easy_install and write them out
# to the setup.cfg file.
ei_opts = self.distribution.get_option_dict('easy_install').copy()
fetch_directives = (
'find_links', 'site_dirs', 'index_url', 'optimize',
'site_dirs', 'allow_hosts',
)
fetch_options = {}
for key, val in ei_opts.items():
if key not in fetch_directives: continue
fetch_options[key.replace('_', '-')] = val[1]
# create a settings dictionary suitable for `edit_config`
settings = dict(easy_install=fetch_options)
cfg_filename = os.path.join(base, 'setup.cfg')
setopt.edit_config(cfg_filename, settings)
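    # Rough sketch of the effect (option values are examples only): if
    # easy_install was run with --find-links and --index-url, the dictionary
    # handed to setopt.edit_config would look roughly like
    #     {'easy_install': {'find-links': 'http://example.com/pkgs',
    #                       'index-url': 'https://pypi.python.org/simple'}}
    # and edit_config writes those keys into the source dist's setup.cfg, so any
    # setup_requires fetching honours the same options.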
def update_pth(self, dist):
if self.pth_file is None:
return
for d in self.pth_file[dist.key]: # drop old entries
if self.multi_version or d.location != dist.location:
log.info("Removing %s from easy-install.pth file", d)
self.pth_file.remove(d)
if d.location in self.shadow_path:
self.shadow_path.remove(d.location)
if not self.multi_version:
if dist.location in self.pth_file.paths:
log.info(
"%s is already the active version in easy-install.pth",
dist
)
else:
log.info("Adding %s to easy-install.pth file", dist)
self.pth_file.add(dist) # add new entry
if dist.location not in self.shadow_path:
self.shadow_path.append(dist.location)
if not self.dry_run:
self.pth_file.save()
if dist.key=='setuptools':
# Ensure that setuptools itself never becomes unavailable!
# XXX should this check for latest version?
filename = os.path.join(self.install_dir,'setuptools.pth')
if os.path.islink(filename): os.unlink(filename)
f = open(filename, 'wt')
f.write(self.pth_file.make_relative(dist.location)+'\n')
f.close()
def unpack_progress(self, src, dst):
# Progress filter for unpacking
log.debug("Unpacking %s to %s", src, dst)
return dst # only unpack-and-compile skips files for dry run
def unpack_and_compile(self, egg_path, destination):
to_compile = []
to_chmod = []
def pf(src, dst):
if dst.endswith('.py') and not src.startswith('EGG-INFO/'):
to_compile.append(dst)
elif dst.endswith('.dll') or dst.endswith('.so'):
to_chmod.append(dst)
self.unpack_progress(src,dst)
return not self.dry_run and dst or None
unpack_archive(egg_path, destination, pf)
self.byte_compile(to_compile)
if not self.dry_run:
for f in to_chmod:
mode = ((os.stat(f)[stat.ST_MODE]) | 0x16D) & 0xFED # 0555, 07755
chmod(f, mode)
def byte_compile(self, to_compile):
if _dont_write_bytecode:
self.warn('byte-compiling is disabled, skipping.')
return
from distutils.util import byte_compile
try:
# try to make the byte compile messages quieter
log.set_verbosity(self.verbose - 1)
byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run)
if self.optimize:
byte_compile(
to_compile, optimize=self.optimize, force=1,
dry_run=self.dry_run
)
finally:
log.set_verbosity(self.verbose) # restore original verbosity
def no_default_version_msg(self):
template = """bad install directory or PYTHONPATH
You are attempting to install a package to a directory that is not
on PYTHONPATH and which Python does not read ".pth" files from. The
installation directory you specified (via --install-dir, --prefix, or
the distutils default setting) was:
%s
and your PYTHONPATH environment variable currently contains:
%r
Here are some of your options for correcting the problem:
* You can choose a different installation directory, i.e., one that is
on PYTHONPATH or supports .pth files
* You can add the installation directory to the PYTHONPATH environment
variable. (It must then also be on PYTHONPATH whenever you run
Python and want to use the package(s) you are installing.)
* You can set up the installation directory to support ".pth" files by
using one of the approaches described here:
https://pythonhosted.org/setuptools/easy_install.html#custom-installation-locations
Please make the appropriate changes for your system and try again."""
return template % (self.install_dir, os.environ.get('PYTHONPATH',''))
def install_site_py(self):
"""Make sure there's a site.py in the target dir, if needed"""
if self.sitepy_installed:
return # already did it, or don't need to
sitepy = os.path.join(self.install_dir, "site.py")
source = resource_string("setuptools", "site-patch.py")
current = ""
if os.path.exists(sitepy):
log.debug("Checking existing site.py in %s", self.install_dir)
f = open(sitepy,'rb')
current = f.read()
# we want str, not bytes
if sys.version_info >= (3,):
current = current.decode()
f.close()
if not current.startswith('def __boot():'):
raise DistutilsError(
"%s is not a setuptools-generated site.py; please"
" remove it." % sitepy
)
if current != source:
log.info("Creating %s", sitepy)
if not self.dry_run:
ensure_directory(sitepy)
f = open(sitepy,'wb')
f.write(source)
f.close()
self.byte_compile([sitepy])
self.sitepy_installed = True
def create_home_path(self):
"""Create directories under ~."""
if not self.user:
return
home = convert_path(os.path.expanduser("~"))
for name, path in iteritems(self.config_vars):
if path.startswith(home) and not os.path.isdir(path):
self.debug_print("os.makedirs('%s', 0700)" % path)
os.makedirs(path, 0x1C0) # 0700
INSTALL_SCHEMES = dict(
posix = dict(
install_dir = '$base/lib/python$py_version_short/site-packages',
script_dir = '$base/bin',
),
)
DEFAULT_SCHEME = dict(
install_dir = '$base/Lib/site-packages',
script_dir = '$base/Scripts',
)
def _expand(self, *attrs):
config_vars = self.get_finalized_command('install').config_vars
if self.prefix:
# Set default install_dir/scripts from --prefix
config_vars = config_vars.copy()
config_vars['base'] = self.prefix
scheme = self.INSTALL_SCHEMES.get(os.name,self.DEFAULT_SCHEME)
for attr,val in scheme.items():
if getattr(self,attr,None) is None:
setattr(self,attr,val)
from distutils.util import subst_vars
for attr in attrs:
val = getattr(self, attr)
if val is not None:
val = subst_vars(val, config_vars)
if os.name == 'posix':
val = os.path.expanduser(val)
setattr(self, attr, val)
def get_site_dirs():
# return a list of 'site' dirs
sitedirs = [_f for _f in os.environ.get('PYTHONPATH',
'').split(os.pathsep) if _f]
prefixes = [sys.prefix]
if sys.exec_prefix != sys.prefix:
prefixes.append(sys.exec_prefix)
for prefix in prefixes:
if prefix:
if sys.platform in ('os2emx', 'riscos'):
sitedirs.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitedirs.extend([os.path.join(prefix,
"lib",
"python" + sys.version[:3],
"site-packages"),
os.path.join(prefix, "lib", "site-python")])
else:
sitedirs.extend(
[prefix, os.path.join(prefix, "lib", "site-packages")]
)
if sys.platform == 'darwin':
# for framework builds *only* we add the standard Apple
# locations. Currently only per-user, but /Library and
# /Network/Library could be added too
if 'Python.framework' in prefix:
home = os.environ.get('HOME')
if home:
sitedirs.append(
os.path.join(home,
'Library',
'Python',
sys.version[:3],
'site-packages'))
lib_paths = get_path('purelib'), get_path('platlib')
for site_lib in lib_paths:
if site_lib not in sitedirs: sitedirs.append(site_lib)
if site.ENABLE_USER_SITE:
sitedirs.append(site.USER_SITE)
sitedirs = list(map(normalize_path, sitedirs))
return sitedirs
def expand_paths(inputs):
"""Yield sys.path directories that might contain "old-style" packages"""
seen = {}
for dirname in inputs:
dirname = normalize_path(dirname)
if dirname in seen:
continue
seen[dirname] = 1
if not os.path.isdir(dirname):
continue
files = os.listdir(dirname)
yield dirname, files
for name in files:
if not name.endswith('.pth'):
# We only care about the .pth files
continue
if name in ('easy-install.pth','setuptools.pth'):
# Ignore .pth files that we control
continue
# Read the .pth file
f = open(os.path.join(dirname,name))
lines = list(yield_lines(f))
f.close()
# Yield existing non-dupe, non-import directory lines from it
for line in lines:
if not line.startswith("import"):
line = normalize_path(line.rstrip())
if line not in seen:
seen[line] = 1
if not os.path.isdir(line):
continue
yield line, os.listdir(line)
def extract_wininst_cfg(dist_filename):
"""Extract configuration data from a bdist_wininst .exe
Returns a ConfigParser.RawConfigParser, or None
"""
f = open(dist_filename,'rb')
try:
endrec = zipfile._EndRecData(f)
if endrec is None:
return None
prepended = (endrec[9] - endrec[5]) - endrec[6]
if prepended < 12: # no wininst data here
return None
f.seek(prepended-12)
from setuptools.compat import StringIO, ConfigParser
import struct
tag, cfglen, bmlen = struct.unpack("<iii",f.read(12))
if tag not in (0x1234567A, 0x1234567B):
return None # not a valid tag
f.seek(prepended-(12+cfglen))
cfg = ConfigParser.RawConfigParser({'version':'','target_version':''})
try:
part = f.read(cfglen)
# part is in bytes, but we need to read up to the first null
# byte.
if sys.version_info >= (2,6):
null_byte = bytes([0])
else:
null_byte = chr(0)
config = part.split(null_byte, 1)[0]
# Now the config is in bytes, but for RawConfigParser, it should
# be text, so decode it.
config = config.decode(sys.getfilesystemencoding())
cfg.readfp(StringIO(config))
except ConfigParser.Error:
return None
if not cfg.has_section('metadata') or not cfg.has_section('Setup'):
return None
return cfg
finally:
f.close()
def get_exe_prefixes(exe_filename):
"""Get exe->egg path translations for a given .exe file"""
prefixes = [
('PURELIB/', ''), ('PLATLIB/pywin32_system32', ''),
('PLATLIB/', ''),
('SCRIPTS/', 'EGG-INFO/scripts/'),
('DATA/lib/site-packages', ''),
]
z = zipfile.ZipFile(exe_filename)
try:
for info in z.infolist():
name = info.filename
parts = name.split('/')
if len(parts)==3 and parts[2]=='PKG-INFO':
if parts[1].endswith('.egg-info'):
prefixes.insert(0,('/'.join(parts[:2]), 'EGG-INFO/'))
break
if len(parts) != 2 or not name.endswith('.pth'):
continue
if name.endswith('-nspkg.pth'):
continue
if parts[0].upper() in ('PURELIB','PLATLIB'):
contents = z.read(name)
if sys.version_info >= (3,):
contents = contents.decode()
for pth in yield_lines(contents):
pth = pth.strip().replace('\\','/')
if not pth.startswith('import'):
prefixes.append((('%s/%s/' % (parts[0],pth)), ''))
finally:
z.close()
prefixes = [(x.lower(),y) for x, y in prefixes]
prefixes.sort()
prefixes.reverse()
return prefixes
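# Illustration (hypothetical archive layout): for a bdist_wininst .exe containing
# 'PLATLIB/foo/__init__.py' and 'SCRIPTS/foo-script.py', the prefixes returned
# here cause exe_to_egg() to place those entries at 'foo/__init__.py' and
# 'EGG-INFO/scripts/foo-script.py' inside the egg directory.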
def parse_requirement_arg(spec):
try:
return Requirement.parse(spec)
except ValueError:
raise DistutilsError(
"Not a URL, existing file, or requirement spec: %r" % (spec,)
)
class PthDistributions(Environment):
"""A .pth file with Distribution paths in it"""
dirty = False
def __init__(self, filename, sitedirs=()):
self.filename = filename
self.sitedirs = list(map(normalize_path, sitedirs))
self.basedir = normalize_path(os.path.dirname(self.filename))
self._load()
Environment.__init__(self, [], None, None)
for path in yield_lines(self.paths):
list(map(self.add, find_distributions(path, True)))
def _load(self):
self.paths = []
saw_import = False
seen = dict.fromkeys(self.sitedirs)
if os.path.isfile(self.filename):
f = open(self.filename,'rt')
for line in f:
if line.startswith('import'):
saw_import = True
continue
path = line.rstrip()
self.paths.append(path)
if not path.strip() or path.strip().startswith('#'):
continue
# skip non-existent paths, in case somebody deleted a package
# manually, and duplicate paths as well
path = self.paths[-1] = normalize_path(
os.path.join(self.basedir,path)
)
if not os.path.exists(path) or path in seen:
self.paths.pop() # skip it
self.dirty = True # we cleaned up, so we're dirty now :)
continue
seen[path] = 1
f.close()
if self.paths and not saw_import:
self.dirty = True # ensure anything we touch has import wrappers
while self.paths and not self.paths[-1].strip():
self.paths.pop()
def save(self):
"""Write changed .pth file back to disk"""
if not self.dirty:
return
data = '\n'.join(map(self.make_relative,self.paths))
if data:
log.debug("Saving %s", self.filename)
data = (
"import sys; sys.__plen = len(sys.path)\n"
"%s\n"
"import sys; new=sys.path[sys.__plen:];"
" del sys.path[sys.__plen:];"
" p=getattr(sys,'__egginsert',0); sys.path[p:p]=new;"
" sys.__egginsert = p+len(new)\n"
) % data
if os.path.islink(self.filename):
os.unlink(self.filename)
f = open(self.filename,'wt')
f.write(data)
f.close()
elif os.path.exists(self.filename):
log.debug("Deleting empty %s", self.filename)
os.unlink(self.filename)
self.dirty = False
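    # Rough sketch of the saved file (egg names are placeholders): a non-empty
    # easy-install.pth ends up looking like
    #     import sys; sys.__plen = len(sys.path)
    #     ./Foo-1.0-py2.7.egg
    #     ./Bar-2.1-py2.7.egg
    #     import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; ...
    # i.e. the egg paths are bracketed by import lines that splice them into
    # sys.path at a stable position when the site dir is processed.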
def add(self, dist):
"""Add `dist` to the distribution map"""
if (dist.location not in self.paths and (
dist.location not in self.sitedirs or
dist.location == os.getcwd() # account for '.' being in PYTHONPATH
)):
self.paths.append(dist.location)
self.dirty = True
Environment.add(self, dist)
def remove(self, dist):
"""Remove `dist` from the distribution map"""
while dist.location in self.paths:
self.paths.remove(dist.location)
self.dirty = True
Environment.remove(self, dist)
def make_relative(self,path):
npath, last = os.path.split(normalize_path(path))
baselen = len(self.basedir)
parts = [last]
sep = os.altsep=='/' and '/' or os.sep
while len(npath)>=baselen:
if npath==self.basedir:
parts.append(os.curdir)
parts.reverse()
return sep.join(parts)
npath, last = os.path.split(npath)
parts.append(last)
else:
return path
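    # Example (paths hypothetical, POSIX-style): with basedir '/site-packages',
    # make_relative('/site-packages/Foo-1.0.egg') returns './Foo-1.0.egg', while
    # a location outside basedir is returned unchanged.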
def get_script_header(script_text, executable=sys_executable, wininst=False):
"""Create a #! line, getting options (if any) from script_text"""
from distutils.command.build_scripts import first_line_re
# first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern.
if not isinstance(first_line_re.pattern, str):
first_line_re = re.compile(first_line_re.pattern.decode())
first = (script_text+'\n').splitlines()[0]
match = first_line_re.match(first)
options = ''
if match:
options = match.group(1) or ''
if options: options = ' '+options
if wininst:
executable = "python.exe"
else:
executable = nt_quote_arg(executable)
hdr = "#!%(executable)s%(options)s\n" % locals()
if not isascii(hdr):
# Non-ascii path to sys.executable, use -x to prevent warnings
if options:
if options.strip().startswith('-'):
options = ' -x'+options.strip()[1:]
# else: punt, we can't do it, let the warning happen anyway
else:
options = ' -x'
executable = fix_jython_executable(executable, options)
hdr = "#!%(executable)s%(options)s\n" % locals()
return hdr
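# Hedged example (interpreter path depends on the running Python): for a script
# whose first line is "#!python -u", this returns a header of the form
# "#!<sys.executable> -u\n"; without an existing #! line the header is simply
# "#!<sys.executable>\n".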
def auto_chmod(func, arg, exc):
if func is os.remove and os.name=='nt':
chmod(arg, stat.S_IWRITE)
return func(arg)
et, ev, _ = sys.exc_info()
reraise(et, (ev[0], ev[1] + (" %s %s" % (func,arg))))
def uncache_zipdir(path):
"""Ensure that the importer caches dont have stale info for `path`"""
from zipimport import _zip_directory_cache as zdc
_uncache(path, zdc)
_uncache(path, sys.path_importer_cache)
def _uncache(path, cache):
if path in cache:
del cache[path]
else:
path = normalize_path(path)
for p in cache:
if normalize_path(p)==path:
del cache[p]
return
def is_python(text, filename='<string>'):
"Is this string a valid Python script?"
try:
compile(text, filename, 'exec')
except (SyntaxError, TypeError):
return False
else:
return True
def is_sh(executable):
"""Determine if the specified executable is a .sh (contains a #! line)"""
try:
fp = open(executable)
magic = fp.read(2)
fp.close()
except (OSError,IOError): return executable
return magic == '#!'
def nt_quote_arg(arg):
"""Quote a command line argument according to Windows parsing rules"""
result = []
needquote = False
nb = 0
needquote = (" " in arg) or ("\t" in arg)
if needquote:
result.append('"')
for c in arg:
if c == '\\':
nb += 1
elif c == '"':
# double preceding backslashes, then add a \"
result.append('\\' * (nb*2) + '\\"')
nb = 0
else:
if nb:
result.append('\\' * nb)
nb = 0
result.append(c)
if nb:
result.append('\\' * nb)
if needquote:
result.append('\\' * nb) # double the trailing backslashes
result.append('"')
return ''.join(result)
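# Worked example (path chosen for illustration):
#     nt_quote_arg('C:\\Program Files\\Python\\python.exe')
# returns '"C:\\Program Files\\Python\\python.exe"' because the embedded space
# forces quoting; an argument without spaces, tabs or quote characters comes
# back unchanged.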
def is_python_script(script_text, filename):
"""Is this text, as a whole, a Python script? (as opposed to shell/bat/etc.
"""
if filename.endswith('.py') or filename.endswith('.pyw'):
return True # extension says it's Python
if is_python(script_text, filename):
return True # it's syntactically valid Python
if script_text.startswith('#!'):
# It begins with a '#!' line, so check if 'python' is in it somewhere
return 'python' in script_text.splitlines()[0].lower()
return False # Not any Python I can recognize
try:
from os import chmod as _chmod
except ImportError:
# Jython compatibility
def _chmod(*args): pass
def chmod(path, mode):
log.debug("changing mode of %s to %o", path, mode)
try:
_chmod(path, mode)
except os.error:
e = sys.exc_info()[1]
log.debug("chmod failed: %s", e)
def fix_jython_executable(executable, options):
if sys.platform.startswith('java') and is_sh(executable):
        # The Jython workaround is not needed on Linux systems.
import java
if java.lang.System.getProperty("os.name") == "Linux":
return executable
# Workaround Jython's sys.executable being a .sh (an invalid
# shebang line interpreter)
if options:
# Can't apply the workaround, leave it broken
log.warn(
"WARNING: Unable to adapt shebang line for Jython,"
" the following script is NOT executable\n"
" see http://bugs.jython.org/issue1112 for"
" more information.")
else:
return '/usr/bin/env %s' % executable
return executable
class ScriptWriter(object):
"""
Encapsulates behavior around writing entry point scripts for console and
gui apps.
"""
template = textwrap.dedent("""
# EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r
__requires__ = %(spec)r
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.exit(
load_entry_point(%(spec)r, %(group)r, %(name)r)()
)
""").lstrip()
@classmethod
def get_script_args(cls, dist, executable=sys_executable, wininst=False):
"""
Yield write_script() argument tuples for a distribution's entrypoints
"""
gen_class = cls.get_writer(wininst)
spec = str(dist.as_requirement())
header = get_script_header("", executable, wininst)
for type_ in 'console', 'gui':
group = type_ + '_scripts'
for name, ep in dist.get_entry_map(group).items():
script_text = gen_class.template % locals()
for res in gen_class._get_script_args(type_, name, header,
script_text):
yield res
@classmethod
def get_writer(cls, force_windows):
if force_windows or sys.platform=='win32':
return WindowsScriptWriter.get_writer()
return cls
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
# Simply write the stub with no extension.
yield (name, header+script_text)
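    # Illustration (distribution and version are hypothetical): for a dist that
    # declares the entry point 'mytool = mypkg.cli:main' under 'console_scripts',
    # this base writer yields ('mytool', header + script_text), where script_text
    # is the template above rendered so that it ends up calling
    # load_entry_point('mypkg==1.0', 'console_scripts', 'mytool')().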
class WindowsScriptWriter(ScriptWriter):
@classmethod
def get_writer(cls):
"""
Get a script writer suitable for Windows
"""
writer_lookup = dict(
executable=WindowsExecutableLauncherWriter,
natural=cls,
)
# for compatibility, use the executable launcher by default
launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable')
return writer_lookup[launcher]
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"For Windows, add a .py extension"
ext = dict(console='.pya', gui='.pyw')[type_]
if ext not in os.environ['PATHEXT'].lower().split(';'):
warnings.warn("%s not listed in PATHEXT; scripts will not be "
"recognized as executables." % ext, UserWarning)
old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe']
old.remove(ext)
header = cls._adjust_header(type_, header)
blockers = [name+x for x in old]
yield name+ext, header+script_text, 't', blockers
@staticmethod
def _adjust_header(type_, orig_header):
"""
Make sure 'pythonw' is used for gui and 'python' is used for
console (regardless of what sys.executable is).
"""
pattern = 'pythonw.exe'
repl = 'python.exe'
if type_ == 'gui':
pattern, repl = repl, pattern
pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE)
new_header = pattern_ob.sub(string=orig_header, repl=repl)
clean_header = new_header[2:-1].strip('"')
if sys.platform == 'win32' and not os.path.exists(clean_header):
# the adjusted version doesn't exist, so return the original
return orig_header
return new_header
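# Editorial sketch, not part of the original source: _adjust_header swaps the
# interpreter named in a shebang-style header to match the script type. The
# paths below are hypothetical; on win32 the swap is kept only if the adjusted
# interpreter actually exists, otherwise the original header is returned.
#     _adjust_header('gui', '#!"C:\Python27\python.exe"\n')
#         -> '#!"C:\Python27\pythonw.exe"\n'
#     _adjust_header('console', '#!"C:\Python27\pythonw.exe"\n')
#         -> '#!"C:\Python27\python.exe"\n'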
class WindowsExecutableLauncherWriter(WindowsScriptWriter):
@classmethod
def _get_script_args(cls, type_, name, header, script_text):
"""
For Windows, add a .py extension and an .exe launcher
"""
if type_=='gui':
launcher_type = 'gui'
ext = '-script.pyw'
old = ['.pyw']
else:
launcher_type = 'cli'
ext = '-script.py'
old = ['.py','.pyc','.pyo']
hdr = cls._adjust_header(type_, header)
blockers = [name+x for x in old]
yield (name+ext, hdr+script_text, 't', blockers)
yield (
name+'.exe', get_win_launcher(launcher_type),
'b' # write in binary mode
)
if not is_64bit():
# install a manifest for the launcher to prevent Windows
# from detecting it as an installer (which it will for
# launchers like easy_install.exe). Consider only
# adding a manifest for launchers detected as installers.
# See Distribute #143 for details.
m_name = name + '.exe.manifest'
yield (m_name, load_launcher_manifest(name), 't')
# for backward-compatibility
get_script_args = ScriptWriter.get_script_args
def get_win_launcher(type):
"""
Load the Windows launcher (executable) suitable for launching a script.
`type` should be either 'cli' or 'gui'
Returns the executable as a byte string.
"""
launcher_fn = '%s.exe' % type
if platform.machine().lower()=='arm':
launcher_fn = launcher_fn.replace(".", "-arm.")
if is_64bit():
launcher_fn = launcher_fn.replace(".", "-64.")
else:
launcher_fn = launcher_fn.replace(".", "-32.")
return resource_string('setuptools', launcher_fn)
def load_launcher_manifest(name):
manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml')
if sys.version_info[0] < 3:
return manifest % vars()
else:
return manifest.decode('utf-8') % vars()
def rmtree(path, ignore_errors=False, onerror=auto_chmod):
"""Recursively delete a directory tree.
This code is taken from the Python 2.4 version of 'shutil', because
the 2.3 version doesn't really work right.
"""
if ignore_errors:
def onerror(*args):
pass
elif onerror is None:
def onerror(*args):
raise
names = []
try:
names = os.listdir(path)
except os.error:
onerror(os.listdir, path, sys.exc_info())
for name in names:
fullname = os.path.join(path, name)
try:
mode = os.lstat(fullname).st_mode
except os.error:
mode = 0
if stat.S_ISDIR(mode):
rmtree(fullname, ignore_errors, onerror)
else:
try:
os.remove(fullname)
except os.error:
onerror(os.remove, fullname, sys.exc_info())
try:
os.rmdir(path)
except os.error:
onerror(os.rmdir, path, sys.exc_info())
def current_umask():
tmp = os.umask(0x12) # 022
os.umask(tmp)
return tmp
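# Editorial note, not part of the original source: os.umask() can only be read
# by writing a new value, so current_umask() sets a throwaway mask (0x12 ==
# octal 022) and immediately restores the previous one, returning it.
#     mask = current_umask()        # e.g. 0o022 on a typical system
#     chmod(path, 0o777 - mask)     # hypothetical use: honor the user's umask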
def bootstrap():
# This function is called when setuptools*.egg is run using /bin/sh
import setuptools
argv0 = os.path.dirname(setuptools.__path__[0])
sys.argv[0] = argv0
sys.argv.append(argv0)
main()
def main(argv=None, **kw):
from setuptools import setup
from setuptools.dist import Distribution
import distutils.core
USAGE = """\
usage: %(script)s [options] requirement_or_url ...
or: %(script)s --help
"""
def gen_usage(script_name):
return USAGE % dict(
script=os.path.basename(script_name),
)
def with_ei_usage(f):
old_gen_usage = distutils.core.gen_usage
try:
distutils.core.gen_usage = gen_usage
return f()
finally:
distutils.core.gen_usage = old_gen_usage
class DistributionWithoutHelpCommands(Distribution):
common_usage = ""
def _show_help(self,*args,**kw):
with_ei_usage(lambda: Distribution._show_help(self,*args,**kw))
if argv is None:
argv = sys.argv[1:]
with_ei_usage(lambda:
setup(
script_args = ['-q','easy_install', '-v']+argv,
script_name = sys.argv[0] or 'easy_install',
distclass=DistributionWithoutHelpCommands, **kw
)
)
| lgpl-3.0 |
jquacinella/IS602_Project | web/gluon/rocket.py | 19 | 58903 | # -*- coding: utf-8 -*-
# This file is part of the Rocket Web Server
# Copyright (c) 2011 Timothy Farrell
# Modified by Massimo Di Pierro
# Import System Modules
import sys
import errno
import socket
import logging
import platform
# Define Constants
VERSION = '1.2.6'
SERVER_NAME = socket.gethostname()
SERVER_SOFTWARE = 'Rocket %s' % VERSION
HTTP_SERVER_SOFTWARE = '%s Python/%s' % (
SERVER_SOFTWARE, sys.version.split(' ')[0])
BUF_SIZE = 16384
SOCKET_TIMEOUT = 10 # in secs
THREAD_STOP_CHECK_INTERVAL = 1 # in secs, How often should threads check for a server stop message?
IS_JYTHON = platform.system() == 'Java' # Handle special cases for Jython
IGNORE_ERRORS_ON_CLOSE = set([errno.ECONNABORTED, errno.ECONNRESET])
DEFAULT_LISTEN_QUEUE_SIZE = 5
DEFAULT_MIN_THREADS = 10
DEFAULT_MAX_THREADS = 0
DEFAULTS = dict(LISTEN_QUEUE_SIZE=DEFAULT_LISTEN_QUEUE_SIZE,
MIN_THREADS=DEFAULT_MIN_THREADS,
MAX_THREADS=DEFAULT_MAX_THREADS)
PY3K = sys.version_info[0] > 2
class NullHandler(logging.Handler):
"A Logging handler to prevent library errors."
def emit(self, record):
pass
if PY3K:
def b(val):
""" Convert string/unicode/bytes literals into bytes. This allows for
the same code to run on Python 2.x and 3.x. """
if isinstance(val, str):
return val.encode()
else:
return val
def u(val, encoding="us-ascii"):
""" Convert bytes into string/unicode. This allows for the
same code to run on Python 2.x and 3.x. """
if isinstance(val, bytes):
return val.decode(encoding)
else:
return val
else:
def b(val):
""" Convert string/unicode/bytes literals into bytes. This allows for
the same code to run on Python 2.x and 3.x. """
if isinstance(val, unicode):
return val.encode()
else:
return val
def u(val, encoding="us-ascii"):
""" Convert bytes into string/unicode. This allows for the
same code to run on Python 2.x and 3.x. """
if isinstance(val, str):
return val.decode(encoding)
else:
return val
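# Editorial sketch, not part of the original source: the helpers above let the
# same calling code run unchanged on Python 2 and 3.
#     b('abc')   -> byte string b'abc'  (str on Python 2, bytes on Python 3)
#     u(b'abc')  -> text string 'abc'   (unicode on Python 2, str on Python 3)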
# Import Package Modules
# package imports removed in monolithic build
__all__ = ['VERSION', 'SERVER_SOFTWARE', 'HTTP_SERVER_SOFTWARE', 'BUF_SIZE',
'IS_JYTHON', 'IGNORE_ERRORS_ON_CLOSE', 'DEFAULTS', 'PY3K', 'b', 'u',
'Rocket', 'CherryPyWSGIServer', 'SERVER_NAME', 'NullHandler']
# Monolithic build...end of module: rocket/__init__.py
# Monolithic build...start of module: rocket/connection.py
# Import System Modules
import sys
import time
import socket
try:
import ssl
has_ssl = True
except ImportError:
has_ssl = False
# Import Package Modules
# package imports removed in monolithic build
# TODO - This part is still very experimental.
#from .filelike import FileLikeSocket
class Connection(object):
__slots__ = [
'setblocking',
'sendall',
'shutdown',
'makefile',
'fileno',
'client_addr',
'client_port',
'server_port',
'socket',
'start_time',
'ssl',
'secure',
'recv',
'send',
'read',
'write'
]
def __init__(self, sock_tuple, port, secure=False):
self.client_addr, self.client_port = sock_tuple[1][:2]
self.server_port = port
self.socket = sock_tuple[0]
self.start_time = time.time()
self.ssl = has_ssl and isinstance(self.socket, ssl.SSLSocket)
self.secure = secure
if IS_JYTHON:
# In Jython we must set TCP_NODELAY here since it does not
# inherit from the listening socket.
# See: http://bugs.jython.org/issue1309
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.socket.settimeout(SOCKET_TIMEOUT)
self.shutdown = self.socket.shutdown
self.fileno = self.socket.fileno
self.setblocking = self.socket.setblocking
self.recv = self.socket.recv
self.send = self.socket.send
self.makefile = self.socket.makefile
if sys.platform == 'darwin':
self.sendall = self._sendall_darwin
else:
self.sendall = self.socket.sendall
def _sendall_darwin(self, buf):
pending = len(buf)
offset = 0
while pending:
try:
sent = self.socket.send(buf[offset:])
pending -= sent
offset += sent
except socket.error:
import errno
info = sys.exc_info()
if info[1].args[0] != errno.EAGAIN:
raise
return offset
# FIXME - this is not ready for prime-time yet.
# def makefile(self, buf_size=BUF_SIZE):
# return FileLikeSocket(self, buf_size)
def close(self):
if hasattr(self.socket, '_sock'):
try:
self.socket._sock.close()
except socket.error:
info = sys.exc_info()
if info[1].args[0] != socket.EBADF:
raise info[1]
else:
pass
self.socket.close()
# Monolithic build...end of module: rocket/connection.py
# Monolithic build...start of module: rocket/filelike.py
# Import System Modules
import socket
try:
from io import StringIO
except ImportError:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# Import Package Modules
# package imports removed in monolithic build
class FileLikeSocket(object):
def __init__(self, conn, buf_size=BUF_SIZE):
self.conn = conn
self.buf_size = buf_size
self.buffer = StringIO()
self.content_length = None
if self.conn.socket.gettimeout() == 0.0:
self.read = self.non_blocking_read
else:
self.read = self.blocking_read
def __iter__(self):
return self
def recv(self, size):
while True:
try:
return self.conn.recv(size)
except socket.error:
exc = sys.exc_info()
e = exc[1]
# FIXME - Don't raise socket_errors_nonblocking or socket_error_eintr
if (e.args[0] not in set()):
raise
def next(self):
data = self.readline()
if data == '':
raise StopIteration
return data
def non_blocking_read(self, size=None):
# Shamelessly adapted from Cherrypy!
bufr = self.buffer
bufr.seek(0, 2)
if size is None:
while True:
data = self.recv(self.buf_size)
if not data:
break
bufr.write(data)
self.buffer = StringIO()
return bufr.getvalue()
else:
buf_len = self.buffer.tell()
if buf_len >= size:
bufr.seek(0)
data = bufr.read(size)
self.buffer = StringIO(bufr.read())
return data
self.buffer = StringIO()
while True:
remaining = size - buf_len
data = self.recv(remaining)
if not data:
break
n = len(data)
if n == size and not buf_len:
return data
if n == remaining:
bufr.write(data)
del data
break
bufr.write(data)
buf_len += n
del data
return bufr.getvalue()
def blocking_read(self, length=None):
if length is None:
if self.content_length is not None:
length = self.content_length
else:
length = 1
try:
data = self.conn.recv(length)
except:
data = b('')
return data
def readline(self):
data = b("")
char = self.read(1)
while char != b('\n') and char is not b(''):
line = repr(char)
data += char
char = self.read(1)
data += char
return data
def readlines(self, hint="ignored"):
return list(self)
def close(self):
self.conn = None
self.content_length = None
# Monolithic build...end of module: rocket/filelike.py
# Monolithic build...start of module: rocket/futures.py
# Import System Modules
import time
try:
from concurrent.futures import Future, ThreadPoolExecutor
from concurrent.futures.thread import _WorkItem
has_futures = True
except ImportError:
has_futures = False
class Future:
pass
class ThreadPoolExecutor:
pass
class _WorkItem:
pass
class WSGIFuture(Future):
def __init__(self, f_dict, *args, **kwargs):
Future.__init__(self, *args, **kwargs)
self.timeout = None
self._mem_dict = f_dict
self._lifespan = 30
self._name = None
self._start_time = time.time()
def set_running_or_notify_cancel(self):
if time.time() - self._start_time >= self._lifespan:
self.cancel()
else:
return super(WSGIFuture, self).set_running_or_notify_cancel()
def remember(self, name, lifespan=None):
self._lifespan = lifespan or self._lifespan
if name in self._mem_dict:
raise NameError('Cannot remember future by name "%s". ' % name +
'A future already exists with that name.')
self._name = name
self._mem_dict[name] = self
return self
def forget(self):
if self._name in self._mem_dict and self._mem_dict[self._name] is self:
del self._mem_dict[self._name]
self._name = None
class _WorkItem(object):
def __init__(self, future, fn, args, kwargs):
self.future = future
self.fn = fn
self.args = args
self.kwargs = kwargs
def run(self):
if not self.future.set_running_or_notify_cancel():
return
try:
result = self.fn(*self.args, **self.kwargs)
except BaseException:
e = sys.exc_info()[1]
self.future.set_exception(e)
else:
self.future.set_result(result)
class WSGIExecutor(ThreadPoolExecutor):
multithread = True
multiprocess = False
def __init__(self, *args, **kwargs):
ThreadPoolExecutor.__init__(self, *args, **kwargs)
self.futures = dict()
def submit(self, fn, *args, **kwargs):
if self._shutdown_lock.acquire():
if self._shutdown:
self._shutdown_lock.release()
raise RuntimeError(
'Cannot schedule new futures after shutdown')
f = WSGIFuture(self.futures)
w = _WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
self._adjust_thread_count()
self._shutdown_lock.release()
return f
else:
return False
class FuturesMiddleware(object):
"Futures middleware that adds a Futures Executor to the environment"
def __init__(self, app, threads=5):
self.app = app
self.executor = WSGIExecutor(threads)
def __call__(self, environ, start_response):
environ["wsgiorg.executor"] = self.executor
environ["wsgiorg.futures"] = self.executor.futures
return self.app(environ, start_response)
# Monolithic build...end of module: rocket/futures.py
# Monolithic build...start of module: rocket/listener.py
# Import System Modules
import os
import socket
import logging
import traceback
from threading import Thread
try:
import ssl
from ssl import SSLError
has_ssl = True
except ImportError:
has_ssl = False
class SSLError(socket.error):
pass
# Import Package Modules
# package imports removed in monolithic build
class Listener(Thread):
"""The Listener class is a class responsible for accepting connections
and queuing them to be processed by a worker thread."""
def __init__(self, interface, queue_size, active_queue, *args, **kwargs):
Thread.__init__(self, *args, **kwargs)
# Instance variables
self.active_queue = active_queue
self.interface = interface
self.addr = interface[0]
self.port = interface[1]
self.secure = len(interface) >= 4
self.clientcert_req = (len(interface) == 5 and interface[4])
self.thread = None
self.ready = False
# Error Log
self.err_log = logging.getLogger('Rocket.Errors.Port%i' % self.port)
self.err_log.addHandler(NullHandler())
# Build the socket
if ':' in self.addr:
listener = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
else:
listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if not listener:
self.err_log.error("Failed to get socket.")
return
if self.secure:
if not has_ssl:
self.err_log.error("ssl module required to serve HTTPS.")
return
elif not os.path.exists(interface[2]):
data = (interface[2], interface[0], interface[1])
self.err_log.error("Cannot find key file "
"'%s'. Cannot bind to %s:%s" % data)
return
elif not os.path.exists(interface[3]):
data = (interface[3], interface[0], interface[1])
self.err_log.error("Cannot find certificate file "
"'%s'. Cannot bind to %s:%s" % data)
return
if self.clientcert_req and not os.path.exists(interface[4]):
data = (interface[4], interface[0], interface[1])
self.err_log.error("Cannot find root ca certificate file "
"'%s'. Cannot bind to %s:%s" % data)
return
# Set socket options
try:
listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except:
msg = "Cannot share socket. Using %s:%i exclusively."
self.err_log.warning(msg % (self.addr, self.port))
try:
if not IS_JYTHON:
listener.setsockopt(socket.IPPROTO_TCP,
socket.TCP_NODELAY,
1)
except:
msg = "Cannot set TCP_NODELAY, things might run a little slower"
self.err_log.warning(msg)
try:
listener.bind((self.addr, self.port))
except:
msg = "Socket %s:%i in use by other process and it won't share."
self.err_log.error(msg % (self.addr, self.port))
else:
# We want socket operations to timeout periodically so we can
# check if the server is shutting down
listener.settimeout(THREAD_STOP_CHECK_INTERVAL)
# Listen for new connections allowing queue_size number of
# connections to wait before rejecting a connection.
listener.listen(queue_size)
self.listener = listener
self.ready = True
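# Editorial sketch, not part of the original source: the ``interface`` tuple
# accepted by __init__ takes one of these forms (file names are hypothetical):
#     ('0.0.0.0', 8000)                                       plain HTTP
#     ('0.0.0.0', 8443, 'host.key', 'host.crt')               HTTPS
#     ('0.0.0.0', 8443, 'host.key', 'host.crt', 'ca.crt')     HTTPS + client certs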
def wrap_socket(self, sock):
try:
if self.clientcert_req:
ca_certs = self.interface[4]
cert_reqs = ssl.CERT_OPTIONAL
sock = ssl.wrap_socket(sock,
keyfile=self.interface[2],
certfile=self.interface[3],
server_side=True,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
ssl_version=ssl.PROTOCOL_SSLv23)
else:
sock = ssl.wrap_socket(sock,
keyfile=self.interface[2],
certfile=self.interface[3],
server_side=True,
ssl_version=ssl.PROTOCOL_SSLv23)
except SSLError:
# Generally this happens when an HTTP request is received on a
# secure socket. We don't do anything because it will be detected
# by Worker and dealt with appropriately.
pass
return sock
def start(self):
if not self.ready:
self.err_log.warning('Listener started when not ready.')
return
if self.thread is not None and self.thread.isAlive():
self.err_log.warning('Listener already running.')
return
self.thread = Thread(target=self.listen, name="Port" + str(self.port))
self.thread.start()
def isAlive(self):
if self.thread is None:
return False
return self.thread.isAlive()
def join(self):
if self.thread is None:
return
self.ready = False
self.thread.join()
del self.thread
self.thread = None
self.ready = True
def listen(self):
if __debug__:
self.err_log.debug('Entering main loop.')
while True:
try:
sock, addr = self.listener.accept()
if self.secure:
sock = self.wrap_socket(sock)
self.active_queue.put(((sock, addr),
self.interface[1],
self.secure))
except socket.timeout:
# socket.timeout will be raised every
# THREAD_STOP_CHECK_INTERVAL seconds. When that happens,
# we check if it's time to die.
if not self.ready:
if __debug__:
self.err_log.debug('Listener exiting.')
return
else:
continue
except:
self.err_log.error(traceback.format_exc())
# Monolithic build...end of module: rocket/listener.py
# Monolithic build...start of module: rocket/main.py
# Import System Modules
import sys
import time
import socket
import logging
import traceback
from threading import Lock
try:
from queue import Queue
except ImportError:
from Queue import Queue
# Import Package Modules
# package imports removed in monolithic build
# Setup Logging
log = logging.getLogger('Rocket')
log.addHandler(NullHandler())
class Rocket(object):
"""The Rocket class is responsible for handling threads and accepting and
dispatching connections."""
def __init__(self,
interfaces=('127.0.0.1', 8000),
method='wsgi',
app_info=None,
min_threads=None,
max_threads=None,
queue_size=None,
timeout=600,
handle_signals=True):
self.handle_signals = handle_signals
self.startstop_lock = Lock()
self.timeout = timeout
if not isinstance(interfaces, list):
self.interfaces = [interfaces]
else:
self.interfaces = interfaces
if min_threads is None:
min_threads = DEFAULTS['MIN_THREADS']
if max_threads is None:
max_threads = DEFAULTS['MAX_THREADS']
if not queue_size:
if hasattr(socket, 'SOMAXCONN'):
queue_size = socket.SOMAXCONN
else:
queue_size = DEFAULTS['LISTEN_QUEUE_SIZE']
if max_threads and queue_size > max_threads:
queue_size = max_threads
if isinstance(app_info, dict):
app_info['server_software'] = SERVER_SOFTWARE
self.monitor_queue = Queue()
self.active_queue = Queue()
self._threadpool = ThreadPool(get_method(method),
app_info=app_info,
active_queue=self.active_queue,
monitor_queue=self.monitor_queue,
min_threads=min_threads,
max_threads=max_threads)
# Build our socket listeners
self.listeners = [Listener(
i, queue_size, self.active_queue) for i in self.interfaces]
for ndx in range(len(self.listeners) - 1, 0, -1):
if not self.listeners[ndx].ready:
del self.listeners[ndx]
if not self.listeners:
log.critical("No interfaces to listen on...closing.")
sys.exit(1)
def _sigterm(self, signum, frame):
log.info('Received SIGTERM')
self.stop()
def _sighup(self, signum, frame):
log.info('Received SIGHUP')
self.restart()
def start(self, background=False):
log.info('Starting %s' % SERVER_SOFTWARE)
self.startstop_lock.acquire()
try:
# Set up our shutdown signals
if self.handle_signals:
try:
import signal
signal.signal(signal.SIGTERM, self._sigterm)
signal.signal(signal.SIGUSR1, self._sighup)
except:
log.debug('This platform does not support signals.')
# Start our worker threads
self._threadpool.start()
# Start our monitor thread
self._monitor = Monitor(self.monitor_queue,
self.active_queue,
self.timeout,
self._threadpool)
self._monitor.setDaemon(True)
self._monitor.start()
# I know that EXPR and A or B is bad but I'm keeping it for Py2.4
# compatibility.
str_extract = lambda l: (l.addr, l.port, l.secure and '*' or '')
msg = 'Listening on sockets: '
msg += ', '.join(
['%s:%i%s' % str_extract(l) for l in self.listeners])
log.info(msg)
for l in self.listeners:
l.start()
finally:
self.startstop_lock.release()
if background:
return
while self._monitor.isAlive():
try:
time.sleep(THREAD_STOP_CHECK_INTERVAL)
except KeyboardInterrupt:
# Capture a keyboard interrupt when running from a console
break
except:
if self._monitor.isAlive():
log.error(traceback.format_exc())
continue
return self.stop()
def stop(self, stoplogging=False):
log.info('Stopping %s' % SERVER_SOFTWARE)
self.startstop_lock.acquire()
try:
# Stop listeners
for l in self.listeners:
l.ready = False
# Encourage a context switch
time.sleep(0.01)
for l in self.listeners:
if l.isAlive():
l.join()
# Stop Monitor
self._monitor.stop()
if self._monitor.isAlive():
self._monitor.join()
# Stop Worker threads
self._threadpool.stop()
if stoplogging:
logging.shutdown()
msg = "Calling logging.shutdown() is now the responsibility of \
the application developer. Please update your \
applications to no longer call rocket.stop(True)"
try:
import warnings
raise warnings.DeprecationWarning(msg)
except ImportError:
raise RuntimeError(msg)
finally:
self.startstop_lock.release()
def restart(self):
self.stop()
self.start()
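# Editorial usage sketch, not part of the original source: serving a minimal
# WSGI callable. The address, port and app are hypothetical; the 'wsgi' method
# name selects WSGIWorker via get_method() further down in this build.
#     def hello(environ, start_response):
#         start_response('200 OK', [('Content-Type', 'text/plain')])
#         return [b'Hello, world!\n']
#     Rocket(('127.0.0.1', 8000), 'wsgi', {'wsgi_app': hello}).start()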
def CherryPyWSGIServer(bind_addr,
wsgi_app,
numthreads=10,
server_name=None,
max=-1,
request_queue_size=5,
timeout=10,
shutdown_timeout=5):
""" A Cherrypy wsgiserver-compatible wrapper. """
max_threads = max
if max_threads < 0:
max_threads = 0
return Rocket(bind_addr, 'wsgi', {'wsgi_app': wsgi_app},
min_threads=numthreads,
max_threads=max_threads,
queue_size=request_queue_size,
timeout=timeout)
# Monolithic build...end of module: rocket/main.py
# Monolithic build...start of module: rocket/monitor.py
# Import System Modules
import time
import logging
import select
from threading import Thread
# Import Package Modules
# package imports removed in monolithic build
class Monitor(Thread):
# Monitor worker class.
def __init__(self,
monitor_queue,
active_queue,
timeout,
threadpool,
*args,
**kwargs):
Thread.__init__(self, *args, **kwargs)
self._threadpool = threadpool
# Instance Variables
self.monitor_queue = monitor_queue
self.active_queue = active_queue
self.timeout = timeout
self.log = logging.getLogger('Rocket.Monitor')
self.log.addHandler(NullHandler())
self.connections = set()
self.active = False
def run(self):
self.active = True
conn_list = list()
list_changed = False
# We need to make sure the queue is empty before we start
while not self.monitor_queue.empty():
self.monitor_queue.get()
if __debug__:
self.log.debug('Entering monitor loop.')
# Enter thread main loop
while self.active:
# Move the queued connections to the selection pool
while not self.monitor_queue.empty():
if __debug__:
self.log.debug('In "receive timed-out connections" loop.')
c = self.monitor_queue.get()
if c is None:
# A non-client is a signal to die
if __debug__:
self.log.debug('Received a death threat.')
self.stop()
break
self.log.debug('Received a timed out connection.')
if __debug__:
assert(c not in self.connections)
if IS_JYTHON:
# Jython requires a socket to be in Non-blocking mode in
# order to select on it.
c.setblocking(False)
if __debug__:
self.log.debug('Adding connection to monitor list.')
self.connections.add(c)
list_changed = True
# Wait on those connections
if list_changed:
conn_list = list(self.connections)
list_changed = False
try:
if len(conn_list):
readable = select.select(conn_list,
[],
[],
THREAD_STOP_CHECK_INTERVAL)[0]
else:
time.sleep(THREAD_STOP_CHECK_INTERVAL)
readable = []
if not self.active:
break
# If we have any readable connections, put them back
for r in readable:
if __debug__:
self.log.debug('Restoring readable connection')
if IS_JYTHON:
# Jython requires a socket to be in Non-blocking mode in
# order to select on it, but the rest of the code requires
# that it be in blocking mode.
r.setblocking(True)
r.start_time = time.time()
self.active_queue.put(r)
self.connections.remove(r)
list_changed = True
except:
if self.active:
raise
else:
break
# If we have any stale connections, kill them off.
if self.timeout:
now = time.time()
stale = set()
for c in self.connections:
if (now - c.start_time) >= self.timeout:
stale.add(c)
for c in stale:
if __debug__:
# "EXPR and A or B" kept for Py2.4 compatibility
data = (
c.client_addr, c.server_port, c.ssl and '*' or '')
self.log.debug(
'Flushing stale connection: %s:%i%s' % data)
self.connections.remove(c)
list_changed = True
try:
c.close()
finally:
del c
# Dynamically resize the threadpool to adapt to our changing needs.
self._threadpool.dynamic_resize()
def stop(self):
self.active = False
if __debug__:
self.log.debug('Flushing waiting connections')
while self.connections:
c = self.connections.pop()
try:
c.close()
finally:
del c
if __debug__:
self.log.debug('Flushing queued connections')
while not self.monitor_queue.empty():
c = self.monitor_queue.get()
if c is None:
continue
try:
c.close()
finally:
del c
# Place a None sentry value to cause the monitor to die.
self.monitor_queue.put(None)
# Monolithic build...end of module: rocket/monitor.py
# Monolithic build...start of module: rocket/threadpool.py
# Import System Modules
import logging
# Import Package Modules
# package imports removed in monolithic build
# Setup Logging
log = logging.getLogger('Rocket.Errors.ThreadPool')
log.addHandler(NullHandler())
class ThreadPool:
"""The ThreadPool class is a container class for all the worker threads. It
manages the number of actively running threads."""
def __init__(self,
method,
app_info,
active_queue,
monitor_queue,
min_threads=DEFAULTS['MIN_THREADS'],
max_threads=DEFAULTS['MAX_THREADS'],
):
if __debug__:
log.debug("Initializing ThreadPool.")
self.check_for_dead_threads = 0
self.active_queue = active_queue
self.worker_class = method
self.min_threads = min_threads
self.max_threads = max_threads
self.monitor_queue = monitor_queue
self.stop_server = False
self.alive = False
# TODO - Optimize this based on some real-world usage data
self.grow_threshold = int(max_threads / 10) + 2
if not isinstance(app_info, dict):
app_info = dict()
if has_futures and app_info.get('futures'):
app_info['executor'] = WSGIExecutor(max([DEFAULTS['MIN_THREADS'],
2]))
app_info.update(max_threads=max_threads,
min_threads=min_threads)
self.min_threads = min_threads
self.app_info = app_info
self.threads = set()
def start(self):
self.stop_server = False
if __debug__:
log.debug("Starting threads.")
self.grow(self.min_threads)
self.alive = True
def stop(self):
self.alive = False
if __debug__:
log.debug("Stopping threads.")
self.stop_server = True
# Prompt the threads to die
self.shrink(len(self.threads))
# Stop futures initially
if has_futures and self.app_info.get('futures'):
if __debug__:
log.debug("Future executor is present. Python will not "
"exit until all jobs have finished.")
self.app_info['executor'].shutdown(wait=False)
# Give them the gun
#active_threads = [t for t in self.threads if t.isAlive()]
#while active_threads:
# t = active_threads.pop()
# t.kill()
# Wait until they pull the trigger
for t in self.threads:
if t.isAlive():
t.join()
# Clean up the mess
self.bring_out_your_dead()
def bring_out_your_dead(self):
# Remove dead threads from the pool
dead_threads = [t for t in self.threads if not t.isAlive()]
for t in dead_threads:
if __debug__:
log.debug("Removing dead thread: %s." % t.getName())
try:
# Py2.4 complains here so we put it in a try block
self.threads.remove(t)
except:
pass
self.check_for_dead_threads -= len(dead_threads)
def grow(self, amount=None):
if self.stop_server:
return
if not amount:
amount = self.max_threads
if self.alive:
amount = min([amount, self.max_threads - len(self.threads)])
if __debug__:
log.debug("Growing by %i." % amount)
for x in range(amount):
worker = self.worker_class(self.app_info,
self.active_queue,
self.monitor_queue)
worker.setDaemon(True)
self.threads.add(worker)
worker.start()
def shrink(self, amount=1):
if __debug__:
log.debug("Shrinking by %i." % amount)
self.check_for_dead_threads += amount
for x in range(amount):
self.active_queue.put(None)
def dynamic_resize(self):
if (self.max_threads > self.min_threads or self.max_threads == 0):
if self.check_for_dead_threads > 0:
self.bring_out_your_dead()
queueSize = self.active_queue.qsize()
threadCount = len(self.threads)
if __debug__:
log.debug("Examining ThreadPool. %i threads and %i Q'd conxions"
% (threadCount, queueSize))
if queueSize == 0 and threadCount > self.min_threads:
self.shrink()
elif queueSize > self.grow_threshold:
self.grow(queueSize)
# Monolithic build...end of module: rocket/threadpool.py
# Monolithic build...start of module: rocket/worker.py
# Import System Modules
import re
import sys
import socket
import logging
import traceback
from wsgiref.headers import Headers
from threading import Thread
from datetime import datetime
try:
from urllib import unquote
except ImportError:
from urllib.parse import unquote
try:
from io import StringIO
except ImportError:
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
try:
from ssl import SSLError
except ImportError:
class SSLError(socket.error):
pass
# Import Package Modules
# package imports removed in monolithic build
# Define Constants
re_SLASH = re.compile('%2F', re.IGNORECASE)
re_REQUEST_LINE = re.compile(r"""^
(?P<method>OPTIONS|GET|HEAD|POST|PUT|DELETE|TRACE|CONNECT) # Request Method
\ # (single space)
(
(?P<scheme>[^:/]+) # Scheme
(://) #
(?P<host>[^/]+) # Host
)? #
(?P<path>(\*|/[^ \?]*)) # Path
(\? (?P<query_string>[^ ]*))? # Query String
\ # (single space)
(?P<protocol>HTTPS?/1\.[01]) # Protocol
$
""", re.X)
LOG_LINE = '%(client_ip)s - "%(request_line)s" - %(status)s %(size)s'
RESPONSE = '''\
%s %s
Content-Length: %i
Content-Type: %s
%s
'''
if IS_JYTHON:
HTTP_METHODS = set(['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT',
'DELETE', 'TRACE', 'CONNECT'])
class Worker(Thread):
"""The Worker class is a base class responsible for receiving connections
and (in a subclass) for running an application to process the connection """
def __init__(self,
app_info,
active_queue,
monitor_queue,
*args,
**kwargs):
Thread.__init__(self, *args, **kwargs)
# Instance Variables
self.app_info = app_info
self.active_queue = active_queue
self.monitor_queue = monitor_queue
self.size = 0
self.status = "200 OK"
self.closeConnection = True
self.request_line = ""
self.protocol = 'HTTP/1.1'
# Request Log
self.req_log = logging.getLogger('Rocket.Requests')
self.req_log.addHandler(NullHandler())
# Error Log
self.err_log = logging.getLogger('Rocket.Errors.' + self.getName())
self.err_log.addHandler(NullHandler())
def _handleError(self, typ, val, tb):
if typ == SSLError:
if 'timed out' in str(val.args[0]):
typ = SocketTimeout
if typ == SocketTimeout:
if __debug__:
self.err_log.debug('Socket timed out')
self.monitor_queue.put(self.conn)
return True
if typ == SocketClosed:
self.closeConnection = True
if __debug__:
self.err_log.debug('Client closed socket')
return False
if typ == BadRequest:
self.closeConnection = True
if __debug__:
self.err_log.debug('Client sent a bad request')
return True
if typ == socket.error:
self.closeConnection = True
if val.args[0] in IGNORE_ERRORS_ON_CLOSE:
if __debug__:
self.err_log.debug('Ignorable socket Error received...'
'closing connection.')
return False
else:
self.status = "999 Utter Server Failure"
tb_fmt = traceback.format_exception(typ, val, tb)
self.err_log.error('Unhandled Error when serving '
'connection:\n' + '\n'.join(tb_fmt))
return False
self.closeConnection = True
tb_fmt = traceback.format_exception(typ, val, tb)
self.err_log.error('\n'.join(tb_fmt))
self.send_response('500 Server Error')
return False
def run(self):
if __debug__:
self.err_log.debug('Entering main loop.')
# Enter thread main loop
while True:
conn = self.active_queue.get()
if not conn:
# A non-client is a signal to die
if __debug__:
self.err_log.debug('Received a death threat.')
return conn
if isinstance(conn, tuple):
conn = Connection(*conn)
self.conn = conn
if conn.ssl != conn.secure:
self.err_log.info('Received HTTP connection on HTTPS port.')
self.send_response('400 Bad Request')
self.closeConnection = True
conn.close()
continue
else:
if __debug__:
self.err_log.debug('Received a connection.')
self.closeConnection = False
# Enter connection serve loop
while True:
if __debug__:
self.err_log.debug('Serving a request')
try:
self.run_app(conn)
except:
exc = sys.exc_info()
handled = self._handleError(*exc)
if handled:
break
finally:
if self.request_line:
log_info = dict(client_ip=conn.client_addr,
time=datetime.now().strftime('%c'),
status=self.status.split(' ')[0],
size=self.size,
request_line=self.request_line)
self.req_log.info(LOG_LINE % log_info)
if self.closeConnection:
try:
conn.close()
except:
self.err_log.error(str(traceback.format_exc()))
break
def run_app(self, conn):
# Must be overridden with a method that reads the request from the socket
# and sends a response.
self.closeConnection = True
raise NotImplementedError('Overload this method!')
def send_response(self, status):
stat_msg = status.split(' ', 1)[1]
msg = RESPONSE % (self.protocol,
status,
len(stat_msg),
'text/plain',
stat_msg)
try:
self.conn.sendall(b(msg))
except socket.timeout:
self.closeConnection = True
msg = 'Tried to send "%s" to client but received timeout error'
self.err_log.error(msg % status)
except socket.error:
self.closeConnection = True
msg = 'Tried to send "%s" to client but received socket error'
self.err_log.error(msg % status)
def read_request_line(self, sock_file):
self.request_line = ''
try:
# Grab the request line
d = sock_file.readline()
if PY3K:
d = d.decode('ISO-8859-1')
if d == '\r\n':
# Allow an extra NEWLINE at the beginning per HTTP 1.1 spec
if __debug__:
self.err_log.debug('Client sent newline')
d = sock_file.readline()
if PY3K:
d = d.decode('ISO-8859-1')
except socket.timeout:
raise SocketTimeout('Socket timed out before request.')
except TypeError:
raise SocketClosed(
'SSL bug caused closure of socket. See '
'"https://groups.google.com/d/topic/web2py/P_Gw0JxWzCs".')
d = d.strip()
if not d:
if __debug__:
self.err_log.debug(
'Client did not send a recognizable request.')
raise SocketClosed('Client closed socket.')
self.request_line = d
# NOTE: I've replaced the traditional method of procedurally breaking
# apart the request line with a (rather unsightly) regular expression.
# However, Java's regexp support sucks so bad that it actually takes
# longer in Jython to process the regexp than procedurally. So I've
# left the old code here for Jython's sake...for now.
if IS_JYTHON:
return self._read_request_line_jython(d)
match = re_REQUEST_LINE.match(d)
if not match:
self.send_response('400 Bad Request')
raise BadRequest
req = match.groupdict()
for k, v in req.iteritems():
if not v:
req[k] = ""
if k == 'path':
req['path'] = r'%2F'.join(
[unquote(x) for x in re_SLASH.split(v)])
self.protocol = req['protocol']
return req
def _read_request_line_jython(self, d):
d = d.strip()
try:
method, uri, proto = d.split(' ')
if not proto.startswith('HTTP') or \
proto[-3:] not in ('1.0', '1.1') or \
method not in HTTP_METHODS:
self.send_response('400 Bad Request')
raise BadRequest
except ValueError:
self.send_response('400 Bad Request')
raise BadRequest
req = dict(method=method, protocol=proto)
scheme = ''
host = ''
if uri == '*' or uri.startswith('/'):
path = uri
elif '://' in uri:
scheme, rest = uri.split('://')
host, path = rest.split('/', 1)
path = '/' + path
else:
self.send_response('400 Bad Request')
raise BadRequest
query_string = ''
if '?' in path:
path, query_string = path.split('?', 1)
path = r'%2F'.join([unquote(x) for x in re_SLASH.split(path)])
req.update(path=path,
query_string=query_string,
scheme=scheme.lower(),
host=host)
return req
def read_headers(self, sock_file):
try:
headers = dict()
lname = None
lval = None
while True:
l = sock_file.readline()
if PY3K:
try:
l = str(l, 'ISO-8859-1')
except UnicodeDecodeError:
self.err_log.warning(
'Client sent invalid header: ' + repr(l))
if l.strip().replace('\0', '') == '':
break
if l[0] in ' \t' and lname:
# Some headers take more than one line
lval += ' ' + l.strip()
else:
# HTTP header values are latin-1 encoded
l = l.split(':', 1)
# HTTP header names are us-ascii encoded
lname = l[0].strip().upper().replace('-', '_')
lval = l[-1].strip()
headers[str(lname)] = str(lval)
except socket.timeout:
raise SocketTimeout("Socket timed out before request.")
return headers
class SocketTimeout(Exception):
"Exception for when a socket times out between requests."
pass
class BadRequest(Exception):
"Exception for when a client sends an incomprehensible request."
pass
class SocketClosed(Exception):
"Exception for when a socket is closed by the client."
pass
class ChunkedReader(object):
def __init__(self, sock_file):
self.stream = sock_file
self.chunk_size = 0
def _read_header(self):
chunk_len = ""
try:
while "" == chunk_len:
chunk_len = self.stream.readline().strip()
return int(chunk_len, 16)
except ValueError:
return 0
def read(self, size):
data = b('')
chunk_size = self.chunk_size
while size:
if not chunk_size:
chunk_size = self._read_header()
if size < chunk_size:
data += self.stream.read(size)
chunk_size -= size
break
else:
if not chunk_size:
break
data += self.stream.read(chunk_size)
size -= chunk_size
chunk_size = 0
self.chunk_size = chunk_size
return data
def readline(self):
data = b('')
c = self.read(1)
while c and c != b('\n'):
data += c
c = self.read(1)
data += c
return data
def readlines(self):
yield self.readline()
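# Editorial sketch, not part of the original source: decoding an already
# buffered chunked body, shown with Python 2 str semantics (a bytes stream
# would be used on Python 3). The hex chunk-size headers are '4', '5' and '0'.
#     body = StringIO('4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n')
#     ChunkedReader(body).read(9)   -> 'Wikipedia'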
def get_method(method):
methods = dict(wsgi=WSGIWorker)
return methods[method.lower()]
# Monolithic build...end of module: rocket/worker.py
# Monolithic build...start of module: rocket/methods/__init__.py
# Monolithic build...end of module: rocket/methods/__init__.py
# Monolithic build...start of module: rocket/methods/wsgi.py
# Import System Modules
import sys
import socket
from wsgiref.headers import Headers
from wsgiref.util import FileWrapper
# Import Package Modules
# package imports removed in monolithic build
if PY3K:
from email.utils import formatdate
else:
# Caps Utils for Py2.4 compatibility
from email.Utils import formatdate
# Define Constants
NEWLINE = b('\r\n')
HEADER_RESPONSE = '''HTTP/1.1 %s\r\n%s'''
BASE_ENV = {'SERVER_NAME': SERVER_NAME,
'SCRIPT_NAME': '', # Direct call WSGI does not need a name
'wsgi.errors': sys.stderr,
'wsgi.version': (1, 0),
'wsgi.multiprocess': False,
'wsgi.run_once': False,
'wsgi.file_wrapper': FileWrapper
}
class WSGIWorker(Worker):
def __init__(self, *args, **kwargs):
"""Builds some instance variables that will last the life of the
thread."""
Worker.__init__(self, *args, **kwargs)
if isinstance(self.app_info, dict):
multithreaded = self.app_info.get('max_threads') != 1
else:
multithreaded = False
self.base_environ = dict(
{'SERVER_SOFTWARE': self.app_info['server_software'],
'wsgi.multithread': multithreaded,
})
self.base_environ.update(BASE_ENV)
# Grab our application
self.app = self.app_info.get('wsgi_app')
if not hasattr(self.app, "__call__"):
raise TypeError("The wsgi_app specified (%s) is not a valid WSGI application." % repr(self.app))
# Enable futures
if has_futures and self.app_info.get('futures'):
executor = self.app_info['executor']
self.base_environ.update({"wsgiorg.executor": executor,
"wsgiorg.futures": executor.futures})
def build_environ(self, sock_file, conn):
""" Build the execution environment. """
# Grab the request line
request = self.read_request_line(sock_file)
# Copy the Base Environment
environ = self.base_environ.copy()
# Grab the headers
for k, v in self.read_headers(sock_file).iteritems():
environ[str('HTTP_' + k)] = v
# Add CGI Variables
environ['REQUEST_METHOD'] = request['method']
environ['PATH_INFO'] = request['path']
environ['SERVER_PROTOCOL'] = request['protocol']
environ['SERVER_PORT'] = str(conn.server_port)
environ['REMOTE_PORT'] = str(conn.client_port)
environ['REMOTE_ADDR'] = str(conn.client_addr)
environ['QUERY_STRING'] = request['query_string']
if 'HTTP_CONTENT_LENGTH' in environ:
environ['CONTENT_LENGTH'] = environ['HTTP_CONTENT_LENGTH']
if 'HTTP_CONTENT_TYPE' in environ:
environ['CONTENT_TYPE'] = environ['HTTP_CONTENT_TYPE']
# Save the request method for later
self.request_method = environ['REQUEST_METHOD']
# Add Dynamic WSGI Variables
if conn.ssl:
environ['wsgi.url_scheme'] = 'https'
environ['HTTPS'] = 'on'
try:
peercert = conn.socket.getpeercert(binary_form=True)
environ['SSL_CLIENT_RAW_CERT'] = \
peercert and ssl.DER_cert_to_PEM_cert(peercert)
except Exception:
print(sys.exc_info()[1])
else:
environ['wsgi.url_scheme'] = 'http'
if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked':
environ['wsgi.input'] = ChunkedReader(sock_file)
else:
environ['wsgi.input'] = sock_file
return environ
def send_headers(self, data, sections):
h_set = self.header_set
# Does the app want us to send output chunked?
self.chunked = h_set.get('Transfer-Encoding', '').lower() == 'chunked'
# Add a Date header if it's not there already
if not 'Date' in h_set:
h_set['Date'] = formatdate(usegmt=True)
# Add a Server header if it's not there already
if not 'Server' in h_set:
h_set['Server'] = HTTP_SERVER_SOFTWARE
if 'Content-Length' in h_set:
self.size = int(h_set['Content-Length'])
else:
s = int(self.status.split(' ')[0])
if (s < 200 or s not in (204, 205, 304)) and not self.chunked:
if sections == 1 or self.protocol != 'HTTP/1.1':
# Add a Content-Length header because it's not there
self.size = len(data)
h_set['Content-Length'] = str(self.size)
else:
# If they sent us more than one section, we blow chunks
h_set['Transfer-Encoding'] = 'Chunked'
self.chunked = True
if __debug__:
self.err_log.debug('Adding header...'
'Transfer-Encoding: Chunked')
if 'Connection' not in h_set:
# If the application did not provide a connection header,
# fill it in
client_conn = self.environ.get('HTTP_CONNECTION', '').lower()
if self.environ['SERVER_PROTOCOL'] == 'HTTP/1.1':
# HTTP 1.1 defaults to keep-alive connections
if client_conn:
h_set['Connection'] = client_conn
else:
h_set['Connection'] = 'keep-alive'
else:
# HTTP < 1.1 supports keep-alive but it's quirky
# so we don't support it
h_set['Connection'] = 'close'
# Close our connection if we need to.
self.closeConnection = h_set.get('Connection', '').lower() == 'close'
# Build our output headers
header_data = HEADER_RESPONSE % (self.status, str(h_set))
# Send the headers
if __debug__:
self.err_log.debug('Sending Headers: %s' % repr(header_data))
self.conn.sendall(b(header_data))
self.headers_sent = True
def write_warning(self, data, sections=None):
self.err_log.warning('WSGI app called write method directly. This is '
'deprecated behavior. Please update your app.')
return self.write(data, sections)
def write(self, data, sections=None):
""" Write the data to the output socket. """
if self.error[0]:
self.status = self.error[0]
data = b(self.error[1])
if not self.headers_sent:
self.send_headers(data, sections)
if self.request_method != 'HEAD':
try:
if self.chunked:
self.conn.sendall(b('%x\r\n%s\r\n' % (len(data), data)))
else:
self.conn.sendall(data)
except socket.timeout:
self.closeConnection = True
except socket.error:
# But some clients will close the connection before that
# resulting in a socket error.
self.closeConnection = True
def start_response(self, status, response_headers, exc_info=None):
""" Store the HTTP status and headers to be sent when self.write is
called. """
if exc_info:
try:
if self.headers_sent:
# Re-raise original exception if headers sent
# because this violates WSGI specification.
raise
finally:
exc_info = None
elif self.header_set:
raise AssertionError("Headers already set!")
if PY3K and not isinstance(status, str):
self.status = str(status, 'ISO-8859-1')
else:
self.status = status
# Make sure headers are bytes objects
try:
self.header_set = Headers(response_headers)
except UnicodeDecodeError:
self.error = ('500 Internal Server Error',
'HTTP Headers should be bytes')
self.err_log.error('Received HTTP Headers from client that contain'
' invalid characters for Latin-1 encoding.')
return self.write_warning
def run_app(self, conn):
self.size = 0
self.header_set = Headers([])
self.headers_sent = False
self.error = (None, None)
self.chunked = False
sections = None
output = None
if __debug__:
self.err_log.debug('Getting sock_file')
# Build our file-like object
if PY3K:
sock_file = conn.makefile(mode='rb', buffering=BUF_SIZE)
else:
sock_file = conn.makefile(BUF_SIZE)
try:
# Read the headers and build our WSGI environment
self.environ = environ = self.build_environ(sock_file, conn)
# Handle 100 Continue
if environ.get('HTTP_EXPECT', '') == '100-continue':
res = environ['SERVER_PROTOCOL'] + ' 100 Continue\r\n\r\n'
conn.sendall(b(res))
# Send it to our WSGI application
output = self.app(environ, self.start_response)
if not hasattr(output, '__len__') and not hasattr(output, '__iter__'):
self.error = ('500 Internal Server Error',
'WSGI applications must return a list or '
'generator type.')
if hasattr(output, '__len__'):
sections = len(output)
for data in output:
# Don't send headers until body appears
if data:
self.write(data, sections)
if self.chunked:
# If chunked, send our final chunk length
self.conn.sendall(b('0\r\n\r\n'))
elif not self.headers_sent:
# Send headers if the body was empty
self.send_headers('', sections)
# Don't capture exceptions here. The Worker class handles
# them appropriately.
finally:
if __debug__:
self.err_log.debug('Finally closing output and sock_file')
if hasattr(output, 'close'):
output.close()
sock_file.close()
# Monolithic build...end of module: rocket/methods/wsgi.py
| gpl-2.0 |
MSylvia/pyNES | pynes/tests/adc_test.py | 27 | 4340 | # -*- coding: utf-8 -*-
'''
ADC, Add with Carry Test
This is an arithmetic instruction of the 6502.
'''
import unittest
from pynes.tests import MetaInstructionCase
class AdcImmTest(unittest.TestCase):
'''
Test the arithmetic operation ADC between decimal 16
and the content of the accumulator.
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC #$10'
lex = [('T_INSTRUCTION', 'ADC'), ('T_HEX_NUMBER', '#$10')]
syn = ['S_IMMEDIATE']
code = [0x69, 0x10]
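# Editorial note, not part of the original source: MetaInstructionCase is
# assumed to generate the actual test methods from the class attributes above,
# checking that the lexer tokenizes ``asm`` into ``lex``, the parser reduces
# the tokens to ``syn``, and the assembler emits the bytes in ``code``
# (0x69 0x10 for ADC immediate here).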
class AdcImmWithDecimalTest(unittest.TestCase):
'''
Test the arithmetic operation ADC between decimal 10
and the content of the accumulator.
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC #10'
lex = [('T_INSTRUCTION', 'ADC'), ('T_DECIMAL_NUMBER', '#10')]
syn = ['S_IMMEDIATE']
code = [0x69, 0x0A]
class AdcImmWithBinaryTest(unittest.TestCase):
'''
Test the arithmetic operation ADC between binary %00000100
(Decimal 4) and the content of the accumulator.
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC #%00000100'
lex = [('T_INSTRUCTION', 'ADC'), ('T_BINARY_NUMBER', '#%00000100')]
syn = ['S_IMMEDIATE']
code = [0x69, 0x04]
class AdcZpTest(unittest.TestCase):
'''
Test the arithmetic operation ADC between the content of
the accumulator and the content of the zero page address.
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC $00'
lex = [('T_INSTRUCTION', 'ADC'), ('T_ADDRESS', '$00')]
syn = ['S_ZEROPAGE']
code = [0x65, 0x00]
class AdcZpxTest(unittest.TestCase):
'''
Test the arithmetic operation ADC between the content of the
accumulator and the content of the zero-page address
calculated by adding the content of X to $10.
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC $10,X'
lex = [('T_INSTRUCTION', 'ADC'), ('T_ADDRESS', '$10'),
('T_SEPARATOR', ','),('T_REGISTER','X')]
syn = ['S_ZEROPAGE_X']
code = [0x75, 0x10]
class AdcAbsTest(unittest.TestCase):
'''
Test the arithmetic operation ADC between the content of
the accumulator and the content located at address $1234.
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC $1234'
lex = [('T_INSTRUCTION', 'ADC'), ('T_ADDRESS', '$1234')]
syn = ['S_ABSOLUTE']
code = [0x6d, 0x34, 0x12]
class AdcAbsx(unittest.TestCase):
'''
Test the arithmetic operation ADC between the content of the
accumulator and the content located at address $1234
plus the content of X.
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC $1234,X'
lex = [('T_INSTRUCTION', 'ADC'), ('T_ADDRESS', '$1234'),
('T_SEPARATOR', ','), ('T_REGISTER', 'X')]
syn = ['S_ABSOLUTE_X']
code = [0x7d, 0x34, 0x12]
class AdcAbsy(unittest.TestCase):
'''
Test the arithmetic operation ADC between the content of the
accumulator and the content located at address $1234
plus the content of Y.
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC $1234,Y'
lex = [('T_INSTRUCTION', 'ADC'), ('T_ADDRESS', '$1234'),
('T_SEPARATOR', ','), ('T_REGISTER', 'Y')]
syn = ['S_ABSOLUTE_Y']
code = [0x79, 0x34, 0x12]
class AdcIndx(unittest.TestCase):
'''
Test the arithmetic ADC operation between the content of the
accumulator and the content located at the address
read from the zero-page address obtained by adding the
content of X to $20 (indexed indirect addressing).
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC ($20,X)'
lex = [('T_INSTRUCTION', 'ADC'), ('T_OPEN', '('),
('T_ADDRESS', '$20'), ('T_SEPARATOR', ','),
('T_REGISTER', 'X'), ('T_CLOSE', ')')]
syn = ['S_INDIRECT_X']
code = [0x61, 0x20]
class AdcIndy(unittest.TestCase):
'''
Test arithmetic operation ADC between the content of the
accumulator and the content located at the address
read from the address stored at zero-page $20, with the
content of Y added to it (indirect indexed addressing).
'''
__metaclass__ = MetaInstructionCase
asm = 'ADC ($20),Y'
lex = [('T_INSTRUCTION', 'ADC'), ('T_OPEN', '('),
('T_ADDRESS', '$20'), ('T_CLOSE', ')'),
('T_SEPARATOR', ','), ('T_REGISTER', 'Y')]
syn = ['S_INDIRECT_Y']
code = [0x71, 0x20]
| bsd-3-clause |
awalls-cx18/gnuradio | gr-blocks/python/blocks/qa_skiphead.py | 2 | 4748 | #!/usr/bin/env python
#
# Copyright 2007,2010,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
import pmt
import numpy
def make_tag(key, value, offset, srcid=None):
tag = gr.tag_t()
tag.key = pmt.string_to_symbol(key)
tag.value = pmt.to_pmt(value)
tag.offset = offset
if srcid is not None:
tag.srcid = pmt.to_pmt(srcid)
return tag
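# Editorial sketch, not part of the original source: make_tag wraps the pmt
# conversions so a stream tag can be declared in one line, e.g.
#     make_tag('foo', 'bar', 1, 'src')
# builds a gr.tag_t with key 'foo', value 'bar', absolute offset 1 and source
# id 'src', matching the tags attached to the vector source in the tests below.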
class test_skiphead(gr_unittest.TestCase):
def setUp(self):
self.tb = gr.top_block()
self.src_data = [int(x) for x in range(65536)]
def tearDown(self):
self.tb = None
def test_skip_0(self):
skip_cnt = 0
expected_result = tuple(self.src_data[skip_cnt:])
src1 = blocks.vector_source_i(self.src_data)
op = blocks.skiphead(gr.sizeof_int, skip_cnt)
dst1 = blocks.vector_sink_i()
self.tb.connect(src1, op, dst1)
self.tb.run()
dst_data = dst1.data()
self.assertEqual(expected_result, dst_data)
def test_skip_1(self):
skip_cnt = 1
expected_result = tuple(self.src_data[skip_cnt:])
src1 = blocks.vector_source_i(self.src_data)
op = blocks.skiphead(gr.sizeof_int, skip_cnt)
dst1 = blocks.vector_sink_i()
self.tb.connect(src1, op, dst1)
self.tb.run()
dst_data = dst1.data()
self.assertEqual(expected_result, dst_data)
def test_skip_1023(self):
skip_cnt = 1023
expected_result = tuple(self.src_data[skip_cnt:])
src1 = blocks.vector_source_i(self.src_data)
op = blocks.skiphead(gr.sizeof_int, skip_cnt)
dst1 = blocks.vector_sink_i()
self.tb.connect(src1, op, dst1)
self.tb.run()
dst_data = dst1.data()
self.assertEqual(expected_result, dst_data)
def test_skip_6339(self):
skip_cnt = 6339
expected_result = tuple(self.src_data[skip_cnt:])
src1 = blocks.vector_source_i(self.src_data)
op = blocks.skiphead(gr.sizeof_int, skip_cnt)
dst1 = blocks.vector_sink_i()
self.tb.connect(src1, op, dst1)
self.tb.run()
dst_data = dst1.data()
self.assertEqual(expected_result, dst_data)
def test_skip_12678(self):
skip_cnt = 12678
expected_result = tuple(self.src_data[skip_cnt:])
src1 = blocks.vector_source_i(self.src_data)
op = blocks.skiphead(gr.sizeof_int, skip_cnt)
dst1 = blocks.vector_sink_i()
self.tb.connect(src1, op, dst1)
self.tb.run()
dst_data = dst1.data()
self.assertEqual(expected_result, dst_data)
def test_skip_all(self):
skip_cnt = len(self.src_data)
expected_result = tuple(self.src_data[skip_cnt:])
src1 = blocks.vector_source_i(self.src_data)
op = blocks.skiphead(gr.sizeof_int, skip_cnt)
dst1 = blocks.vector_sink_i()
self.tb.connect(src1, op, dst1)
self.tb.run()
dst_data = dst1.data()
self.assertEqual(expected_result, dst_data)
def test_skip_tags(self):
skip_cnt = 25
expected_result = tuple(self.src_data[skip_cnt:])
src_tags = tuple([make_tag('foo', 'bar', 1, 'src'),
make_tag('baz', 'qux', 50, 'src')])
src1 = blocks.vector_source_i(self.src_data, tags=src_tags)
op = blocks.skiphead(gr.sizeof_int, skip_cnt)
dst1 = blocks.vector_sink_i()
self.tb.connect(src1, op, dst1)
self.tb.run()
dst_data = dst1.data()
self.assertEqual(expected_result, dst_data)
self.assertEqual(dst1.tags()[0].offset, 25, "Tag offset is incorrect")
self.assertEqual(len(dst1.tags()), 1, "Wrong number of tags received")
self.assertEqual(pmt.to_python(
dst1.tags()[0].key), "baz", "Tag key is incorrect")
self.assertEqual(pmt.to_python(
dst1.tags()[0].value), "qux", "Tag value is incorrect")
if __name__ == '__main__':
gr_unittest.run(test_skiphead, "test_skiphead.xml")
| gpl-3.0 |
materialsproject/pymatgen | pymatgen/io/xtb/inputs.py | 1 | 4045 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Classes for writing XTB input files
"""
import logging
import os
from typing import Dict, Optional, Union, List
from monty.json import MSONable
from pymatgen.core import Molecule
__author__ = "Alex Epstein"
__copyright__ = "Copyright 2020, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Alex Epstein"
__email__ = "[email protected]"
__credits__ = "Sam Blau, Evan Spotte-Smith"
logger = logging.getLogger(__name__)
class CRESTInput(MSONable):
"""
An object representing CREST input files.
Because CREST is controlled through command line flags and external
files, the CRESTInput class mainly consists of methods for containing
and writing external files.
"""
def __init__(
self,
molecule: Molecule,
working_dir: str = ".",
coords_filename: Optional[str] = "crest_in.xyz",
constraints: Optional[Dict[str, Union[List[int], float]]] = None,
):
"""
:param molecule (pymatgen Molecule object):
Input molecule, the only required CREST input.
:param working_dir (str):
Location to write input files, defaults to current directory
:param coords_filename (str):
Name of input coordinates file
:param constraints (Dict):
Dictionary of common editable parameters for .constrains file.
{"atoms": [List of 1-indexed atoms to fix], "force_constant":
float}
"""
self.molecule = molecule
self.coords_filename = coords_filename
self.constraints = constraints
self.working_dir = working_dir
def write_input_files(self):
"""
Write input files to working directory
"""
self.molecule.to(filename=os.path.join(self.working_dir, self.coords_filename))
if self.constraints:
constrains_string = self.constrains_template(
molecule=self.molecule,
reference_fnm=self.coords_filename,
constraints=self.constraints,
)
with open(".constrains", "w") as f:
f.write(constrains_string)
@staticmethod
def constrains_template(molecule, reference_fnm, constraints) -> str:
"""
:param molecule (pymatgen Molecule):
Molecule the constraints will be performed on
:param reference_fnm:
Name of file containing reference structure in same directory
:param constraints:
Dictionary of common editable parameters for .constrains file.
{"atoms": [List of 1-indexed atoms to fix], "force_constant":
float}
:return:
String for .constrains file
"""
atoms_to_constrain = constraints["atoms"]
force_constant = constraints["force_constant"]
reference_fnm = reference_fnm
mol = molecule
atoms_for_mtd = [i for i in range(1, len(mol.sites) + 1) if i not in atoms_to_constrain]
# Write as 1-3,5 instead of 1,2,3,5
interval_list = [atoms_for_mtd[0]]
for i, v in enumerate(atoms_for_mtd):
if v + 1 not in atoms_for_mtd:
interval_list.append(v)
if i != len(atoms_for_mtd) - 1:
interval_list.append(atoms_for_mtd[i + 1])
force_constant = force_constant
allowed_mtd_string = ",".join(
["{}-{}".format(interval_list[i], interval_list[i + 1]) for i in range(len(interval_list)) if i % 2 == 0]
)
constrains_file_string = (
"$constrain\n"
+ " atoms: {}\n".format(",".join([str(i) for i in atoms_to_constrain]))
+ " force constant={}\n".format(force_constant)
+ " reference={}\n".format(reference_fnm)
+ "$metadyn\n"
+ " atoms: {}\n".format(allowed_mtd_string)
+ "$end"
)
return constrains_file_string
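# Hypothetical usage sketch (not part of the original module; the molecule
# geometry and force constant below are illustrative assumptions): build a
# small molecule, constrain its first two atoms, and write the CREST inputs.
#
#   from pymatgen.core import Molecule
#
#   water = Molecule(
#       species=["O", "H", "H"],
#       coords=[[0.0, 0.0, 0.0], [0.0, 0.76, 0.59], [0.0, -0.76, 0.59]],
#   )
#   crest_in = CRESTInput(
#       molecule=water,
#       constraints={"atoms": [1, 2], "force_constant": 0.5},
#   )
#   crest_in.write_input_files()  # emits crest_in.xyz and a .constrains file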
| mit |
shayneholmes/plover | plover/machine/base.py | 7 | 5860 | # Copyright (c) 2010-2011 Joshua Harlan Lifton.
# See LICENSE.txt for details.
# TODO: add tests for all machines
# TODO: add tests for new status callbacks
"""Base classes for machine types. Do not use directly."""
import serial
import threading
from plover.exception import SerialPortException
import collections
STATE_STOPPED = 'closed'
STATE_INITIALIZING = 'initializing'
STATE_RUNNING = 'connected'
STATE_ERROR = 'disconnected'
class StenotypeBase(object):
"""The base class for all Stenotype classes."""
def __init__(self):
self.stroke_subscribers = []
self.state_subscribers = []
self.state = STATE_STOPPED
self.suppress = None
def start_capture(self):
"""Begin listening for output from the stenotype machine."""
pass
def stop_capture(self):
"""Stop listening for output from the stenotype machine."""
pass
def add_stroke_callback(self, callback):
"""Subscribe to output from the stenotype machine.
Argument:
callback -- The function to call whenever there is output from
the stenotype machine and output is being captured.
"""
self.stroke_subscribers.append(callback)
def remove_stroke_callback(self, callback):
"""Unsubscribe from output from the stenotype machine.
Argument:
callback -- A function that was previously subscribed.
"""
self.stroke_subscribers.remove(callback)
def add_state_callback(self, callback):
self.state_subscribers.append(callback)
def remove_state_callback(self, callback):
self.state_subscribers.remove(callback)
def _notify(self, steno_keys):
"""Invoke the callback of each subscriber with the given argument."""
# If the stroke matches a command while the keyboard is not suppressed
# then the stroke needs to be suppressed after the fact. One of the
# handlers will set the suppress function. This function is passed in to
# prevent threading issues with the gui.
self.suppress = None
for callback in self.stroke_subscribers:
callback(steno_keys)
if self.suppress:
self._post_suppress(self.suppress, steno_keys)
def _post_suppress(self, suppress, steno_keys):
"""This is a complicated way for the application to tell the machine to
suppress this stroke after the fact. This only currently has meaning for
the keyboard machine so it can backspace over the last stroke when used
to issue a command when plover is 'off'.
"""
pass
def _set_state(self, state):
self.state = state
for callback in self.state_subscribers:
callback(state)
def _stopped(self):
self._set_state(STATE_STOPPED)
def _initializing(self):
self._set_state(STATE_INITIALIZING)
def _ready(self):
self._set_state(STATE_RUNNING)
def _error(self):
self._set_state(STATE_ERROR)
@staticmethod
def get_option_info():
"""Get the default options for this machine."""
return {}
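# Illustrative subscriber sketch (hypothetical, not part of Plover itself):
# an application registers stroke and state callbacks on a concrete machine
# instance before starting capture. SomeMachine stands in for any
# StenotypeBase subclass and is an assumed name.
#
#   def on_stroke(steno_keys):
#       print 'stroke:', steno_keys
#
#   def on_state(state):
#       print 'machine state:', state
#
#   machine = SomeMachine()
#   machine.add_stroke_callback(on_stroke)
#   machine.add_state_callback(on_state)
#   machine.start_capture()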
class ThreadedStenotypeBase(StenotypeBase, threading.Thread):
"""Base class for thread based machines.
Subclasses should override run.
"""
def __init__(self):
threading.Thread.__init__(self)
StenotypeBase.__init__(self)
self.finished = threading.Event()
def run(self):
"""This method should be overridden by a subclass."""
pass
def start_capture(self):
"""Begin listening for output from the stenotype machine."""
self.finished.clear()
self._initializing()
self.start()
def stop_capture(self):
"""Stop listening for output from the stenotype machine."""
self.finished.set()
try:
self.join()
except RuntimeError:
pass
self._stopped()
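# Minimal machine-plugin sketch (hypothetical): a ThreadedStenotypeBase
# subclass overrides run() to poll a device until stop_capture() sets the
# finished event, reporting each chord through _notify(). _poll_device() is
# an assumed helper, not part of this module.
#
#   class DummyMachine(ThreadedStenotypeBase):
#       def run(self):
#           self._ready()
#           while not self.finished.is_set():
#               keys = self._poll_device()
#               if keys:
#                   self._notify(keys)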
class SerialStenotypeBase(ThreadedStenotypeBase):
"""For use with stenotype machines that connect via serial port.
This class implements the three methods necessary for a standard
stenotype interface: start_capture, stop_capture, and
add_callback.
"""
def __init__(self, serial_params):
"""Monitor the stenotype over a serial port.
Keyword arguments are the same as the keyword arguments for a
serial.Serial object.
"""
ThreadedStenotypeBase.__init__(self)
self.serial_port = None
self.serial_params = serial_params
def start_capture(self):
if self.serial_port:
self.serial_port.close()
try:
self.serial_port = serial.Serial(**self.serial_params)
except (serial.SerialException, OSError) as e:
print e
self._error()
return
if self.serial_port is None or not self.serial_port.isOpen():
self._error()
return
return ThreadedStenotypeBase.start_capture(self)
def stop_capture(self):
"""Stop listening for output from the stenotype machine."""
ThreadedStenotypeBase.stop_capture(self)
if self.serial_port:
self.serial_port.close()
@staticmethod
def get_option_info():
"""Get the default options for this machine."""
bool_converter = lambda s: s == 'True'
sb = lambda s: int(float(s)) if float(s).is_integer() else float(s)
return {
'port': (None, str), # TODO: make first port default
'baudrate': (9600, int),
'bytesize': (8, int),
'parity': ('N', str),
'stopbits': (1, sb),
'timeout': (2.0, float),
'xonxoff': (False, bool_converter),
'rtscts': (False, bool_converter)
}
| gpl-2.0 |
google-code/android-scripting | python/src/Lib/test/test_curses.py | 55 | 8582 | #
# Test script for the curses module
#
# This script doesn't actually display anything very coherent, but it
# does call every method and function.
#
# Functions not tested: {def,reset}_{shell,prog}_mode, getch(), getstr(),
# init_color()
# Only called, not tested: getmouse(), ungetmouse()
#
import curses, sys, tempfile, os
import curses.panel
# Optionally test curses module. This currently requires that the
# 'curses' resource be given on the regrtest command line using the -u
# option. If not available, nothing after this line will be executed.
from test.test_support import requires, TestSkipped
requires('curses')
# XXX: if newterm was supported we could use it instead of initscr and not exit
term = os.environ.get('TERM')
if not term or term == 'unknown':
raise TestSkipped, "$TERM=%r, calling initscr() may cause exit" % term
if sys.platform == "cygwin":
raise TestSkipped("cygwin's curses mostly just hangs")
def window_funcs(stdscr):
"Test the methods of windows"
win = curses.newwin(10,10)
win = curses.newwin(5,5, 5,5)
win2 = curses.newwin(15,15, 5,5)
for meth in [stdscr.addch, stdscr.addstr]:
for args in [('a'), ('a', curses.A_BOLD),
(4,4, 'a'), (5,5, 'a', curses.A_BOLD)]:
meth(*args)
for meth in [stdscr.box, stdscr.clear, stdscr.clrtobot,
stdscr.clrtoeol, stdscr.cursyncup, stdscr.delch,
stdscr.deleteln, stdscr.erase, stdscr.getbegyx,
stdscr.getbkgd, stdscr.getkey, stdscr.getmaxyx,
stdscr.getparyx, stdscr.getyx, stdscr.inch,
stdscr.insertln, stdscr.instr, stdscr.is_wintouched,
win.noutrefresh, stdscr.redrawwin, stdscr.refresh,
stdscr.standout, stdscr.standend, stdscr.syncdown,
stdscr.syncup, stdscr.touchwin, stdscr.untouchwin]:
meth()
stdscr.addnstr('1234', 3)
stdscr.addnstr('1234', 3, curses.A_BOLD)
stdscr.addnstr(4,4, '1234', 3)
stdscr.addnstr(5,5, '1234', 3, curses.A_BOLD)
stdscr.attron(curses.A_BOLD)
stdscr.attroff(curses.A_BOLD)
stdscr.attrset(curses.A_BOLD)
stdscr.bkgd(' ')
stdscr.bkgd(' ', curses.A_REVERSE)
stdscr.bkgdset(' ')
stdscr.bkgdset(' ', curses.A_REVERSE)
win.border(65, 66, 67, 68,
69, 70, 71, 72)
win.border('|', '!', '-', '_',
'+', '\\', '#', '/')
try:
win.border(65, 66, 67, 68,
69, [], 71, 72)
except TypeError:
pass
else:
raise RuntimeError, "Expected win.border() to raise TypeError"
stdscr.clearok(1)
win4 = stdscr.derwin(2,2)
win4 = stdscr.derwin(1,1, 5,5)
win4.mvderwin(9,9)
stdscr.echochar('a')
stdscr.echochar('a', curses.A_BOLD)
stdscr.hline('-', 5)
stdscr.hline('-', 5, curses.A_BOLD)
stdscr.hline(1,1,'-', 5)
stdscr.hline(1,1,'-', 5, curses.A_BOLD)
stdscr.idcok(1)
stdscr.idlok(1)
stdscr.immedok(1)
stdscr.insch('c')
stdscr.insdelln(1)
stdscr.insnstr('abc', 3)
stdscr.insnstr('abc', 3, curses.A_BOLD)
stdscr.insnstr(5, 5, 'abc', 3)
stdscr.insnstr(5, 5, 'abc', 3, curses.A_BOLD)
stdscr.insstr('def')
stdscr.insstr('def', curses.A_BOLD)
stdscr.insstr(5, 5, 'def')
stdscr.insstr(5, 5, 'def', curses.A_BOLD)
stdscr.is_linetouched(0)
stdscr.keypad(1)
stdscr.leaveok(1)
stdscr.move(3,3)
win.mvwin(2,2)
stdscr.nodelay(1)
stdscr.notimeout(1)
win2.overlay(win)
win2.overwrite(win)
win2.overlay(win, 1, 2, 3, 3, 2, 1)
win2.overwrite(win, 1, 2, 3, 3, 2, 1)
stdscr.redrawln(1,2)
stdscr.scrollok(1)
stdscr.scroll()
stdscr.scroll(2)
stdscr.scroll(-3)
stdscr.move(12, 2)
stdscr.setscrreg(10,15)
win3 = stdscr.subwin(10,10)
win3 = stdscr.subwin(10,10, 5,5)
stdscr.syncok(1)
stdscr.timeout(5)
stdscr.touchline(5,5)
stdscr.touchline(5,5,0)
stdscr.vline('a', 3)
stdscr.vline('a', 3, curses.A_STANDOUT)
stdscr.chgat(5, 2, 3, curses.A_BLINK)
stdscr.chgat(3, curses.A_BOLD)
stdscr.chgat(5, 8, curses.A_UNDERLINE)
stdscr.chgat(curses.A_BLINK)
stdscr.refresh()
stdscr.vline(1,1, 'a', 3)
stdscr.vline(1,1, 'a', 3, curses.A_STANDOUT)
if hasattr(curses, 'resize'):
stdscr.resize()
if hasattr(curses, 'enclose'):
stdscr.enclose()
def module_funcs(stdscr):
"Test module-level functions"
for func in [curses.baudrate, curses.beep, curses.can_change_color,
curses.cbreak, curses.def_prog_mode, curses.doupdate,
curses.filter, curses.flash, curses.flushinp,
curses.has_colors, curses.has_ic, curses.has_il,
curses.isendwin, curses.killchar, curses.longname,
curses.nocbreak, curses.noecho, curses.nonl,
curses.noqiflush, curses.noraw,
curses.reset_prog_mode, curses.termattrs,
curses.termname, curses.erasechar, curses.getsyx]:
func()
# Functions that actually need arguments
if curses.tigetstr("cnorm"):
curses.curs_set(1)
curses.delay_output(1)
curses.echo() ; curses.echo(1)
f = tempfile.TemporaryFile()
stdscr.putwin(f)
f.seek(0)
curses.getwin(f)
f.close()
curses.halfdelay(1)
curses.intrflush(1)
curses.meta(1)
curses.napms(100)
curses.newpad(50,50)
win = curses.newwin(5,5)
win = curses.newwin(5,5, 1,1)
curses.nl() ; curses.nl(1)
curses.putp('abc')
curses.qiflush()
curses.raw() ; curses.raw(1)
curses.setsyx(5,5)
curses.tigetflag('hc')
curses.tigetnum('co')
curses.tigetstr('cr')
curses.tparm('cr')
curses.typeahead(sys.__stdin__.fileno())
curses.unctrl('a')
curses.ungetch('a')
curses.use_env(1)
# Functions only available on a few platforms
if curses.has_colors():
curses.start_color()
curses.init_pair(2, 1,1)
curses.color_content(1)
curses.color_pair(2)
curses.pair_content(curses.COLOR_PAIRS - 1)
curses.pair_number(0)
if hasattr(curses, 'use_default_colors'):
curses.use_default_colors()
if hasattr(curses, 'keyname'):
curses.keyname(13)
if hasattr(curses, 'has_key'):
curses.has_key(13)
if hasattr(curses, 'getmouse'):
(availmask, oldmask) = curses.mousemask(curses.BUTTON1_PRESSED)
# an availmask of 0 indicates that mouse support is not available.
if availmask != 0:
curses.mouseinterval(10)
# just verify these don't cause errors
m = curses.getmouse()
curses.ungetmouse(*m)
if hasattr(curses, 'is_term_resized'):
curses.is_term_resized(*stdscr.getmaxyx())
if hasattr(curses, 'resizeterm'):
curses.resizeterm(*stdscr.getmaxyx())
if hasattr(curses, 'resize_term'):
curses.resize_term(*stdscr.getmaxyx())
def unit_tests():
from curses import ascii
for ch, expected in [('a', 'a'), ('A', 'A'),
(';', ';'), (' ', ' '),
('\x7f', '^?'), ('\n', '^J'), ('\0', '^@'),
# Meta-bit characters
('\x8a', '!^J'), ('\xc1', '!A'),
]:
if ascii.unctrl(ch) != expected:
print 'curses.unctrl fails on character', repr(ch)
def test_userptr_without_set(stdscr):
w = curses.newwin(10, 10)
p = curses.panel.new_panel(w)
# try to access userptr() before calling set_userptr() -- segfaults
try:
p.userptr()
raise RuntimeError, 'userptr should fail since not set'
except curses.panel.error:
pass
def test_resize_term(stdscr):
if hasattr(curses, 'resizeterm'):
lines, cols = curses.LINES, curses.COLS
curses.resizeterm(lines - 1, cols + 1)
if curses.LINES != lines - 1 or curses.COLS != cols + 1:
raise RuntimeError, "Expected resizeterm to update LINES and COLS"
def main(stdscr):
curses.savetty()
try:
module_funcs(stdscr)
window_funcs(stdscr)
test_userptr_without_set(stdscr)
test_resize_term(stdscr)
finally:
curses.resetty()
if __name__ == '__main__':
curses.wrapper(main)
unit_tests()
else:
# testing setupterm() inside initscr/endwin
# causes terminal breakage
curses.setupterm(fd=sys.__stdout__.fileno())
try:
stdscr = curses.initscr()
main(stdscr)
finally:
curses.endwin()
unit_tests()
| apache-2.0 |
msarana/selenium_python | ENV/Lib/site-packages/pip/utils/ui.py | 25 | 11320 | from __future__ import absolute_import
from __future__ import division
import itertools
import sys
from signal import signal, SIGINT, default_int_handler
import time
import contextlib
import logging
from pip.compat import WINDOWS
from pip.utils import format_size
from pip.utils.logging import get_indentation
from pip._vendor import six
from pip._vendor.progress.bar import Bar, IncrementalBar
from pip._vendor.progress.helpers import (WritelnMixin,
HIDE_CURSOR, SHOW_CURSOR)
from pip._vendor.progress.spinner import Spinner
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
logger = logging.getLogger(__name__)
def _select_progress_class(preferred, fallback):
encoding = getattr(preferred.file, "encoding", None)
# If we don't know what encoding this file is in, then we'll just assume
# that it doesn't support unicode and use the ASCII bar.
if not encoding:
return fallback
# Collect all of the possible characters we want to use with the preferred
# bar.
characters = [
getattr(preferred, "empty_fill", six.text_type()),
getattr(preferred, "fill", six.text_type()),
]
characters += list(getattr(preferred, "phases", []))
# Try to decode the characters we're using for the bar using the encoding
# of the given file, if this works then we'll assume that we can use the
# fancier bar and if not we'll fall back to the plaintext bar.
try:
six.text_type().join(characters).encode(encoding)
except UnicodeEncodeError:
return fallback
else:
return preferred
_BaseBar = _select_progress_class(IncrementalBar, Bar)
class InterruptibleMixin(object):
"""
Helper to ensure that self.finish() gets called on keyboard interrupt.
This allows downloads to be interrupted without leaving temporary state
(like hidden cursors) behind.
This class is similar to the progress library's existing SigIntMixin
helper, but as of version 1.2, that helper has the following problems:
1. It calls sys.exit().
2. It discards the existing SIGINT handler completely.
3. It leaves its own handler in place even after an uninterrupted finish,
which will have unexpected delayed effects if the user triggers an
unrelated keyboard interrupt some time after a progress-displaying
download has already completed, for example.
"""
def __init__(self, *args, **kwargs):
"""
Save the original SIGINT handler for later.
"""
super(InterruptibleMixin, self).__init__(*args, **kwargs)
self.original_handler = signal(SIGINT, self.handle_sigint)
# If signal() returns None, the previous handler was not installed from
# Python, and we cannot restore it. This probably should not happen,
# but if it does, we must restore something sensible instead, at least.
# The least bad option should be Python's default SIGINT handler, which
# just raises KeyboardInterrupt.
if self.original_handler is None:
self.original_handler = default_int_handler
def finish(self):
"""
Restore the original SIGINT handler after finishing.
This should happen regardless of whether the progress display finishes
normally, or gets interrupted.
"""
super(InterruptibleMixin, self).finish()
signal(SIGINT, self.original_handler)
def handle_sigint(self, signum, frame):
"""
Call self.finish() before delegating to the original SIGINT handler.
This handler should only be in place while the progress display is
active.
"""
self.finish()
self.original_handler(signum, frame)
class DownloadProgressMixin(object):
def __init__(self, *args, **kwargs):
super(DownloadProgressMixin, self).__init__(*args, **kwargs)
self.message = (" " * (get_indentation() + 2)) + self.message
@property
def downloaded(self):
return format_size(self.index)
@property
def download_speed(self):
# Avoid zero division errors...
if self.avg == 0.0:
return "..."
return format_size(1 / self.avg) + "/s"
@property
def pretty_eta(self):
if self.eta:
return "eta %s" % self.eta_td
return ""
def iter(self, it, n=1):
for x in it:
yield x
self.next(n)
self.finish()
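# Usage sketch (hypothetical, not from pip itself): wrap an iterable of
# fixed-size download chunks so the bar advances by chunk_size per item.
# total_bytes, chunk_size, chunks and handle() are assumed names.
#
#   bar = DownloadProgressBar(max=total_bytes)
#   for chunk in bar.iter(chunks, n=chunk_size):
#       handle(chunk)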
class WindowsMixin(object):
def __init__(self, *args, **kwargs):
# The Windows terminal does not support the hide/show cursor ANSI codes
# even with colorama. So we'll ensure that hide_cursor is False on
# Windows.
# This call needs to go before the super() call, so that hide_cursor
# is set in time. The base progress bar class writes the "hide cursor"
# code to the terminal in its init, so if we don't set this soon
# enough, we get a "hide" with no corresponding "show"...
if WINDOWS and self.hide_cursor:
self.hide_cursor = False
super(WindowsMixin, self).__init__(*args, **kwargs)
# Check if we are running on Windows and we have the colorama module,
# if we do then wrap our file with it.
if WINDOWS and colorama:
self.file = colorama.AnsiToWin32(self.file)
# The progress code expects to be able to call self.file.isatty()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.isatty = lambda: self.file.wrapped.isatty()
# The progress code expects to be able to call self.file.flush()
# but the colorama.AnsiToWin32() object doesn't have that, so we'll
# add it.
self.file.flush = lambda: self.file.wrapped.flush()
class DownloadProgressBar(WindowsMixin, InterruptibleMixin,
DownloadProgressMixin, _BaseBar):
file = sys.stdout
message = "%(percent)d%%"
suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s"
class DownloadProgressSpinner(WindowsMixin, InterruptibleMixin,
DownloadProgressMixin, WritelnMixin, Spinner):
file = sys.stdout
suffix = "%(downloaded)s %(download_speed)s"
def next_phase(self):
if not hasattr(self, "_phaser"):
self._phaser = itertools.cycle(self.phases)
return next(self._phaser)
def update(self):
message = self.message % self
phase = self.next_phase()
suffix = self.suffix % self
line = ''.join([
message,
" " if message else "",
phase,
" " if suffix else "",
suffix,
])
self.writeln(line)
################################################################
# Generic "something is happening" spinners
#
# We don't even try using progress.spinner.Spinner here because it's actually
# simpler to reimplement from scratch than to coerce their code into doing
# what we need.
################################################################
@contextlib.contextmanager
def hidden_cursor(file):
# The Windows terminal does not support the hide/show cursor ANSI codes,
# even via colorama. So don't even try.
if WINDOWS:
yield
else:
file.write(HIDE_CURSOR)
try:
yield
finally:
file.write(SHOW_CURSOR)
class RateLimiter(object):
def __init__(self, min_update_interval_seconds):
self._min_update_interval_seconds = min_update_interval_seconds
self._last_update = 0
def ready(self):
now = time.time()
delta = now - self._last_update
return delta >= self._min_update_interval_seconds
def reset(self):
self._last_update = time.time()
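# Usage sketch (illustrative): throttle some periodic work to at most one
# update every 2 seconds; do_update() is an assumed placeholder.
#
#   limiter = RateLimiter(min_update_interval_seconds=2.0)
#   while True:
#       if limiter.ready():
#           do_update()
#           limiter.reset()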
class InteractiveSpinner(object):
def __init__(self, message, file=None, spin_chars="-\\|/",
# Empirically, 8 updates/second looks nice
min_update_interval_seconds=0.125):
self._message = message
if file is None:
file = sys.stdout
self._file = file
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._finished = False
self._spin_cycle = itertools.cycle(spin_chars)
self._file.write(" " * get_indentation() + self._message + " ... ")
self._width = 0
def _write(self, status):
assert not self._finished
# Erase what we wrote before by backspacing to the beginning, writing
# spaces to overwrite the old text, and then backspacing again
backup = "\b" * self._width
self._file.write(backup + " " * self._width + backup)
# Now we have a blank slate to add our status
self._file.write(status)
self._width = len(status)
self._file.flush()
self._rate_limiter.reset()
def spin(self):
if self._finished:
return
if not self._rate_limiter.ready():
return
self._write(next(self._spin_cycle))
def finish(self, final_status):
if self._finished:
return
self._write(final_status)
self._file.write("\n")
self._file.flush()
self._finished = True
# Used for dumb terminals, non-interactive installs (no tty), etc.
# We still print updates occasionally (once every 60 seconds by default) to
# act as a keep-alive for systems like Travis-CI that take lack-of-output as
# an indication that a task has frozen.
class NonInteractiveSpinner(object):
def __init__(self, message, min_update_interval_seconds=60):
self._message = message
self._finished = False
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._update("started")
def _update(self, status):
assert not self._finished
self._rate_limiter.reset()
logger.info("%s: %s", self._message, status)
def spin(self):
if self._finished:
return
if not self._rate_limiter.ready():
return
self._update("still running...")
def finish(self, final_status):
if self._finished:
return
self._update("finished with status '%s'" % (final_status,))
self._finished = True
@contextlib.contextmanager
def open_spinner(message):
# Interactive spinner goes directly to sys.stdout rather than being routed
# through the logging system, but it acts like it has level INFO,
# i.e. it's only displayed if we're at level INFO or better.
# Non-interactive spinner goes through the logging system, so it is always
# in sync with logging configuration.
if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO:
spinner = InteractiveSpinner(message)
else:
spinner = NonInteractiveSpinner(message)
try:
with hidden_cursor(sys.stdout):
yield spinner
except KeyboardInterrupt:
spinner.finish("canceled")
raise
except Exception:
spinner.finish("error")
raise
else:
spinner.finish("done")
| apache-2.0 |
edgedb/edgedb | edb/pgsql/delta.py | 1 | 188387 | # mypy: ignore-errors
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2008-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import annotations
import collections.abc
import dataclasses
import itertools
import textwrap
from typing import *
from edb import errors
from edb.edgeql import ast as ql_ast
from edb.edgeql import qltypes as ql_ft
from edb.edgeql import compiler as qlcompiler
from edb.schema import annos as s_anno
from edb.schema import casts as s_casts
from edb.schema import scalars as s_scalars
from edb.schema import objtypes as s_objtypes
from edb.schema import constraints as s_constr
from edb.schema import database as s_db
from edb.schema import delta as sd
from edb.schema import expr as s_expr
from edb.schema import expraliases as s_aliases
from edb.schema import extensions as s_exts
from edb.schema import functions as s_funcs
from edb.schema import indexes as s_indexes
from edb.schema import links as s_links
from edb.schema import lproperties as s_props
from edb.schema import migrations as s_migrations
from edb.schema import modules as s_mod
from edb.schema import name as sn
from edb.schema import objects as so
from edb.schema import operators as s_opers
from edb.schema import pointers as s_pointers
from edb.schema import pseudo as s_pseudo
from edb.schema import roles as s_roles
from edb.schema import sources as s_sources
from edb.schema import types as s_types
from edb.schema import version as s_ver
from edb.schema import utils as s_utils
from edb.common import markup
from edb.common import ordered
from edb.common import topological
from edb.common import uuidgen
from edb.ir import pathid as irpathid
from edb.ir import typeutils as irtyputils
from edb.ir import utils as irutils
from edb.pgsql import common
from edb.pgsql import dbops
from edb.server import defines as edbdef
from edb.server import pgcluster
from . import ast as pg_ast
from .common import qname as q
from .common import quote_literal as ql
from .common import quote_ident as qi
from .common import quote_type as qt
from . import compiler
from . import codegen
from . import schemamech
from . import types
if TYPE_CHECKING:
from edb.schema import schema as s_schema
def has_table(obj, schema):
if isinstance(obj, s_objtypes.ObjectType):
return not (
obj.is_compound_type(schema) or
obj.get_is_derived(schema) or
obj.is_view(schema)
)
elif obj.is_pure_computable(schema) or obj.get_is_derived(schema):
return False
elif obj.generic(schema):
return (
not isinstance(obj, s_props.Property)
and str(obj.get_name(schema)) != 'std::link'
)
elif obj.is_link_property(schema):
return not obj.singular(schema)
elif not has_table(obj.get_source(schema), schema):
return False
else:
ptr_stor_info = types.get_pointer_storage_info(
obj, resolve_type=False, schema=schema, link_bias=True)
return (
ptr_stor_info is not None
and ptr_stor_info.table_type == 'link'
)
class CommandMeta(sd.CommandMeta):
pass
class MetaCommand(sd.Command, metaclass=CommandMeta):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.pgops = ordered.OrderedSet()
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
for op in self.before_ops:
if not isinstance(op, sd.AlterObjectProperty):
self.pgops.add(op)
for op in self.ops:
if not isinstance(op, sd.AlterObjectProperty):
self.pgops.add(op)
return schema
def generate(self, block: dbops.PLBlock) -> None:
for op in sorted(
self.pgops, key=lambda i: getattr(i, 'priority', 0),
reverse=True):
op.generate(block)
@classmethod
def as_markup(cls, self, *, ctx):
node = markup.elements.lang.TreeNode(name=str(self))
for dd in self.pgops:
if isinstance(dd, AlterObjectProperty):
diff = markup.elements.doc.ValueDiff(
before=repr(dd.old_value), after=repr(dd.new_value))
if dd.new_inherited:
diff.comment = 'inherited'
elif dd.new_computed:
diff.comment = 'computed'
node.add_child(label=dd.property, node=diff)
else:
node.add_child(node=markup.serialize(dd, ctx=ctx))
return node
def _get_backend_params(
self,
context: sd.CommandContext,
) -> pgcluster.BackendRuntimeParams:
ctx_backend_params = context.backend_runtime_params
if ctx_backend_params is not None:
backend_params = cast(
pgcluster.BackendRuntimeParams, ctx_backend_params)
else:
backend_params = pgcluster.get_default_runtime_params()
return backend_params
def _get_instance_params(
self,
context: sd.CommandContext,
) -> pgcluster.BackendInstanceParams:
return self._get_backend_params(context).instance_params
def _get_tenant_id(self, context: sd.CommandContext) -> str:
return self._get_instance_params(context).tenant_id
class CommandGroupAdapted(MetaCommand, adapts=sd.CommandGroup):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = sd.CommandGroup.apply(self, schema, context)
schema = MetaCommand.apply(self, schema, context)
return schema
class Record:
def __init__(self, items):
self._items = items
def __iter__(self):
return iter(self._items)
def __len__(self):
return len(self._items)
def __repr__(self):
return '<_Record {!r}>'.format(self._items)
class ObjectMetaCommand(MetaCommand, sd.ObjectCommand,
metaclass=CommandMeta):
op_priority = 0
class CreateObject(ObjectMetaCommand):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = self.__class__.get_adaptee().apply(self, schema, context)
return ObjectMetaCommand.apply(self, schema, context)
class RenameObject(ObjectMetaCommand):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = self.__class__.get_adaptee().apply(self, schema, context)
return ObjectMetaCommand.apply(self, schema, context)
class RebaseObject(ObjectMetaCommand):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = self.__class__.get_adaptee().apply(self, schema, context)
return ObjectMetaCommand.apply(self, schema, context)
class AlterObject(ObjectMetaCommand):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = ObjectMetaCommand.apply(self, schema, context)
return self.__class__.get_adaptee().apply(self, schema, context)
class DeleteObject(ObjectMetaCommand):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = self.__class__.get_adaptee().apply(self, schema, context)
return ObjectMetaCommand.apply(self, schema, context)
class Nop(MetaCommand, adapts=sd.Nop):
pass
class Query(MetaCommand, adapts=sd.Query):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().apply(schema, context)
sql_tree = compiler.compile_ir_to_sql_tree(
self.expr.irast,
output_format=compiler.OutputFormat.NATIVE_INTERNAL,
explicit_top_cast=irtyputils.type_to_typeref(
schema,
schema.get('std::str'),
),
)
sql_text = codegen.generate_source(sql_tree)
# The INTO _dummy_text bit is needed because PL/pgSQL _really_
# wants the result of a returning query to be stored in a variable,
# and the PERFORM hack does not work if the query has DML CTEs.
self.pgops.add(dbops.Query(
text=f'{sql_text} INTO _dummy_text',
))
return schema
class AlterObjectProperty(MetaCommand, adapts=sd.AlterObjectProperty):
pass
class SchemaVersionCommand(ObjectMetaCommand):
pass
class CreateSchemaVersion(
SchemaVersionCommand,
CreateObject,
adapts=s_ver.CreateSchemaVersion,
):
pass
class AlterSchemaVersion(
SchemaVersionCommand,
AlterObject,
adapts=s_ver.AlterSchemaVersion,
):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().apply(schema, context)
expected_ver = self.get_orig_attribute_value('version')
check = dbops.Query(
f'''
SELECT
edgedb.raise_on_not_null(
(SELECT NULLIF(
(SELECT
version::text
FROM
edgedb."_SchemaSchemaVersion"
FOR UPDATE),
{ql(str(expected_ver))}
)),
'serialization_failure',
msg => (
'Cannot serialize DDL: '
|| (SELECT version::text FROM
edgedb."_SchemaSchemaVersion")
)
)
INTO _dummy_text
'''
)
self.pgops.add(check)
return schema
class GlobalSchemaVersionCommand(ObjectMetaCommand):
pass
class CreateGlobalSchemaVersion(
ObjectMetaCommand,
adapts=s_ver.CreateGlobalSchemaVersion,
):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_ver.CreateGlobalSchemaVersion.apply(self, schema, context)
schema = ObjectMetaCommand.apply(self, schema, context)
ver_id = str(self.scls.id)
ver_name = str(self.scls.get_name(schema))
tenant_id = self._get_tenant_id(context)
self.pgops.add(
dbops.UpdateMetadataSection(
dbops.Database(name=common.get_database_backend_name(
edbdef.EDGEDB_TEMPLATE_DB, tenant_id=tenant_id)),
section='GlobalSchemaVersion',
metadata={
ver_id: {
'id': ver_id,
'name': ver_name,
'version': str(self.scls.get_version(schema)),
'builtin': self.scls.get_builtin(schema),
'internal': self.scls.get_internal(schema),
}
}
)
)
return schema
class AlterGlobalSchemaVersion(
ObjectMetaCommand,
adapts=s_ver.AlterGlobalSchemaVersion,
):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_ver.AlterGlobalSchemaVersion.apply(self, schema, context)
schema = ObjectMetaCommand.apply(self, schema, context)
ver_id = str(self.scls.id)
ver_name = str(self.scls.get_name(schema))
ctx_backend_params = context.backend_runtime_params
if ctx_backend_params is not None:
backend_params = cast(
pgcluster.BackendRuntimeParams, ctx_backend_params)
else:
backend_params = pgcluster.get_default_runtime_params()
instance_params = backend_params.instance_params
capabilities = instance_params.capabilities
tenant_id = instance_params.tenant_id
tpl_db_name = common.get_database_backend_name(
edbdef.EDGEDB_TEMPLATE_DB, tenant_id=tenant_id)
if capabilities & pgcluster.BackendCapabilities.SUPERUSER_ACCESS:
# Only superusers are generally allowed to take an UPDATE
# lock on shared catalogs.
lock = dbops.Query(
f'''
SELECT
description
FROM
pg_catalog.pg_shdescription
WHERE
objoid = (
SELECT oid
FROM pg_database
WHERE datname = {ql(tpl_db_name)}
)
AND classoid = 'pg_database'::regclass::oid
FOR UPDATE
INTO _dummy_text
'''
)
else:
# Without superuser access we have to resort to lock polling.
# This is racy, but is unfortunately the best we can do.
lock = dbops.Query(f'''
SELECT
edgedb.raise_on_not_null(
(
SELECT 'locked'
FROM pg_catalog.pg_locks
WHERE
locktype = 'object'
AND classid = 'pg_database'::regclass::oid
AND objid = (
SELECT oid
FROM pg_database
WHERE
datname = {ql(tpl_db_name)}
)
AND mode = 'ShareUpdateExclusiveLock'
AND granted
AND pid != pg_backend_pid()
),
'serialization_failure',
msg => (
'Cannot serialize global DDL: '
|| (SELECT version::text FROM
edgedb."_SysGlobalSchemaVersion")
)
)
INTO _dummy_text
''')
self.pgops.add(lock)
expected_ver = self.get_orig_attribute_value('version')
check = dbops.Query(
f'''
SELECT
edgedb.raise_on_not_null(
(SELECT NULLIF(
(SELECT
version::text
FROM
edgedb."_SysGlobalSchemaVersion"
),
{ql(str(expected_ver))}
)),
'serialization_failure',
msg => (
'Cannot serialize global DDL: '
|| (SELECT version::text FROM
edgedb."_SysGlobalSchemaVersion")
)
)
INTO _dummy_text
'''
)
self.pgops.add(check)
self.pgops.add(
dbops.UpdateMetadataSection(
dbops.Database(name=tpl_db_name),
section='GlobalSchemaVersion',
metadata={
ver_id: {
'id': ver_id,
'name': ver_name,
'version': str(self.scls.get_version(schema)),
'builtin': self.scls.get_builtin(schema),
'internal': self.scls.get_internal(schema),
}
}
)
)
return schema
class PseudoTypeCommand(ObjectMetaCommand):
pass
class CreatePseudoType(
PseudoTypeCommand,
CreateObject,
adapts=s_pseudo.CreatePseudoType,
):
pass
class TupleCommand(ObjectMetaCommand):
pass
class CreateTuple(TupleCommand, adapts=s_types.CreateTuple):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = self.__class__.get_adaptee().apply(self, schema, context)
schema = TupleCommand.apply(self, schema, context)
if self.scls.is_polymorphic(schema):
return schema
elements = self.scls.get_element_types(schema).items(schema)
ctype = dbops.CompositeType(
name=common.get_backend_name(schema, self.scls, catenate=False),
columns=[
dbops.Column(
name=n,
type=qt(types.pg_type_from_object(
schema, t, persistent_tuples=True)),
)
for n, t in elements
]
)
self.pgops.add(dbops.CreateCompositeType(type=ctype))
return schema
class AlterTuple(TupleCommand, adapts=s_types.AlterTuple):
pass
class RenameTuple(TupleCommand, adapts=s_types.RenameTuple):
pass
class DeleteTuple(TupleCommand, adapts=s_types.DeleteTuple):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
tup = schema.get_global(s_types.Tuple, self.classname)
if not tup.is_polymorphic(schema):
self.pgops.add(dbops.DropCompositeType(
name=common.get_backend_name(schema, tup, catenate=False),
priority=2,
))
schema = self.__class__.get_adaptee().apply(self, schema, context)
schema = TupleCommand.apply(self, schema, context)
return schema
class ExprAliasCommand(ObjectMetaCommand):
pass
class CreateAlias(
ExprAliasCommand,
CreateObject,
adapts=s_aliases.CreateAlias,
):
pass
class RenameAlias(
ExprAliasCommand,
RenameObject,
adapts=s_aliases.RenameAlias,
):
pass
class AlterAlias(
ExprAliasCommand,
AlterObject,
adapts=s_aliases.AlterAlias,
):
pass
class DeleteAlias(
ExprAliasCommand,
DeleteObject,
adapts=s_aliases.DeleteAlias,
):
pass
class TupleExprAliasCommand(ObjectMetaCommand):
pass
class CreateTupleExprAlias(
TupleExprAliasCommand, CreateObject,
adapts=s_types.CreateTupleExprAlias):
pass
class RenameTupleExprAlias(
TupleExprAliasCommand, RenameObject,
adapts=s_types.RenameTupleExprAlias):
pass
class AlterTupleExprAlias(
TupleExprAliasCommand, AlterObject,
adapts=s_types.AlterTupleExprAlias):
pass
class DeleteTupleExprAlias(
TupleExprAliasCommand, DeleteObject,
adapts=s_types.DeleteTupleExprAlias):
pass
class ArrayCommand(ObjectMetaCommand):
pass
class CreateArray(ArrayCommand, adapts=s_types.CreateArray):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = self.__class__.get_adaptee().apply(self, schema, context)
schema = ArrayCommand.apply(self, schema, context)
return schema
class AlterArray(ArrayCommand, adapts=s_types.AlterArray):
pass
class RenameArray(ArrayCommand, adapts=s_types.RenameArray):
pass
class DeleteArray(ArrayCommand, adapts=s_types.DeleteArray):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = self.__class__.get_adaptee().apply(self, schema, context)
schema = ArrayCommand.apply(self, schema, context)
return schema
class ArrayExprAliasCommand(ObjectMetaCommand):
pass
class CreateArrayExprAlias(
ArrayExprAliasCommand, CreateObject,
adapts=s_types.CreateArrayExprAlias):
pass
class RenameArrayExprAlias(
ArrayExprAliasCommand, RenameObject,
adapts=s_types.RenameArrayExprAlias):
pass
class AlterArrayExprAlias(
ArrayExprAliasCommand, AlterObject,
adapts=s_types.AlterArrayExprAlias):
pass
class DeleteArrayExprAlias(
ArrayExprAliasCommand, DeleteObject,
adapts=s_types.DeleteArrayExprAlias):
pass
class ParameterCommand(sd.ObjectCommand,
metaclass=CommandMeta):
pass
class CreateParameter(ParameterCommand, CreateObject,
adapts=s_funcs.CreateParameter):
pass
class DeleteParameter(ParameterCommand, DeleteObject,
adapts=s_funcs.DeleteParameter):
pass
class RenameParameter(ParameterCommand, RenameObject,
adapts=s_funcs.RenameParameter):
pass
class AlterParameter(ParameterCommand, AlterObject,
adapts=s_funcs.AlterParameter):
pass
class FunctionCommand:
def get_pgname(self, func: s_funcs.Function, schema):
return common.get_backend_name(schema, func, catenate=False)
def get_pgtype(self, func: s_funcs.Function, obj, schema):
if obj.is_any(schema):
return ('anyelement',)
try:
return types.pg_type_from_object(
schema, obj, persistent_tuples=True)
except ValueError:
raise errors.QueryError(
f'could not compile parameter type {obj!r} '
f'of function {func.get_shortname(schema)}',
context=self.source_context) from None
def compile_default(self, func: s_funcs.Function,
default: s_expr.Expression, schema):
try:
comp = s_expr.Expression.compiled(
default,
schema=schema,
as_fragment=True,
)
ir = comp.irast
if not irutils.is_const(ir.expr):
raise ValueError('expression not constant')
sql_tree = compiler.compile_ir_to_sql_tree(
ir.expr, singleton_mode=True)
return codegen.SQLSourceGenerator.to_source(sql_tree)
except Exception as ex:
raise errors.QueryError(
f'could not compile default expression {default!r} '
f'of function {func.get_shortname(schema)}: {ex}',
context=self.source_context) from ex
def compile_args(self, func: s_funcs.Function, schema):
func_params = func.get_params(schema)
has_inlined_defaults = func.has_inlined_defaults(schema)
args = []
if has_inlined_defaults:
args.append(('__defaults_mask__', ('bytea',), None))
compile_defaults = not (
has_inlined_defaults or func_params.find_named_only(schema)
)
for param in func_params.get_in_canonical_order(schema):
param_type = param.get_type(schema)
param_default = param.get_default(schema)
pg_at = self.get_pgtype(func, param_type, schema)
default = None
if compile_defaults and param_default is not None:
default = self.compile_default(func, param_default, schema)
args.append((param.get_parameter_name(schema), pg_at, default))
return args
def make_function(self, func: s_funcs.Function, code, schema):
func_return_typemod = func.get_return_typemod(schema)
func_params = func.get_params(schema)
return dbops.Function(
name=self.get_pgname(func, schema),
args=self.compile_args(func, schema),
has_variadic=func_params.find_variadic(schema) is not None,
set_returning=func_return_typemod is ql_ft.TypeModifier.SetOfType,
volatility=func.get_volatility(schema),
returns=self.get_pgtype(
func, func.get_return_type(schema), schema),
text=code)
def compile_sql_function(self, func: s_funcs.Function, schema):
return self.make_function(func, func.get_code(schema), schema)
def fix_return_type(
self, func: s_funcs.Function, nativecode, schema, context):
return_type = self._get_attribute_value(schema, context, 'return_type')
ir = nativecode.irast
if not (
return_type.is_object_type()
or s_types.is_type_compatible(return_type, ir.stype,
schema=nativecode.schema)
):
# Add a cast and recompile it
qlexpr = qlcompiler.astutils.ensure_qlstmt(ql_ast.TypeCast(
type=s_utils.typeref_to_ast(schema, return_type),
expr=nativecode.qlast,
))
nativecode = self.compile_function(
schema, context, type(nativecode).from_ast(qlexpr, schema))
return nativecode
def compile_edgeql_function(self, func: s_funcs.Function, schema, context):
nativecode = func.get_nativecode(schema)
if nativecode.irast is None:
nativecode = self.compile_function(schema, context, nativecode)
nativecode = self.fix_return_type(func, nativecode, schema, context)
sql_text, _ = compiler.compile_ir_to_sql(
nativecode.irast,
ignore_shapes=True,
explicit_top_cast=irtyputils.type_to_typeref( # note: no cache
schema, func.get_return_type(schema)),
output_format=compiler.OutputFormat.NATIVE,
use_named_params=True)
return self.make_function(func, sql_text, schema)
def sql_rval_consistency_check(
self,
cobj: s_funcs.CallableObject,
expr: str,
schema: s_schema.Schema,
) -> dbops.Command:
fname = cobj.get_verbosename(schema)
rtype = types.pg_type_from_object(
schema,
cobj.get_return_type(schema),
persistent_tuples=True,
)
rtype_desc = '.'.join(rtype)
# Determine the actual returned type of the SQL function.
# We can't easily do this by looking in system catalogs because
# of polymorphic dispatch, but, fortunately, there's pg_typeof().
# We only need to be sure to actually NOT call the target function,
# as we can't assume how it'll behave with dummy inputs. Hence, the
# weird-looking query below, where we rely on the Postgres executor to
# skip the call, because no rows satisfy the WHERE condition, but
# we then still generate a NULL row via a LEFT JOIN.
f_test = textwrap.dedent(f'''\
(SELECT
pg_typeof(f.i)
FROM
(SELECT NULL::text) AS spreader
LEFT JOIN (SELECT {expr} WHERE False) AS f(i) ON (true))''')
check = dbops.Query(text=f'''
PERFORM
edgedb.raise_on_not_null(
NULLIF(
pg_typeof(NULL::{qt(rtype)}),
{f_test}
),
'invalid_function_definition',
msg => format(
'%s is declared to return SQL type "%s", but '
|| 'the underlying SQL function returns "%s"',
{ql(fname)},
{ql(rtype_desc)},
{f_test}::text
),
hint => (
'Declare the function with '
|| '`force_return_cast := true`, '
|| 'or add an explicit cast to its body.'
)
);
''')
return check
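# Illustration (hypothetical expr, not generated by this module verbatim):
# for expr = "lower(NULL::text)" the probe above reduces to
#
#   (SELECT pg_typeof(f.i)
#    FROM (SELECT NULL::text) AS spreader
#    LEFT JOIN (SELECT lower(NULL::text) WHERE False) AS f(i) ON (true))
#
# which reports the declared result type (text) without ever executing
# lower(), since the WHERE False subquery yields no rows and the LEFT JOIN
# supplies a single all-NULL row for pg_typeof to inspect.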
def get_dummy_func_call(
self,
cobj: s_funcs.CallableObject,
sql_func: str,
schema: s_schema.Schema,
) -> str:
args = []
func_params = cobj.get_params(schema)
for param in func_params.get_in_canonical_order(schema):
param_type = param.get_type(schema)
pg_at = self.get_pgtype(cobj, param_type, schema)
args.append(f'NULL::{qt(pg_at)}')
return f'{sql_func}({", ".join(args)})'
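# Illustration (the type mapping here is an assumption made for the example
# only): for a callable declared to take (std::str, std::int64) and backed
# by SQL function "my_func", this produces a never-executed call string
# along the lines of
#
#   my_func(NULL::text, NULL::int8)
#
# which make_op below passes to sql_rval_consistency_check.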
def make_op(
self,
func: s_funcs.Function,
schema: s_schema.Schema,
context: sd.CommandContext,
*,
or_replace: bool=False,
) -> Iterable[dbops.Command]:
if func.get_from_expr(schema):
# Intrinsic function, handled directly by the compiler.
return ()
elif sql_func := func.get_from_function(schema):
func_params = func.get_params(schema)
if (
func.get_force_return_cast(schema)
or func_params.has_polymorphic(schema)
or func.get_sql_func_has_out_params(schema)
):
return ()
else:
# Function backed directly by an SQL function.
# Check the consistency of the return type.
dexpr = self.get_dummy_func_call(func, sql_func, schema)
check = self.sql_rval_consistency_check(func, dexpr, schema)
return (check,)
else:
func_language = func.get_language(schema)
if func_language is ql_ast.Language.SQL:
dbf = self.compile_sql_function(func, schema)
elif func_language is ql_ast.Language.EdgeQL:
dbf = self.compile_edgeql_function(func, schema, context)
else:
raise errors.QueryError(
f'cannot compile function {func.get_shortname(schema)}: '
f'unsupported language {func_language}',
context=self.source_context)
op = dbops.CreateFunction(dbf, or_replace=or_replace)
return (op,)
class CreateFunction(FunctionCommand, CreateObject,
adapts=s_funcs.CreateFunction):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().apply(schema, context)
self.pgops.update(self.make_op(self.scls, schema, context))
return schema
class RenameFunction(
FunctionCommand, RenameObject, adapts=s_funcs.RenameFunction):
pass
class AlterFunction(
FunctionCommand, AlterObject, adapts=s_funcs.AlterFunction):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().apply(schema, context)
if self.metadata_only:
return schema
if (
self.get_attribute_value('volatility') is not None or
self.get_attribute_value('nativecode') is not None
):
self.pgops.update(
self.make_op(self.scls, schema, context, or_replace=True))
return schema
class DeleteFunction(
FunctionCommand, DeleteObject, adapts=s_funcs.DeleteFunction):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = super().apply(schema, context)
func = self.scls
if func.get_code(orig_schema) or func.get_nativecode(orig_schema):
# An EdgeQL or a SQL function
# (not just an alias to a SQL function).
variadic = func.get_params(orig_schema).find_variadic(orig_schema)
self.pgops.add(
dbops.DropFunction(
name=self.get_pgname(func, orig_schema),
args=self.compile_args(func, orig_schema),
has_variadic=variadic is not None,
)
)
return schema
class OperatorCommand(FunctionCommand):
def oper_name_to_pg_name(
self,
schema,
name: sn.QualName,
) -> Tuple[str, str]:
return common.get_operator_backend_name(
name, catenate=False)
def get_pg_operands(self, schema, oper: s_opers.Operator):
left_type = None
right_type = None
oper_params = list(oper.get_params(schema).objects(schema))
oper_kind = oper.get_operator_kind(schema)
if oper_kind is ql_ft.OperatorKind.Infix:
left_type = types.pg_type_from_object(
schema, oper_params[0].get_type(schema))
right_type = types.pg_type_from_object(
schema, oper_params[1].get_type(schema))
elif oper_kind is ql_ft.OperatorKind.Prefix:
right_type = types.pg_type_from_object(
schema, oper_params[0].get_type(schema))
elif oper_kind is ql_ft.OperatorKind.Postfix:
left_type = types.pg_type_from_object(
schema, oper_params[0].get_type(schema))
else:
raise RuntimeError(
f'unexpected operator type: {oper.get_type(schema)!r}')
return left_type, right_type
def compile_args(self, oper: s_opers.Operator, schema):
args = []
oper_params = oper.get_params(schema)
for param in oper_params.get_in_canonical_order(schema):
pg_at = self.get_pgtype(oper, param.get_type(schema), schema)
args.append((param.get_parameter_name(schema), pg_at))
return args
def make_operator_function(self, oper: s_opers.Operator, schema):
return dbops.Function(
name=common.get_backend_name(
schema, oper, catenate=False, aspect='function'),
args=self.compile_args(oper, schema),
volatility=oper.get_volatility(schema),
returns=self.get_pgtype(
oper, oper.get_return_type(schema), schema),
text=oper.get_code(schema))
def get_dummy_operator_call(
self,
oper: s_opers.Operator,
pgop: str,
from_args: Tuple[Tuple[str, ...], ...],
schema: s_schema.Schema,
) -> str:
# Build a dummy (never executed) operator invocation with NULL-typed operands
oper_kind = oper.get_operator_kind(schema)
if oper_kind is ql_ft.OperatorKind.Infix:
op = f'NULL::{qt(from_args[0])} {pgop} NULL::{qt(from_args[1])}'
elif oper_kind is ql_ft.OperatorKind.Postfix:
op = f'NULL::{qt(from_args[0])} {pgop}'
elif oper_kind is ql_ft.OperatorKind.Prefix:
op = f'{pgop} NULL::{qt(from_args[1])}'
else:
raise RuntimeError(f'unexpected operator kind: {oper_kind!r}')
return op
class CreateOperator(OperatorCommand, CreateObject,
adapts=s_opers.CreateOperator):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().apply(schema, context)
oper = self.scls
if oper.get_abstract(schema):
return schema
oper_language = oper.get_language(schema)
oper_fromop = oper.get_from_operator(schema)
oper_fromfunc = oper.get_from_function(schema)
oper_code = oper.get_code(schema)
oper_comm = oper.get_commutator(schema)
if oper_comm:
commutator = self.oper_name_to_pg_name(schema, oper_comm)
else:
commutator = None
oper_neg = oper.get_negator(schema)
if oper_neg:
negator = self.oper_name_to_pg_name(schema, oper_neg)
else:
negator = None
if oper_language is ql_ast.Language.SQL and oper_fromop:
pg_oper_name = oper_fromop[0]
args = self.get_pg_operands(schema, oper)
if len(oper_fromop) > 1:
# Explicit operand types given in FROM SQL OPERATOR.
from_args = oper_fromop[1:]
else:
from_args = args
if oper_code:
oper_func = self.make_operator_function(oper, schema)
self.pgops.add(dbops.CreateFunction(oper_func))
oper_func_name = common.qname(*oper_func.name)
elif oper_fromfunc:
oper_func_name = oper_fromfunc
elif from_args != args:
# Need a proxy function with casts
oper_kind = oper.get_operator_kind(schema)
if oper_kind is ql_ft.OperatorKind.Infix:
op = (f'$1::{from_args[0]} {pg_oper_name} '
f'$2::{from_args[1]}')
elif oper_kind is ql_ft.OperatorKind.Postfix:
op = f'$1::{from_args[0]} {pg_oper_name}'
elif oper_kind is ql_ft.OperatorKind.Prefix:
op = f'{pg_oper_name} $1::{from_args[1]}'
else:
raise RuntimeError(
f'unexpected operator kind: {oper_kind!r}')
rtype = self.get_pgtype(
oper, oper.get_return_type(schema), schema)
oper_func = dbops.Function(
name=common.get_backend_name(
schema, oper, catenate=False, aspect='function'),
args=[(None, a) for a in args if a],
volatility=oper.get_volatility(schema),
returns=rtype,
text=f'SELECT ({op})::{qt(rtype)}',
)
self.pgops.add(dbops.CreateFunction(oper_func))
oper_func_name = common.qname(*oper_func.name)
else:
oper_func_name = None
params = oper.get_params(schema)
if (not params.has_polymorphic(schema) or
all(p.get_type(schema).is_array()
for p in params.objects(schema))):
self.pgops.add(dbops.CreateOperatorAlias(
name=common.get_backend_name(schema, oper, catenate=False),
args=args,
procedure=oper_func_name,
base_operator=('pg_catalog', pg_oper_name),
operator_args=from_args,
commutator=commutator,
negator=negator,
))
if oper_func_name is not None:
cexpr = self.get_dummy_func_call(
oper, oper_func_name, schema)
else:
cexpr = self.get_dummy_operator_call(
oper, pg_oper_name, from_args, schema)
check = self.sql_rval_consistency_check(oper, cexpr, schema)
self.pgops.add(check)
elif oper_language is ql_ast.Language.SQL and oper_code:
args = self.get_pg_operands(schema, oper)
oper_func = self.make_operator_function(oper, schema)
self.pgops.add(dbops.CreateFunction(oper_func))
oper_func_name = common.qname(*oper_func.name)
self.pgops.add(dbops.CreateOperator(
name=common.get_backend_name(schema, oper, catenate=False),
args=args,
procedure=oper_func_name,
))
cexpr = self.get_dummy_func_call(
oper, q(*oper_func.name), schema)
check = self.sql_rval_consistency_check(oper, cexpr, schema)
self.pgops.add(check)
elif oper.get_from_expr(schema):
# This operator is handled by the compiler and does not
# need explicit representation in the backend.
pass
else:
raise errors.QueryError(
f'cannot create operator {oper.get_shortname(schema)}: '
f'only "FROM SQL" and "FROM SQL OPERATOR" operators '
f'are currently supported',
context=self.source_context)
return schema
class RenameOperator(
OperatorCommand, RenameObject, adapts=s_opers.RenameOperator):
pass
class AlterOperator(
OperatorCommand, AlterObject, adapts=s_opers.AlterOperator):
pass
class DeleteOperator(
OperatorCommand, DeleteObject, adapts=s_opers.DeleteOperator):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
oper = schema.get(self.classname)
if oper.get_abstract(schema):
return super().apply(schema, context)
name = common.get_backend_name(schema, oper, catenate=False)
args = self.get_pg_operands(schema, oper)
schema = super().apply(schema, context)
if not oper.get_from_expr(orig_schema):
self.pgops.add(dbops.DropOperator(name=name, args=args))
return schema
class CastCommand:
def make_cast_function(self, cast: s_casts.Cast, schema):
name = common.get_backend_name(
schema, cast, catenate=False, aspect='function')
args = [(
'val',
types.pg_type_from_object(schema, cast.get_from_type(schema))
)]
returns = types.pg_type_from_object(schema, cast.get_to_type(schema))
return dbops.Function(
name=name,
args=args,
returns=returns,
text=cast.get_code(schema),
)
class CreateCast(CastCommand, CreateObject,
adapts=s_casts.CreateCast):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().apply(schema, context)
cast = self.scls
cast_language = cast.get_language(schema)
cast_code = cast.get_code(schema)
from_cast = cast.get_from_cast(schema)
from_expr = cast.get_from_expr(schema)
if cast_language is ql_ast.Language.SQL and cast_code:
cast_func = self.make_cast_function(cast, schema)
self.pgops.add(dbops.CreateFunction(cast_func))
elif from_cast is not None or from_expr is not None:
# This operator is handled by the compiler and does not
# need explicit representation in the backend.
pass
else:
raise errors.QueryError(
f'cannot create cast: '
f'only "FROM SQL" and "FROM SQL FUNCTION" casts '
f'are currently supported',
context=self.source_context)
return schema
class RenameCast(
CastCommand, RenameObject, adapts=s_casts.RenameCast):
pass
class AlterCast(
CastCommand, AlterObject, adapts=s_casts.AlterCast):
pass
class DeleteCast(
CastCommand, DeleteObject, adapts=s_casts.DeleteCast):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
cast = schema.get(self.classname)
cast_language = cast.get_language(schema)
cast_code = cast.get_code(schema)
schema = super().apply(schema, context)
if cast_language is ql_ast.Language.SQL and cast_code:
cast_func = self.make_cast_function(cast, schema)
self.pgops.add(dbops.DropFunction(
cast_func.name, cast_func.args))
return schema
class AnnotationCommand:
pass
class CreateAnnotation(
AnnotationCommand, CreateObject,
adapts=s_anno.CreateAnnotation):
op_priority = 1
class RenameAnnotation(
AnnotationCommand, RenameObject,
adapts=s_anno.RenameAnnotation):
pass
class AlterAnnotation(
AnnotationCommand, AlterObject, adapts=s_anno.AlterAnnotation):
pass
class DeleteAnnotation(
AnnotationCommand, DeleteObject,
adapts=s_anno.DeleteAnnotation):
pass
class AnnotationValueCommand(sd.ObjectCommand,
metaclass=CommandMeta):
op_priority = 4
class CreateAnnotationValue(
AnnotationValueCommand, CreateObject,
adapts=s_anno.CreateAnnotationValue):
pass
class AlterAnnotationValue(
AnnotationValueCommand, AlterObject,
adapts=s_anno.AlterAnnotationValue):
pass
class AlterAnnotationValueOwned(
AnnotationValueCommand, AlterObject,
adapts=s_anno.AlterAnnotationValueOwned):
pass
class RenameAnnotationValue(
AnnotationValueCommand, RenameObject,
adapts=s_anno.RenameAnnotationValue):
pass
class RebaseAnnotationValue(
AnnotationValueCommand,
RebaseObject,
adapts=s_anno.RebaseAnnotationValue,
):
pass
class DeleteAnnotationValue(
AnnotationValueCommand, DeleteObject,
adapts=s_anno.DeleteAnnotationValue):
pass
class ConstraintCommand(sd.ObjectCommand,
metaclass=CommandMeta):
op_priority = 3
@classmethod
def constraint_is_effective(cls, schema, constraint):
subject = constraint.get_subject(schema)
if subject is None:
return False
ancestors = [
a for a in constraint.get_ancestors(schema).objects(schema)
if not a.generic(schema)
]
if (
constraint.get_delegated(schema)
and all(ancestor.get_delegated(schema) for ancestor in ancestors)
):
return False
elif isinstance(subject, s_pointers.Pointer):
if subject.generic(schema):
return True
else:
return has_table(subject.get_source(schema), schema)
elif isinstance(subject, s_objtypes.ObjectType):
return has_table(subject, schema)
else:
return True
@classmethod
def create_constraint(
cls, constraint, schema, context, source_context=None):
if cls.constraint_is_effective(schema, constraint):
subject = constraint.get_subject(schema)
if subject is not None:
schemac_to_backendc = \
schemamech.ConstraintMech.\
schema_constraint_to_backend_constraint
bconstr = schemac_to_backendc(
subject, constraint, schema, context,
source_context)
return bconstr.create_ops()
else:
return dbops.CommandGroup()
@classmethod
def delete_constraint(
cls, constraint, schema, context, source_context=None):
op = dbops.CommandGroup()
if cls.constraint_is_effective(schema, constraint):
subject = constraint.get_subject(schema)
if subject is not None:
schemac_to_backendc = \
schemamech.ConstraintMech.\
schema_constraint_to_backend_constraint
bconstr = schemac_to_backendc(
subject, constraint, schema, context,
source_context)
op.add_command(bconstr.delete_ops())
return op
class CreateConstraint(
ConstraintCommand, CreateObject,
adapts=s_constr.CreateConstraint):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super().apply(schema, context)
constraint = self.scls
op = self.create_constraint(
constraint, schema, context, self.source_context)
self.pgops.add(op)
return schema
class RenameConstraint(
ConstraintCommand, RenameObject,
adapts=s_constr.RenameConstraint):
pass
class AlterConstraintOwned(
ConstraintCommand,
AlterObject,
adapts=s_constr.AlterConstraintOwned,
):
pass
class AlterConstraint(
ConstraintCommand, AlterObject,
adapts=s_constr.AlterConstraint):
def apply(self, schema, context):
orig_schema = schema
schema = super().apply(schema, context)
constraint = self.scls
if self.metadata_only:
return schema
if (
not self.constraint_is_effective(schema, constraint)
and not self.constraint_is_effective(orig_schema, constraint)
):
return schema
subject = constraint.get_subject(schema)
subcommands = list(self.get_subcommands())
if (not subcommands or
isinstance(subcommands[0], s_constr.RenameConstraint)):
            # This is a pure rename, so everything has been handled by
            # RenameConstraint above.
return schema
if subject is not None:
schemac_to_backendc = \
schemamech.ConstraintMech.\
schema_constraint_to_backend_constraint
bconstr = schemac_to_backendc(
subject, constraint, schema, context, self.source_context)
orig_bconstr = schemac_to_backendc(
constraint.get_subject(orig_schema),
constraint,
orig_schema,
context,
self.source_context,
)
op = dbops.CommandGroup(priority=1)
if not self.constraint_is_effective(orig_schema, constraint):
op.add_command(bconstr.create_ops())
for child in constraint.children(schema):
orig_cbconstr = schemac_to_backendc(
child.get_subject(orig_schema),
child,
orig_schema,
context,
self.source_context,
)
cbconstr = schemac_to_backendc(
child.get_subject(schema),
child,
schema,
context,
self.source_context,
)
op.add_command(cbconstr.alter_ops(orig_cbconstr))
elif not self.constraint_is_effective(schema, constraint):
op.add_command(bconstr.alter_ops(orig_bconstr))
for child in constraint.children(schema):
orig_cbconstr = schemac_to_backendc(
child.get_subject(orig_schema),
child,
orig_schema,
context,
self.source_context,
)
cbconstr = schemac_to_backendc(
child.get_subject(schema),
child,
schema,
context,
self.source_context,
)
op.add_command(cbconstr.alter_ops(orig_cbconstr))
else:
op.add_command(bconstr.alter_ops(orig_bconstr))
self.pgops.add(op)
return schema
class DeleteConstraint(
ConstraintCommand, DeleteObject,
adapts=s_constr.DeleteConstraint):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
delta_root_ctx = context.top()
orig_schema = delta_root_ctx.original_schema
constraint = schema.get(self.classname)
op = self.delete_constraint(
constraint, orig_schema, context, self.source_context)
self.pgops.add(op)
schema = super().apply(schema, context)
return schema
class RebaseConstraint(
ConstraintCommand, RebaseObject,
adapts=s_constr.RebaseConstraint):
pass
class AliasCapableObjectMetaCommand(ObjectMetaCommand):
pass
class ScalarTypeMetaCommand(AliasCapableObjectMetaCommand):
def is_sequence(self, schema, scalar):
seq = schema.get('std::sequence', default=None)
return seq is not None and scalar.issubclass(schema, seq)
class CreateScalarType(ScalarTypeMetaCommand,
adapts=s_scalars.CreateScalarType):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_scalars.CreateScalarType.apply(self, schema, context)
schema = ScalarTypeMetaCommand.apply(self, schema, context)
return schema
def _create_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super()._create_begin(schema, context)
scalar = self.scls
if scalar.get_abstract(schema):
return schema
new_domain_name = types.pg_type_from_scalar(schema, scalar)
if types.is_builtin_scalar(schema, scalar):
return schema
enum_values = scalar.get_enum_values(schema)
if enum_values:
new_enum_name = common.get_backend_name(
schema, scalar, catenate=False)
self.pgops.add(dbops.CreateEnum(
dbops.Enum(name=new_enum_name, values=enum_values)))
base = q(*new_enum_name)
else:
base = types.get_scalar_base(schema, scalar)
if self.is_sequence(schema, scalar):
seq_name = common.get_backend_name(
schema, scalar, catenate=False, aspect='sequence')
self.pgops.add(dbops.CreateSequence(name=seq_name))
domain = dbops.Domain(name=new_domain_name, base=base)
self.pgops.add(dbops.CreateDomain(domain=domain))
default = self.get_resolved_attribute_value(
'default',
schema=schema,
context=context,
)
if (default is not None
and not isinstance(default, s_expr.Expression)):
            # We only care to support literal defaults here. Supporting
            # defaults based on queries makes no sense at the database
            # level, since the database forbids queries in DEFAULT and
            # pre-calculating the value does not make sense either,
            # because the whole point of query defaults is for them to
            # be dynamic.
self.pgops.add(
dbops.AlterDomainAlterDefault(
name=new_domain_name, default=default))
return schema
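# An illustrative sketch (not emitted verbatim) of what CreateScalarType
# produces for a non-enum user scalar with a literal default; the actual
# schema-qualified names come from common.get_backend_name:
#
#     CREATE DOMAIN <scalar domain> AS <base type>;
#     ALTER DOMAIN <scalar domain> SET DEFAULT 'some literal';
#
# Enum scalars get CREATE TYPE ... AS ENUM (...) instead of a domain, and
# scalars derived from std::sequence additionally get a backing sequence.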
class RenameScalarType(ScalarTypeMetaCommand, RenameObject,
adapts=s_scalars.RenameScalarType):
pass
class RebaseScalarType(ScalarTypeMetaCommand,
adapts=s_scalars.RebaseScalarType):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
# Actual rebase is taken care of in AlterScalarType
schema = ScalarTypeMetaCommand.apply(self, schema, context)
return s_scalars.RebaseScalarType.apply(self, schema, context)
class AlterScalarType(ScalarTypeMetaCommand, adapts=s_scalars.AlterScalarType):
def _get_problematic_refs(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
*,
composite_only: bool,
) -> Optional[Tuple[
Tuple[so.Object, ...],
List[Tuple[s_props.Property, s_types.TypeShell]],
]]:
"""Find problematic references to this scalar type that need handled.
This is used to work around two irritating limitations of Postgres:
1. That elements of enum types may not be removed or reordered
2. That a constraint may not be added to a domain type if that
domain type appears in a *composite* type that is used in a
column somewhere.
We don't want to have these limitations, and we need to do a decent
amount of work to work around them.
1. Find all of the affected properties. For case 2, this is any
property whose type is a container type that contains this
scalar. (Possibly transitively.) For case 1, the container type
restriction is dropped.
2. Change the type of all offending properties to an equivalent type
that does not reference this scalar. This may require creating
new types. (See _undo_everything.)
3. Add the constraint.
4. Restore the type of all offending properties. If existing data
violates the new constraint, we will fail here. Delete any
temporarily created types. (See _redo_everything.)
Somewhat hackily, _undo_everything and _redo_everything
operate by creating new schema delta command objects, and
adapting and applying them. This is the most straightforward
way to perform the high-level operations needed here.
I've kept this code in pgsql/delta instead of trying to put in
schema/delta because it is pretty aggressively an irritating
pgsql implementation detail and because I didn't want it to
have to interact with ordering ever.
This function finds all of the relevant properties and returns
a list of them along with the appropriate replacement type.
In case 1, it also finds other referencing objects which need
to be deleted and then recreated.
"""
seen_props = set()
seen_other = set()
typ = self.scls
# Do a worklist driven search for properties that refer to this scalar
# through a collection type. We search backwards starting from
# referring collection types or from all refs, depending on
# composite_only.
scls_type = s_types.Collection if composite_only else None
wl = list(schema.get_referrers(typ, scls_type=scls_type))
while wl:
obj = wl.pop()
if isinstance(obj, s_props.Property):
seen_props.add(obj)
elif isinstance(obj, s_scalars.ScalarType):
pass
elif isinstance(obj, s_types.Collection):
wl.extend(schema.get_referrers(obj))
elif isinstance(obj, s_funcs.Parameter) and not composite_only:
wl.extend(schema.get_referrers(obj))
elif isinstance(obj, s_funcs.Function) and not composite_only:
wl.extend(schema.get_referrers(obj))
seen_other.add(obj)
elif isinstance(obj, s_constr.Constraint) and not composite_only:
seen_other.add(obj)
elif isinstance(obj, s_indexes.Index) and not composite_only:
seen_other.add(obj)
if not seen_props and not seen_other:
return None
props = []
if seen_props:
# Find a concrete ancestor to substitute in.
if typ.is_enum(schema):
ancestor = schema.get(sn.QualName('std', 'str'))
else:
for ancestor in typ.get_ancestors(schema).objects(schema):
if not ancestor.get_abstract(schema):
break
else:
raise AssertionError("can't find concrete base for scalar")
replacement_shell = ancestor.as_shell(schema)
props = [
(
prop,
s_utils.type_shell_substitute(
typ.get_name(schema),
replacement_shell,
prop.get_target(schema).as_shell(schema))
)
for prop in seen_props
]
other = sd.sort_by_cross_refs(schema, seen_other)
return other, props
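    # A hedged example of what _get_problematic_refs() returns: when a
    # constraint is added to `MyScalar` (case 2 above), a property typed
    # array<MyScalar> ends up in `props` paired with a shell for the same
    # array over a concrete ancestor (std::str if the scalar is an enum).
    # In the enum-recreation case (case 1) the search is wider, so functions,
    # constraints and indexes referencing the scalar also land in `other`,
    # sorted by cross-references so they can be dropped and recreated safely.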
def _undo_everything(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
other: Tuple[so.Object, ...],
props: List[Tuple[s_props.Property, s_types.TypeShell]],
) -> s_schema.Schema:
"""Rewrite the type of everything that uses this scalar dangerously.
See _get_problematic_refs above for details.
"""
# First we need to strip out any default value that might reference
# one of the functions we are going to delete.
cmd = sd.CommandGroup()
for prop, _ in props:
if prop.get_default(schema):
delta_alter, cmd_alter, alter_context = prop.init_delta_branch(
schema, context, cmdtype=sd.AlterObject)
cmd_alter.set_attribute_value('default', None)
cmd.add(delta_alter)
acmd = CommandMeta.adapt(cmd)
schema = acmd.apply(schema, context)
self.pgops.update(acmd.get_subcommands())
for obj in other:
if isinstance(obj, s_funcs.Function):
# Force function deletions at the SQL level without ever
# bothering to remove them from our schema.
fc = FunctionCommand()
variadic = obj.get_params(schema).find_variadic(schema)
self.pgops.add(
dbops.DropFunction(
name=fc.get_pgname(obj, schema),
args=fc.compile_args(obj, schema),
has_variadic=variadic is not None,
)
)
elif isinstance(obj, s_constr.Constraint):
self.pgops.add(
ConstraintCommand.delete_constraint(obj, schema, context))
elif isinstance(obj, s_indexes.Index):
self.pgops.add(
DeleteIndex.delete_index(
obj, schema, context, priority=0))
cmd = sd.DeltaRoot()
for prop, new_typ in props:
try:
cmd.add(new_typ.as_create_delta(schema))
except NotImplementedError:
pass
delta_alter, cmd_alter, alter_context = prop.init_delta_branch(
schema, context, cmdtype=sd.AlterObject)
cmd_alter.set_attribute_value('target', new_typ)
cmd_alter.set_attribute_value('default', None)
cmd.add(delta_alter)
cmd.apply(schema, context)
for sub in cmd.get_subcommands():
acmd = CommandMeta.adapt(sub)
schema = acmd.apply(schema, context)
self.pgops.add(acmd)
return schema
def _redo_everything(
self,
schema: s_schema.Schema,
orig_schema: s_schema.Schema,
context: sd.CommandContext,
other: Tuple[so.Object, ...],
props: List[Tuple[s_props.Property, s_types.TypeShell]],
) -> s_schema.Schema:
"""Restore the type of everything that uses this scalar dangerously.
See _get_problematic_refs above for details.
"""
for obj in reversed(other):
if isinstance(obj, s_funcs.Function):
# Super hackily recreate the functions
fc = CreateFunction(classname=obj.get_name(schema))
for f in ('language', 'params', 'return_type'):
fc.set_attribute_value(f, obj.get_field_value(schema, f))
self.pgops.update(fc.make_op(obj, schema, context))
elif isinstance(obj, s_constr.Constraint):
self.pgops.add(
ConstraintCommand.create_constraint(obj, schema, context))
elif isinstance(obj, s_indexes.Index):
self.pgops.add(
CreateIndex.create_index(obj, orig_schema, context))
cmd = sd.DeltaRoot()
for prop, new_typ in props:
delta_alter, cmd_alter, alter_context = prop.init_delta_branch(
schema, context, cmdtype=sd.AlterObject)
cmd_alter.set_attribute_value(
'target', prop.get_target(orig_schema))
cmd_alter.set_attribute_value(
'default', prop.get_default(orig_schema))
cmd.add_prerequisite(delta_alter)
rnew_typ = new_typ.resolve(schema)
if delete := rnew_typ.as_type_delete_if_dead(schema):
cmd.add(delete)
# do an apply of the schema-level command to force it to canonicalize,
# which prunes out duplicate deletions
cmd.apply(schema, context)
for sub in cmd.get_subcommands():
acmd = CommandMeta.adapt(sub)
schema = acmd.apply(schema, context)
self.pgops.add(acmd)
return schema
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = s_scalars.AlterScalarType.apply(self, schema, context)
new_scalar = self.scls
old_enum_values = new_scalar.get_enum_values(orig_schema)
new_enum_values = new_scalar.get_enum_values(schema)
# If values were deleted or reordered, we need to drop the enum
# and recreate it.
needs_recreate = (
old_enum_values != new_enum_values
and old_enum_values != new_enum_values[:len(old_enum_values)])
has_create_constraint = bool(
list(self.get_subcommands(type=s_constr.CreateConstraint)))
problematic_refs = None
if needs_recreate or has_create_constraint:
problematic_refs = self._get_problematic_refs(
schema, context, composite_only=not needs_recreate)
if problematic_refs:
other, props = problematic_refs
schema = self._undo_everything(schema, context, other, props)
schema = ScalarTypeMetaCommand.apply(self, schema, context)
if new_enum_values:
type_name = common.get_backend_name(
schema, new_scalar, catenate=False)
if needs_recreate:
self.pgops.add(
dbops.DropEnum(name=type_name))
self.pgops.add(dbops.CreateEnum(
dbops.Enum(name=type_name, values=new_enum_values)))
elif old_enum_values != new_enum_values:
old_idx = 0
old_enum_values = list(old_enum_values)
for v in new_enum_values:
if old_idx >= len(old_enum_values):
self.pgops.add(
dbops.AlterEnumAddValue(
type_name, v,
)
)
elif v != old_enum_values[old_idx]:
self.pgops.add(
dbops.AlterEnumAddValue(
type_name, v, before=old_enum_values[old_idx],
)
)
                        old_enum_values.insert(old_idx, v)
                        old_idx += 1
else:
old_idx += 1
if problematic_refs:
other, props = problematic_refs
schema = self._redo_everything(
schema, orig_schema, context, other, props)
default_delta = self.get_resolved_attribute_value(
'default',
schema=schema,
context=context,
)
if default_delta:
if (default_delta is None or
isinstance(default_delta, s_expr.Expression)):
new_default = None
else:
new_default = default_delta
domain_name = common.get_backend_name(
schema, new_scalar, catenate=False)
adad = dbops.AlterDomainAlterDefault(
name=domain_name, default=new_default)
self.pgops.add(adad)
return schema
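# A worked example of the enum-alteration branch above, assuming the values
# change from ['a', 'c'] to ['a', 'b', 'c', 'd'] (pure additions): roughly
#
#     ALTER TYPE <enum> ADD VALUE 'b' BEFORE 'c';
#     ALTER TYPE <enum> ADD VALUE 'd';
#
# Removing or reordering values instead takes the needs_recreate path, which
# drops and recreates the Postgres enum after temporarily rewriting the
# problematic references collected by _get_problematic_refs().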
class DeleteScalarType(ScalarTypeMetaCommand,
adapts=s_scalars.DeleteScalarType):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = s_scalars.DeleteScalarType.apply(self, schema, context)
scalar = self.scls
schema = ScalarTypeMetaCommand.apply(self, schema, context)
link = None
if context:
link = context.get(s_links.LinkCommandContext)
ops = link.op.pgops if link else self.pgops
old_domain_name = common.get_backend_name(
orig_schema, scalar, catenate=False)
# Domain dropping gets low priority since other things may
# depend on it.
if scalar.is_enum(orig_schema):
old_enum_name = common.get_backend_name(
orig_schema, scalar, catenate=False)
cond = dbops.EnumExists(old_enum_name)
ops.add(
dbops.DropEnum(
name=old_enum_name, conditions=[cond], priority=3))
else:
cond = dbops.DomainExists(old_domain_name)
ops.add(
dbops.DropDomain(
name=old_domain_name, conditions=[cond], priority=3))
if self.is_sequence(orig_schema, scalar):
seq_name = common.get_backend_name(
orig_schema, scalar, catenate=False, aspect='sequence')
self.pgops.add(dbops.DropSequence(name=seq_name))
return schema
class CompositeObjectMetaCommand(ObjectMetaCommand):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.table_name = None
self._multicommands = {}
self.update_search_indexes = None
def _get_multicommand(
self, context, cmdtype, object_name, *, priority=0,
force_new=False, manual=False, cmdkwargs=None):
if cmdkwargs is None:
cmdkwargs = {}
key = (object_name, priority, frozenset(cmdkwargs.items()))
try:
typecommands = self._multicommands[cmdtype]
except KeyError:
typecommands = self._multicommands[cmdtype] = {}
commands = typecommands.get(key)
if commands is None or force_new or manual:
command = cmdtype(object_name, priority=priority, **cmdkwargs)
if not manual:
try:
commands = typecommands[key]
except KeyError:
commands = typecommands[key] = []
commands.append(command)
else:
command = commands[-1]
return command
def _attach_multicommand(self, context, cmdtype):
try:
typecommands = self._multicommands[cmdtype]
except KeyError:
return
else:
commands = list(
itertools.chain.from_iterable(typecommands.values()))
if commands:
commands = sorted(commands, key=lambda i: i.priority)
self.pgops.update(commands)
def get_alter_table(
self, schema, context, priority=0, force_new=False,
contained=False, manual=False, table_name=None):
tabname = table_name if table_name else self.table_name
if not tabname:
ctx = context.get(self.__class__)
assert ctx
tabname = common.get_backend_name(schema, ctx.scls, catenate=False)
if table_name is None:
self.table_name = tabname
return self._get_multicommand(
context, dbops.AlterTable, tabname, priority=priority,
force_new=force_new, manual=manual,
cmdkwargs={'contained': contained})
def attach_alter_table(self, context):
self._attach_multicommand(context, dbops.AlterTable)
@classmethod
def get_source_and_pointer_ctx(cls, schema, context):
if context:
objtype = context.get(s_objtypes.ObjectTypeCommandContext)
link = context.get(s_links.LinkCommandContext)
else:
objtype = link = None
if objtype:
source, pointer = objtype, link
elif link:
property = context.get(s_props.PropertyCommandContext)
source, pointer = link, property
else:
source = pointer = None
return source, pointer
def schedule_inhviews_update(
self,
schema,
context,
obj,
*,
update_ancestors: Optional[bool]=None,
update_descendants: Optional[bool]=None,
):
self.pgops.add(
self.drop_inhview(
schema, context, obj, drop_ancestors=update_ancestors)
)
root = context.get(sd.DeltaRootContext).op
updates = root.update_inhviews.view_updates
update = updates.get(obj)
if update is None:
update = updates[obj] = InheritanceViewUpdate()
if update_ancestors is not None:
update.update_ancestors = update_ancestors
if update_descendants is not None:
update.update_descendants = update_descendants
def schedule_inhview_deletion(
self,
schema,
context,
obj,
):
root = context.get(sd.DeltaRootContext).op
updates = root.update_inhviews.view_updates
updates.pop(obj, None)
deletions = root.update_inhviews.view_deletions
deletions[obj] = schema
def update_base_inhviews(self, schema, context, obj):
for base in obj.get_bases(schema).objects(schema):
if not context.is_deleting(base):
self.schedule_inhviews_update(
schema, context, base, update_ancestors=True)
def update_lineage_inhviews(self, schema, context, obj):
self.schedule_inhviews_update(
schema, context, obj, update_ancestors=True)
def update_base_inhviews_on_rebase(
self,
schema,
orig_schema,
context,
obj,
):
bases = set(obj.get_bases(schema).objects(schema))
orig_bases = set(obj.get_bases(orig_schema).objects(orig_schema))
for new_base in bases - orig_bases:
self.schedule_inhviews_update(
schema, context, new_base, update_ancestors=True)
for old_base in orig_bases - bases:
self.schedule_inhviews_update(
schema, context, old_base, update_ancestors=True)
def drop_inhview(
self,
schema,
context,
obj,
*,
drop_ancestors=False,
) -> dbops.CommandGroup:
cmd = dbops.CommandGroup()
objs = [obj]
if drop_ancestors:
objs.extend(obj.get_ancestors(schema).objects(schema))
for obj in objs:
if not has_table(obj, schema):
continue
inhview_name = common.get_backend_name(
schema, obj, catenate=False, aspect='inhview')
cmd.add_command(
dbops.DropView(
inhview_name,
conditions=[dbops.ViewExists(inhview_name)],
),
)
return cmd
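# A hedged note on the "inhview" bookkeeping above: each object type (and each
# pointer backed by its own table) has an inheritance view unioning its table
# with those of its descendants.  Structural commands do not rebuild those
# views inline; they record the affected objects on the DeltaRoot operation
# (update_inhviews), and the views are regenerated in one pass once all table
# changes of the delta have been applied.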
class IndexCommand(sd.ObjectCommand, metaclass=CommandMeta):
pass
class CreateIndex(IndexCommand, CreateObject, adapts=s_indexes.CreateIndex):
@classmethod
def create_index(cls, index, schema, context):
subject = index.get_subject(schema)
if not isinstance(subject, s_pointers.Pointer):
singletons = [subject]
path_prefix_anchor = ql_ast.Subject().name
else:
singletons = []
path_prefix_anchor = None
index_expr = index.get_expr(schema)
ir = index_expr.irast
if ir is None:
index_expr = type(index_expr).compiled(
index_expr,
schema=schema,
options=qlcompiler.CompilerOptions(
modaliases=context.modaliases,
schema_object_context=cls.get_schema_metaclass(),
anchors={ql_ast.Subject().name: subject},
path_prefix_anchor=path_prefix_anchor,
singletons=singletons,
apply_query_rewrites=not context.stdmode,
),
)
ir = index_expr.irast
table_name = common.get_backend_name(
schema, subject, catenate=False)
sql_tree = compiler.compile_ir_to_sql_tree(
ir.expr, singleton_mode=True)
sql_expr = codegen.SQLSourceGenerator.to_source(sql_tree)
if isinstance(sql_tree, pg_ast.ImplicitRowExpr):
# Trim the parentheses to avoid PostgreSQL choking on double
            # parentheses, since it expects only a single set around the
            # column list.
sql_expr = sql_expr[1:-1]
module_name = index.get_name(schema).module
index_name = common.get_index_backend_name(
index.id, module_name, catenate=False)
pg_index = dbops.Index(
name=index_name[1], table_name=table_name, expr=sql_expr,
unique=False, inherit=True,
metadata={'schemaname': str(index.get_name(schema))})
return dbops.CreateIndex(pg_index, priority=3)
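    # A rough outline of create_index() above: the index expression is EdgeQL,
    # so it is compiled to IR (in singleton mode) and then to a SQL expression
    # via compile_ir_to_sql_tree; the resulting text becomes the expression of
    # a dbops.Index whose backend name is derived from the index id, with the
    # original schema name recorded in the index metadata.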
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = CreateObject.apply(self, schema, context)
index = self.scls
self.pgops.add(self.create_index(index, schema, context))
return schema
class RenameIndex(IndexCommand, RenameObject, adapts=s_indexes.RenameIndex):
pass
class AlterIndexOwned(
IndexCommand,
AlterObject,
adapts=s_indexes.AlterIndexOwned,
):
pass
class AlterIndex(IndexCommand, AlterObject, adapts=s_indexes.AlterIndex):
pass
class DeleteIndex(IndexCommand, DeleteObject, adapts=s_indexes.DeleteIndex):
@classmethod
def delete_index(cls, index, schema, context, priority=3):
subject = index.get_subject(schema)
table_name = common.get_backend_name(
schema, subject, catenate=False)
module_name = index.get_name(schema).module
orig_idx_name = common.get_index_backend_name(
index.id, module_name, catenate=False)
index = dbops.Index(
name=orig_idx_name[1], table_name=table_name, inherit=True)
index_exists = dbops.IndexExists(
(table_name[0], index.name_in_catalog))
conditions = (index_exists, ) if priority else ()
return dbops.DropIndex(
index, priority=priority, conditions=conditions)
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = DeleteObject.apply(self, schema, context)
index = self.scls
source = context.get(s_links.LinkCommandContext)
if not source:
source = context.get(s_objtypes.ObjectTypeCommandContext)
if not isinstance(source.op, sd.DeleteObject):
# We should not drop indexes when the host is being dropped since
# the indexes are dropped automatically in this case.
#
self.pgops.add(self.delete_index(index, orig_schema, context))
return schema
class RebaseIndex(
IndexCommand, RebaseObject,
adapts=s_indexes.RebaseIndex):
pass
class CreateUnionType(
MetaCommand,
adapts=s_types.CreateUnionType,
metaclass=CommandMeta,
):
def apply(self, schema, context):
schema = self.__class__.get_adaptee().apply(self, schema, context)
schema = ObjectMetaCommand.apply(self, schema, context)
return schema
class ObjectTypeMetaCommand(AliasCapableObjectMetaCommand,
CompositeObjectMetaCommand):
def schedule_endpoint_delete_action_update(self, obj, schema, context):
endpoint_delete_actions = context.get(
sd.DeltaRootContext).op.update_endpoint_delete_actions
changed_targets = endpoint_delete_actions.changed_targets
changed_targets.add((self, obj))
class CreateObjectType(ObjectTypeMetaCommand,
adapts=s_objtypes.CreateObjectType):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_objtypes.CreateObjectType.apply(self, schema, context)
schema = ObjectTypeMetaCommand.apply(self, schema, context)
objtype = self.scls
if objtype.is_compound_type(schema) or objtype.get_is_derived(schema):
return schema
self.update_lineage_inhviews(schema, context, objtype)
self.attach_alter_table(context)
if self.update_search_indexes:
schema = self.update_search_indexes.apply(schema, context)
self.pgops.add(self.update_search_indexes)
self.schedule_endpoint_delete_action_update(self.scls, schema, context)
return schema
def _create_begin(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = super()._create_begin(schema, context)
objtype = self.scls
if objtype.is_compound_type(schema) or objtype.get_is_derived(schema):
return schema
new_table_name = common.get_backend_name(
schema, self.scls, catenate=False)
self.table_name = new_table_name
columns = []
token_col = dbops.Column(
name='__edb_token', type='uuid', required=False)
columns.append(token_col)
objtype_table = dbops.Table(name=new_table_name, columns=columns)
self.pgops.add(dbops.CreateTable(table=objtype_table))
self.pgops.add(dbops.Comment(
object=objtype_table,
text=str(objtype.get_verbosename(schema)),
))
return schema
class RenameObjectType(ObjectTypeMetaCommand, RenameObject,
adapts=s_objtypes.RenameObjectType):
pass
class RebaseObjectType(ObjectTypeMetaCommand,
adapts=s_objtypes.RebaseObjectType):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = s_objtypes.RebaseObjectType.apply(self, schema, context)
result = self.scls
schema = ObjectTypeMetaCommand.apply(self, schema, context)
if has_table(result, schema):
self.update_base_inhviews_on_rebase(
schema, orig_schema, context, self.scls)
self.schedule_endpoint_delete_action_update(self.scls, schema, context)
return schema
class AlterObjectType(ObjectTypeMetaCommand,
adapts=s_objtypes.AlterObjectType):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_objtypes.AlterObjectType.apply(
self, schema, context=context)
objtype = self.scls
self.table_name = common.get_backend_name(
schema, objtype, catenate=False)
schema = ObjectTypeMetaCommand.apply(self, schema, context)
if has_table(objtype, schema):
self.attach_alter_table(context)
if self.update_search_indexes:
schema = self.update_search_indexes.apply(schema, context)
self.pgops.add(self.update_search_indexes)
return schema
class DeleteObjectType(ObjectTypeMetaCommand,
adapts=s_objtypes.DeleteObjectType):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
self.scls = objtype = schema.get(self.classname)
old_table_name = common.get_backend_name(
schema, objtype, catenate=False)
orig_schema = schema
schema = ObjectTypeMetaCommand.apply(self, schema, context)
schema = s_objtypes.DeleteObjectType.apply(self, schema, context)
if has_table(objtype, orig_schema):
self.attach_alter_table(context)
self.pgops.add(dbops.DropTable(name=old_table_name, priority=3))
self.update_base_inhviews(orig_schema, context, objtype)
self.schedule_inhview_deletion(orig_schema, context, objtype)
return schema
class SchedulePointerCardinalityUpdate(MetaCommand):
pass
class CancelPointerCardinalityUpdate(MetaCommand):
pass
class PointerMetaCommand(MetaCommand, sd.ObjectCommand,
metaclass=CommandMeta):
def get_host(self, schema, context):
if context:
link = context.get(s_links.LinkCommandContext)
if link and isinstance(self, s_props.PropertyCommand):
return link
objtype = context.get(s_objtypes.ObjectTypeCommandContext)
if objtype:
return objtype
def alter_host_table_column(self, ptr, schema, orig_schema, context):
old_target = ptr.get_target(orig_schema)
new_target = ptr.get_target(schema)
alter_table = context.get(
s_objtypes.ObjectTypeCommandContext).op.get_alter_table(
schema, context, priority=1)
ptr_stor_info = types.get_pointer_storage_info(ptr, schema=schema)
if isinstance(new_target, s_scalars.ScalarType):
target_type = types.pg_type_from_object(schema, new_target)
if isinstance(old_target, s_scalars.ScalarType):
alter_type = dbops.AlterTableAlterColumnType(
ptr_stor_info.column_name, common.qname(*target_type))
alter_table.add_operation(alter_type)
else:
cols = self.get_columns(ptr, schema)
ops = [dbops.AlterTableAddColumn(col) for col in cols]
for op in ops:
alter_table.add_operation(op)
else:
col = dbops.Column(
name=ptr_stor_info.column_name,
type=ptr_stor_info.column_type)
alter_table.add_operation(dbops.AlterTableDropColumn(col))
def get_pointer_default(self, ptr, schema, context):
if ptr.is_pure_computable(schema):
return None
default = self.get_resolved_attribute_value(
'default',
schema=schema,
context=context,
)
default_value = None
if default is not None:
if isinstance(default, s_expr.Expression):
default_value = schemamech.ptr_default_to_col_default(
schema, ptr, default)
else:
default_value = common.quote_literal(
str(default))
elif (tgt := ptr.get_target(schema)) and tgt.issubclass(
schema, schema.get('std::sequence')):
# TODO: replace this with a generic scalar type default
# using std::nextval().
seq_name = common.quote_literal(
common.get_backend_name(
schema, ptr.get_target(schema), aspect='sequence'))
default_value = f'nextval({seq_name}::regclass)'
return default_value
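    # A minimal sketch, assuming a property whose target extends
    # std::sequence: get_pointer_default() above yields a column default of
    # roughly
    #
    #     nextval('<quoted sequence name>'::regclass)
    #
    # where the quoted name is produced by common.get_backend_name with
    # aspect='sequence'.  Literal defaults are simply quoted, and expression
    # defaults are delegated to schemamech.ptr_default_to_col_default.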
def alter_pointer_default(self, pointer, orig_schema, schema, context):
default_value = self.get_pointer_default(pointer, schema, context)
if default_value is None and not (
not orig_schema
or pointer.get_default(orig_schema)
or (tgt := pointer.get_target(orig_schema)) and tgt.issubclass(
orig_schema, schema.get('std::sequence'))
):
return
source_ctx = context.get_ancestor(
s_sources.SourceCommandContext, self)
alter_table = source_ctx.op.get_alter_table(
schema, context, contained=True, priority=0)
ptr_stor_info = types.get_pointer_storage_info(
pointer, schema=schema)
alter_table.add_operation(
dbops.AlterTableAlterColumnDefault(
column_name=ptr_stor_info.column_name,
default=default_value))
@classmethod
def get_columns(cls, pointer, schema, default=None, sets_required=False):
ptr_stor_info = types.get_pointer_storage_info(pointer, schema=schema)
col_type = list(ptr_stor_info.column_type)
if col_type[-1].endswith('[]'):
# Array
col_type[-1] = col_type[-1][:-2]
col_type = common.qname(*col_type) + '[]'
else:
col_type = common.qname(*col_type)
return [
dbops.Column(
name=ptr_stor_info.column_name,
type=col_type,
required=(
(
pointer.get_required(schema)
and not pointer.is_pure_computable(schema)
and not sets_required
) or (
ptr_stor_info.table_type == 'link'
and not pointer.is_link_property(schema)
)
),
default=default,
comment=str(pointer.get_shortname(schema)),
),
]
def create_table(self, ptr, schema, context, conditional=False):
c = self._create_table(ptr, schema, context, conditional=conditional)
self.pgops.add(c)
def provide_table(self, ptr, schema, context):
if has_table(ptr, schema):
self.create_table(ptr, schema, context, conditional=True)
self.update_lineage_inhviews(schema, context, ptr)
return True
else:
return False
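    # Overview of the cardinality change below (a summary, not extra
    # behavior): going from multi to single moves data out of the dedicated
    # link/property table into a column on the source table, driven by an
    # explicit USING conversion expression, and then drops that table; going
    # from single to multi creates the pointer table, INSERTs one row per
    # existing non-NULL column value, and drops the old column.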
def _alter_pointer_cardinality(
self,
schema: s_schema.Schema,
orig_schema: s_schema.Schema,
context: sd.CommandContext,
) -> None:
ptr = self.scls
ptr_stor_info = types.get_pointer_storage_info(ptr, schema=schema)
old_ptr_stor_info = types.get_pointer_storage_info(
ptr, schema=orig_schema)
ptr_table = ptr_stor_info.table_type == 'link'
is_lprop = ptr.is_link_property(schema)
is_multi = ptr_table and not is_lprop
is_required = ptr.get_required(schema)
ref_op = self.get_referrer_context_or_die(context).op
if is_multi:
if isinstance(self, sd.AlterObjectFragment):
source_op = self.get_parent_op(context)
else:
source_op = self
else:
source_op = ref_op
# Ignore cardinality changes resulting from the creation of
# an overloaded pointer as there is no data yet.
if isinstance(source_op, sd.CreateObject):
return
if self.conv_expr is not None:
_, conv_sql_expr, orig_rel_alias, _ = (
self._compile_conversion_expr(
pointer=ptr,
conv_expr=self.conv_expr,
schema=schema,
orig_schema=orig_schema,
context=context,
orig_rel_is_always_source=True,
target_as_singleton=False,
)
)
if is_lprop:
obj_id_ref = f'{qi(orig_rel_alias)}.source'
else:
obj_id_ref = f'{qi(orig_rel_alias)}.id'
if is_required and not is_multi:
conv_sql_expr = textwrap.dedent(f'''\
edgedb.raise_on_null(
({conv_sql_expr}),
'not_null_violation',
msg => 'missing value for required property',
detail => '{{"object_id": "' || {obj_id_ref} || '"}}',
"column" => {ql(str(ptr.id))}
)
''')
else:
orig_rel_alias = f'alias_{uuidgen.uuid1mc()}'
if not is_multi:
raise AssertionError(
'explicit conversion expression was expected'
' for multi->single transition'
)
else:
# single -> multi
conv_sql_expr = (
f'SELECT '
f'{qi(orig_rel_alias)}.{qi(old_ptr_stor_info.column_name)}'
)
tab = q(*ptr_stor_info.table_name)
target_col = ptr_stor_info.column_name
if not is_multi:
# Moving from pointer table to source table.
cols = self.get_columns(ptr, schema)
alter_table = source_op.get_alter_table(
schema, context, manual=True)
for col in cols:
cond = dbops.ColumnExists(
ptr_stor_info.table_name,
column_name=col.name,
)
op = (dbops.AlterTableAddColumn(col), None, (cond, ))
alter_table.add_operation(op)
self.pgops.add(alter_table)
update_qry = textwrap.dedent(f'''\
UPDATE {tab} AS {qi(orig_rel_alias)}
SET {qi(target_col)} = ({conv_sql_expr})
''')
self.pgops.add(dbops.Query(update_qry))
if not has_table(ptr, schema):
self.pgops.add(
self.drop_inhview(
orig_schema,
context,
source_op.scls,
drop_ancestors=True,
),
)
self.pgops.add(
self.drop_inhview(
orig_schema,
context,
ptr,
drop_ancestors=True
),
)
otabname = common.get_backend_name(
orig_schema, ptr, catenate=False)
condition = dbops.TableExists(name=otabname)
dt = dbops.DropTable(name=otabname, conditions=[condition])
self.pgops.add(dt)
self.schedule_inhviews_update(
schema,
context,
source_op.scls,
update_descendants=True,
)
else:
# Moving from source table to pointer table.
self.provide_table(ptr, schema, context)
source = ptr.get_source(orig_schema)
src_tab = q(*common.get_backend_name(
orig_schema,
source,
catenate=False,
))
update_qry = textwrap.dedent(f'''\
INSERT INTO {tab} (source, target)
(
SELECT
{qi(orig_rel_alias)}.id,
q.val
FROM
{src_tab} AS {qi(orig_rel_alias)},
LATERAL (
{conv_sql_expr}
) AS q(val)
WHERE
q.val IS NOT NULL
)
ON CONFLICT (source, target) DO NOTHING
''')
self.pgops.add(dbops.Query(update_qry))
check_qry = textwrap.dedent(f'''\
SELECT
edgedb.raise(
NULL::text,
'not_null_violation',
msg => 'missing value for required property',
detail => '{{"object_id": "' || id || '"}}',
"column" => {ql(str(ptr.id))}
)
FROM {src_tab}
WHERE id != ALL (SELECT source FROM {tab})
LIMIT 1
INTO _dummy_text;
''')
self.pgops.add(dbops.Query(check_qry))
self.pgops.add(
self.drop_inhview(
orig_schema,
context,
ref_op.scls,
drop_ancestors=True,
),
)
ref_op = self.get_referrer_context_or_die(context).op
alter_table = ref_op.get_alter_table(
schema, context, manual=True)
col = dbops.Column(
name=old_ptr_stor_info.column_name,
type=common.qname(*old_ptr_stor_info.column_type),
)
alter_table.add_operation(dbops.AlterTableDropColumn(col))
self.pgops.add(alter_table)
self.schedule_inhviews_update(
schema,
context,
ref_op.scls,
update_descendants=True,
update_ancestors=True,
)
def _alter_pointer_optionality(
self,
schema: s_schema.Schema,
orig_schema: s_schema.Schema,
context: sd.CommandContext,
*,
fill_expr: Optional[s_expr.Expression],
) -> None:
new_required = self.scls.get_required(schema)
ptr = self.scls
ptr_stor_info = types.get_pointer_storage_info(ptr, schema=schema)
ptr_table = ptr_stor_info.table_type == 'link'
is_lprop = ptr.is_link_property(schema)
is_multi = ptr_table and not is_lprop
is_required = ptr.get_required(schema)
source_ctx = self.get_referrer_context_or_die(context)
source_op = source_ctx.op
# Ignore optionality changes resulting from the creation of
# an overloaded pointer as there is no data yet.
if isinstance(source_op, sd.CreateObject):
return
ops = dbops.CommandGroup(priority=1)
# For multi pointers, if there is no fill expression, we
        # synthesize a bogus one so that an error is raised if there
        # are any objects with empty values.
if fill_expr is None and is_multi:
if (
ptr.get_cardinality(schema).is_multi()
and fill_expr is None
and (target := ptr.get_target(schema))
):
fill_ast = ql_ast.TypeCast(
expr=ql_ast.Set(elements=[]),
type=s_utils.typeref_to_ast(schema, target),
)
fill_expr = s_expr.Expression.from_ast(
qltree=fill_ast, schema=schema
)
if fill_expr is not None:
_, fill_sql_expr, orig_rel_alias, _ = (
self._compile_conversion_expr(
pointer=ptr,
conv_expr=fill_expr,
schema=schema,
orig_schema=orig_schema,
context=context,
orig_rel_is_always_source=True,
)
)
if is_lprop:
obj_id_ref = f'{qi(orig_rel_alias)}.source'
else:
obj_id_ref = f'{qi(orig_rel_alias)}.id'
if is_required and not is_multi:
fill_sql_expr = textwrap.dedent(f'''\
edgedb.raise_on_null(
({fill_sql_expr}),
'not_null_violation',
msg => 'missing value for required property',
detail => '{{"object_id": "' || {obj_id_ref} || '"}}',
"column" => {ql(str(ptr.id))}
)
''')
tab = q(*ptr_stor_info.table_name)
target_col = ptr_stor_info.column_name
if not is_multi:
# For singleton pointers we simply update the
# requisite column of the host source in every
# row where it is NULL.
update_qry = textwrap.dedent(f'''\
UPDATE {tab} AS {qi(orig_rel_alias)}
SET {qi(target_col)} = ({fill_sql_expr})
WHERE {qi(target_col)} IS NULL
''')
ops.add_command(dbops.Query(update_qry))
else:
# For multi pointers we have to INSERT the
# result of USING into the link table for
# every source object that has _no entries_
# in said link table.
source = ptr.get_source(orig_schema)
src_tab = q(*common.get_backend_name(
orig_schema,
source,
catenate=False,
))
update_qry = textwrap.dedent(f'''\
INSERT INTO {tab} (source, target)
(
SELECT
{qi(orig_rel_alias)}.id,
q.val
FROM
(
SELECT *
FROM {src_tab}
WHERE id != ALL (
SELECT source FROM {tab}
)
) AS {qi(orig_rel_alias)},
LATERAL (
{fill_sql_expr}
) AS q(val)
WHERE
q.val IS NOT NULL
)
''')
ops.add_command(dbops.Query(update_qry))
check_qry = textwrap.dedent(f'''\
SELECT
edgedb.raise(
NULL::text,
'not_null_violation',
msg => 'missing value for required property',
detail => '{{"object_id": "' || id || '"}}',
"column" => {ql(str(ptr.id))}
)
FROM {src_tab}
WHERE id != ALL (SELECT source FROM {tab})
LIMIT 1
INTO _dummy_text;
''')
ops.add_command(dbops.Query(check_qry))
if not ptr_table or is_lprop:
alter_table = source_op.get_alter_table(
schema,
context,
manual=True,
)
alter_table.add_operation(
dbops.AlterTableAlterColumnNull(
column_name=ptr_stor_info.column_name,
null=not new_required,
)
)
ops.add_command(alter_table)
self.pgops.add(ops)
def _drop_constraints(self, pointer, schema, context):
# We need to be able to drop all the constraints referencing a
# pointer before modifying its type, and then recreate them
# once the change is done.
# We look at all referrers to the pointer (and not just the
# constraints directly on the pointer) because we want to
# pick up object constraints that reference it as well.
for cnstr in schema.get_referrers(
pointer, scls_type=s_constr.Constraint):
self.pgops.add(
ConstraintCommand.delete_constraint(cnstr, schema, context))
def _recreate_constraints(self, pointer, schema, context):
for cnstr in schema.get_referrers(
pointer, scls_type=s_constr.Constraint):
self.pgops.add(
ConstraintCommand.create_constraint(cnstr, schema, context))
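    # Rough shape of the type-change flow below: drop every constraint that
    # references the pointer, convert the existing data (directly via
    # ALTER COLUMN ... TYPE ... USING when the conversion is trivial, or via a
    # temporary column populated by an UPDATE when it is not), then recreate
    # the constraints and schedule the inheritance views for a rebuild.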
def _alter_pointer_type(self, pointer, schema, orig_schema, context):
old_ptr_stor_info = types.get_pointer_storage_info(
pointer, schema=orig_schema)
new_target = pointer.get_target(schema)
ptr_table = old_ptr_stor_info.table_type == 'link'
is_link = isinstance(pointer, s_links.Link)
is_lprop = pointer.is_link_property(schema)
is_multi = ptr_table and not is_lprop
is_required = pointer.get_required(schema)
changing_col_type = not is_link
source_ctx = self.get_referrer_context_or_die(context)
if is_multi:
if isinstance(self, sd.AlterObjectFragment):
source_op = self.get_parent_op(context)
else:
source_op = self
else:
source_op = source_ctx.op
# Ignore type narrowing resulting from a creation of a subtype
# as there isn't any data in the link yet.
if is_link and isinstance(source_ctx.op, sd.CreateObject):
return
new_target = pointer.get_target(schema)
orig_target = pointer.get_target(orig_schema)
new_type = types.pg_type_from_object(
schema, new_target, persistent_tuples=True)
source = source_op.scls
using_eql_expr = self.cast_expr
# For links, when the new type is a supertype of the old, no
# SQL-level changes are necessary, unless an explicit conversion
# expression was specified.
if (
is_link
and using_eql_expr is None
and orig_target.issubclass(orig_schema, new_target)
):
return
# We actually have work to do, so drop any constraints we have
self._drop_constraints(pointer, schema, context)
if using_eql_expr is None and not is_link:
# A lack of an explicit EdgeQL conversion expression means
# that the new type is assignment-castable from the old type
# in the EdgeDB schema. BUT, it would not necessarily be
# assignment-castable in Postgres, especially if the types are
# compound. Thus, generate an explicit cast expression.
pname = pointer.get_shortname(schema).name
using_eql_expr = s_expr.Expression.from_ast(
ql_ast.TypeCast(
expr=ql_ast.Path(
partial=True,
steps=[
ql_ast.Ptr(
ptr=ql_ast.ObjectRef(name=pname),
type='property' if is_lprop else None,
),
],
),
type=s_utils.typeref_to_ast(schema, new_target),
),
schema=orig_schema,
)
        # There are two major possibilities for the USING clause:
# 1) trivial case, where the USING clause refers only to the
# columns of the source table, in which case we simply compile that
# into an equivalent SQL USING clause, and 2) complex case, which
# supports arbitrary queries, but requires a temporary column,
# which is populated with the transition query and then used as the
# source for the SQL USING clause.
using_eql_expr, using_sql_expr, orig_rel_alias, sql_expr_is_trivial = (
self._compile_conversion_expr(
pointer=pointer,
conv_expr=using_eql_expr,
schema=schema,
orig_schema=orig_schema,
context=context,
)
)
expr_is_nullable = using_eql_expr.cardinality.can_be_zero()
need_temp_col = (
(is_multi and expr_is_nullable)
or (changing_col_type and not sql_expr_is_trivial)
)
if changing_col_type:
self.pgops.add(source_op.drop_inhview(
schema,
context,
source,
drop_ancestors=True,
))
tab = q(*old_ptr_stor_info.table_name)
target_col = old_ptr_stor_info.column_name
aux_ptr_table = None
aux_ptr_col = None
if is_link:
old_lb_ptr_stor_info = types.get_pointer_storage_info(
pointer, link_bias=True, schema=orig_schema)
if (
old_lb_ptr_stor_info is not None
and old_lb_ptr_stor_info.table_type == 'link'
):
aux_ptr_table = old_lb_ptr_stor_info.table_name
aux_ptr_col = old_lb_ptr_stor_info.column_name
if not sql_expr_is_trivial:
if need_temp_col:
alter_table = source_op.get_alter_table(
schema, context, priority=0, force_new=True, manual=True)
temp_column = dbops.Column(
name=f'??{pointer.id}_{common.get_unique_random_name()}',
type=qt(new_type),
)
alter_table.add_operation(
dbops.AlterTableAddColumn(temp_column))
self.pgops.add(alter_table)
target_col = temp_column.name
if is_multi:
obj_id_ref = f'{qi(orig_rel_alias)}.source'
else:
obj_id_ref = f'{qi(orig_rel_alias)}.id'
if is_required and not is_multi:
using_sql_expr = textwrap.dedent(f'''\
edgedb.raise_on_null(
({using_sql_expr}),
'not_null_violation',
msg => 'missing value for required property',
detail => '{{"object_id": "' || {obj_id_ref} || '"}}',
"column" => {ql(str(pointer.id))}
)
''')
update_qry = textwrap.dedent(f'''\
UPDATE {tab} AS {qi(orig_rel_alias)}
SET {qi(target_col)} = ({using_sql_expr})
''')
self.pgops.add(dbops.Query(update_qry))
actual_using_expr = qi(target_col)
else:
actual_using_expr = using_sql_expr
if changing_col_type or need_temp_col:
alter_table = source_op.get_alter_table(
schema, context, priority=0, force_new=True, manual=True)
if is_multi:
# Remove all rows where the conversion expression produced NULLs.
col = qi(target_col)
if pointer.get_required(schema):
clean_nulls = dbops.Query(textwrap.dedent(f'''\
WITH d AS (
DELETE FROM {tab} WHERE {col} IS NULL RETURNING source
)
SELECT
edgedb.raise(
NULL::text,
'not_null_violation',
msg => 'missing value for required property',
detail => '{{"object_id": "' || l.source || '"}}',
"column" => {ql(str(pointer.id))}
)
FROM
{tab} AS l
WHERE
l.source IN (SELECT source FROM d)
AND True = ALL (
SELECT {col} IS NULL
FROM {tab} AS l2
WHERE l2.source = l.source
)
LIMIT
1
INTO _dummy_text;
'''))
else:
clean_nulls = dbops.Query(textwrap.dedent(f'''\
DELETE FROM {tab} WHERE {col} IS NULL
'''))
self.pgops.add(clean_nulls)
elif aux_ptr_table is not None:
# SINGLE links with link properties are represented in
# _two_ tables (the host type table and a link table with
# properties), and we must update both.
actual_col = qi(old_ptr_stor_info.column_name)
if expr_is_nullable and not is_required:
cleanup_qry = textwrap.dedent(f'''\
DELETE FROM {q(*aux_ptr_table)} AS aux
USING {tab} AS main
WHERE
main.id = aux.source
AND {actual_col} IS NULL
''')
self.pgops.add(dbops.Query(cleanup_qry))
update_qry = textwrap.dedent(f'''\
UPDATE {q(*aux_ptr_table)} AS aux
SET {qi(aux_ptr_col)} = main.{actual_col}
FROM {tab} AS main
WHERE
main.id = aux.source
''')
self.pgops.add(dbops.Query(update_qry))
if changing_col_type:
alter_type = dbops.AlterTableAlterColumnType(
old_ptr_stor_info.column_name,
common.quote_type(new_type),
using_expr=actual_using_expr,
)
alter_table.add_operation(alter_type)
elif need_temp_col:
move_data = dbops.Query(textwrap.dedent(f'''\
UPDATE
{q(*old_ptr_stor_info.table_name)} AS {qi(orig_rel_alias)}
SET
{qi(old_ptr_stor_info.column_name)} = ({qi(target_col)})
'''))
self.pgops.add(move_data)
if need_temp_col:
alter_table.add_operation(dbops.AlterTableDropColumn(temp_column))
if changing_col_type or need_temp_col:
self.pgops.add(alter_table)
self._recreate_constraints(pointer, schema, context)
if changing_col_type:
self.schedule_inhviews_update(
schema,
context,
source,
update_descendants=True,
update_ancestors=True,
)
def _compile_conversion_expr(
self,
*,
pointer: s_pointers.Pointer,
conv_expr: s_expr.Expression,
schema: s_schema.Schema,
orig_schema: s_schema.Schema,
context: sd.CommandContext,
orig_rel_is_always_source: bool = False,
target_as_singleton: bool = True,
) -> Tuple[
s_expr.Expression, # Possibly-amended EdgeQL conversion expression
str, # SQL text
str, # original relation alias
bool, # whether SQL expression is trivial
]:
old_ptr_stor_info = types.get_pointer_storage_info(
pointer, schema=orig_schema)
ptr_table = old_ptr_stor_info.table_type == 'link'
is_link = isinstance(pointer, s_links.Link)
is_lprop = pointer.is_link_property(schema)
is_multi = ptr_table and not is_lprop
is_required = pointer.get_required(schema)
new_target = pointer.get_target(schema)
expr_is_trivial = False
if conv_expr.irast is not None:
ir = conv_expr.irast
else:
conv_expr = self._compile_expr(
orig_schema,
context,
conv_expr,
target_as_singleton=target_as_singleton,
)
ir = conv_expr.irast
assert ir is not None
if ir.stype != new_target and not is_link:
# The result of an EdgeQL USING clause does not match
# the target type exactly, but is castable. Like in the
# case of an empty USING clause, we still have to make
            # an explicit EdgeQL cast rather than relying on Postgres
# casting.
conv_expr = self._compile_expr(
orig_schema,
context,
s_expr.Expression.from_ast(
ql_ast.TypeCast(
expr=conv_expr.qlast,
type=s_utils.typeref_to_ast(schema, new_target),
),
schema=orig_schema,
),
target_as_singleton=target_as_singleton,
)
ir = conv_expr.irast
expr_is_nullable = conv_expr.cardinality.can_be_zero()
refs = irutils.get_longest_paths(ir.expr)
ref_tables = schemamech.get_ref_storage_info(ir.schema, refs)
local_table_only = all(
t == old_ptr_stor_info.table_name
for t in ref_tables
)
# TODO: implement IR complexity inference
can_translate_to_sql_value_expr = False
expr_is_trivial = (
# Only allow trivial USING if we can compile the
# EdgeQL expression into a trivial SQL value expression.
can_translate_to_sql_value_expr
# No link expr is trivially translatable into
# a USING SQL clause.
and not is_link
# SQL SET TYPE cannot contain references
# outside of the local table.
and local_table_only
# Changes to a multi-pointer might involve contraction of
            # the overall cardinality, i.e. the deletion of some rows.
and not is_multi
# If the property is required, and the USING expression
# was not proven by the compiler to not return ZERO, we
# must inject an explicit NULL guard, as the SQL null
# violation error is very nondescript in the context of
# a table rewrite, making it hard to pinpoint the failing
# object.
and (not is_required or not expr_is_nullable)
)
alias = f'alias_{uuidgen.uuid1mc()}'
if not expr_is_trivial:
# Non-trivial conversion expression means that we
# are compiling a full-blown EdgeQL statement as
# opposed to compiling a scalar fragment in trivial
# expression mode.
external_rvars = {}
if is_lprop:
tgt_path_id = irpathid.PathId.from_pointer(
orig_schema,
pointer,
).src_path()
else:
tgt_path_id = irpathid.PathId.from_pointer(
orig_schema,
pointer,
)
ptr_path_id = tgt_path_id.ptr_path()
src_path_id = ptr_path_id.src_path()
if ptr_table and not orig_rel_is_always_source:
rvar = compiler.new_external_rvar(
rel_name=(alias,),
path_id=ptr_path_id,
outputs={
(src_path_id, ('identity',)): 'source',
},
)
external_rvars[ptr_path_id, 'source'] = rvar
external_rvars[ptr_path_id, 'value'] = rvar
external_rvars[src_path_id, 'identity'] = rvar
if local_table_only and not is_lprop:
external_rvars[src_path_id, 'source'] = rvar
external_rvars[src_path_id, 'value'] = rvar
elif is_lprop:
external_rvars[tgt_path_id, 'identity'] = rvar
external_rvars[tgt_path_id, 'value'] = rvar
else:
src_rvar = compiler.new_external_rvar(
rel_name=(alias,),
path_id=src_path_id,
outputs={},
)
external_rvars[src_path_id, 'identity'] = src_rvar
external_rvars[src_path_id, 'value'] = src_rvar
external_rvars[src_path_id, 'source'] = src_rvar
else:
external_rvars = None
sql_tree = compiler.compile_ir_to_sql_tree(
ir,
output_format=compiler.OutputFormat.NATIVE_INTERNAL,
singleton_mode=expr_is_trivial,
external_rvars=external_rvars,
)
sql_text = codegen.generate_source(sql_tree)
return (conv_expr, sql_text, alias, expr_is_trivial)
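# Note on _compile_conversion_expr() above: a USING expression touching only
# columns of the local table could in principle be emitted directly as the SQL
# USING clause of ALTER COLUMN ... TYPE, but since
# can_translate_to_sql_value_expr is currently hard-coded to False, every
# conversion takes the non-trivial path: a full EdgeQL statement is compiled
# with external range vars bound to the original table under a generated alias.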
class LinkMetaCommand(CompositeObjectMetaCommand, PointerMetaCommand):
@classmethod
def _create_table(
cls, link, schema, context, conditional=False, create_bases=True,
create_children=True):
new_table_name = common.get_backend_name(schema, link, catenate=False)
create_c = dbops.CommandGroup()
constraints = []
columns = []
src_col = 'source'
tgt_col = 'target'
columns.append(
dbops.Column(
name=src_col, type='uuid', required=True))
columns.append(
dbops.Column(
name=tgt_col, type='uuid', required=True))
constraints.append(
dbops.UniqueConstraint(
table_name=new_table_name,
columns=[src_col, tgt_col]))
if not link.generic(schema) and link.scalar():
tgt_prop = link.getptr(schema, 'target')
tgt_ptr = types.get_pointer_storage_info(
tgt_prop, schema=schema)
columns.append(
dbops.Column(
name=tgt_ptr.column_name,
type=common.qname(*tgt_ptr.column_type)))
table = dbops.Table(name=new_table_name)
table.add_columns(columns)
table.constraints = constraints
ct = dbops.CreateTable(table=table)
index_name = common.edgedb_name_to_pg_name(
str(link.get_name(schema)) + 'target_id_default_idx')
index = dbops.Index(index_name, new_table_name, unique=False)
index.add_columns([tgt_col])
ci = dbops.CreateIndex(index)
if conditional:
c = dbops.CommandGroup(
neg_conditions=[dbops.TableExists(new_table_name)])
else:
c = dbops.CommandGroup()
c.add_command(ct)
c.add_command(ci)
c.add_command(dbops.Comment(table, str(link.get_name(schema))))
create_c.add_command(c)
if create_children:
for l_descendant in link.descendants(schema):
if has_table(l_descendant, schema):
lc = LinkMetaCommand._create_table(
l_descendant, schema, context, conditional=True,
create_bases=False, create_children=False)
create_c.add_command(lc)
return create_c
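    # Illustrative DDL for the link table built above (names are placeholders;
    # the real ones come from common.get_backend_name):
    #
    #     CREATE TABLE <link table> (
    #         source uuid NOT NULL,
    #         target uuid NOT NULL,
    #         UNIQUE (source, target)
    #     );
    #     CREATE INDEX <...>target_id_default_idx ON <link table> (target);
    #
    # plus an extra target-value column in the link.scalar() case, and the
    # same structure for any descendant links that have their own tables.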
def schedule_endpoint_delete_action_update(
self, link, orig_schema, schema, context):
endpoint_delete_actions = context.get(
sd.DeltaRootContext).op.update_endpoint_delete_actions
link_ops = endpoint_delete_actions.link_ops
if isinstance(self, sd.DeleteObject):
for i, (_, ex_link, _) in enumerate(link_ops):
if ex_link == link:
link_ops.pop(i)
break
link_ops.append((self, link, orig_schema))
class CreateLink(LinkMetaCommand, adapts=s_links.CreateLink):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
# Need to do this early, since potential table alters triggered by
# sub-commands need this.
orig_schema = schema
schema = s_links.CreateLink.apply(self, schema, context)
link = self.scls
self.table_name = common.get_backend_name(schema, link, catenate=False)
schema = LinkMetaCommand.apply(self, schema, context)
self.provide_table(link, schema, context)
objtype = context.get(s_objtypes.ObjectTypeCommandContext)
extra_ops = []
source = link.get_source(schema)
if source is not None:
source_is_view = (
source.is_view(schema)
or source.is_compound_type(schema)
or source.get_is_derived(schema)
)
else:
source_is_view = None
if source is not None and not source_is_view:
ptr_stor_info = types.get_pointer_storage_info(
link, resolve_type=False, schema=schema)
sets_required = bool(
self.get_subcommands(
type=s_pointers.AlterPointerLowerCardinality))
if ptr_stor_info.table_type == 'ObjectType':
default_value = self.get_pointer_default(link, schema, context)
cols = self.get_columns(
link, schema, default_value, sets_required)
table_name = common.get_backend_name(
schema, objtype.scls, catenate=False)
objtype_alter_table = objtype.op.get_alter_table(
schema, context)
for col in cols:
cmd = dbops.AlterTableAddColumn(col)
objtype_alter_table.add_operation(cmd)
if col.name == '__type__':
constr_name = common.edgedb_name_to_pg_name(
str(objtype.op.classname) + '.class_check')
constr_expr = dbops.Query(textwrap.dedent(f"""\
SELECT
'"__type__" = ' ||
quote_literal({ql(str(objtype.scls.id))})
"""), type='text')
cid_constraint = dbops.CheckConstraint(
self.table_name,
constr_name,
constr_expr,
inherit=False,
)
objtype_alter_table.add_operation(
dbops.AlterTableAddConstraint(cid_constraint),
)
if default_value is not None:
self.alter_pointer_default(link, None, schema, context)
index_name = common.get_backend_name(
schema, link, catenate=False, aspect='index'
)[1]
pg_index = dbops.Index(
name=index_name, table_name=table_name,
unique=False, columns=[c.name for c in cols],
inherit=True)
ci = dbops.CreateIndex(pg_index, priority=3)
extra_ops.append(ci)
self.update_lineage_inhviews(schema, context, link)
self.schedule_inhviews_update(
schema,
context,
source,
update_descendants=True,
)
# If we're creating a required multi pointer without a SET
# REQUIRED USING inside, run the alter_pointer_optionality
# path to produce an error if there is existing data.
if (
link.get_cardinality(schema).is_multi()
and link.get_required(schema)
and not sets_required
):
self._alter_pointer_optionality(
schema, schema, context, fill_expr=None)
objtype = context.get(s_objtypes.ObjectTypeCommandContext)
self.attach_alter_table(context)
self.pgops.update(extra_ops)
if (source is not None and not source_is_view
and not link.is_pure_computable(schema)):
self.schedule_endpoint_delete_action_update(
link, orig_schema, schema, context)
return schema
class RenameLink(LinkMetaCommand, RenameObject, adapts=s_links.RenameLink):
pass
class RebaseLink(LinkMetaCommand, adapts=s_links.RebaseLink):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = s_links.RebaseLink.apply(self, schema, context)
schema = LinkMetaCommand.apply(self, schema, context)
link_ctx = context.get(s_links.LinkCommandContext)
source = link_ctx.scls
if has_table(source, schema):
self.update_base_inhviews_on_rebase(
schema, orig_schema, context, source)
if not source.is_pure_computable(schema):
self.schedule_endpoint_delete_action_update(
source, orig_schema, schema, context)
return schema
class SetLinkType(LinkMetaCommand, adapts=s_links.SetLinkType):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = s_links.SetLinkType.apply(self, schema, context)
schema = LinkMetaCommand.apply(self, schema, context)
pop = self.get_parent_op(context)
orig_type = self.scls.get_target(orig_schema)
new_type = self.scls.get_target(schema)
if (
not pop.maybe_get_object_aux_data('from_alias')
and (orig_type != new_type or self.cast_expr is not None)
):
self._alter_pointer_type(self.scls, schema, orig_schema, context)
return schema
class AlterLinkUpperCardinality(
LinkMetaCommand,
adapts=s_links.AlterLinkUpperCardinality,
):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
pop = self.get_parent_op(context)
orig_schema = schema
schema = s_links.AlterLinkUpperCardinality.apply(self, schema, context)
schema = LinkMetaCommand.apply(self, schema, context)
if (
not self.scls.generic(schema)
and not self.scls.is_pure_computable(schema)
and not pop.maybe_get_object_aux_data('from_alias')
):
orig_card = self.scls.get_cardinality(orig_schema)
new_card = self.scls.get_cardinality(schema)
if orig_card != new_card:
self._alter_pointer_cardinality(schema, orig_schema, context)
return schema
class AlterLinkLowerCardinality(
LinkMetaCommand,
adapts=s_links.AlterLinkLowerCardinality,
):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
pop = self.get_parent_op(context)
orig_schema = schema
schema = s_links.AlterLinkLowerCardinality.apply(
self, schema, context)
schema = LinkMetaCommand.apply(self, schema, context)
if not self.scls.generic(schema):
orig_required = self.scls.get_required(orig_schema)
new_required = self.scls.get_required(schema)
if (
not pop.maybe_get_object_aux_data('from_alias')
and not self.scls.is_endpoint_pointer(schema)
and orig_required != new_required
):
self._alter_pointer_optionality(
schema, orig_schema, context, fill_expr=self.fill_expr)
return schema
class AlterLinkOwned(
LinkMetaCommand,
AlterObject,
adapts=s_links.AlterLinkOwned,
):
pass
class AlterLink(LinkMetaCommand, adapts=s_links.AlterLink):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = s_links.AlterLink.apply(self, schema, context)
link = self.scls
schema = LinkMetaCommand.apply(self, schema, context)
with context(s_links.LinkCommandContext(schema, self, link)) as ctx:
ctx.original_schema = orig_schema
self.provide_table(link, schema, context)
self.attach_alter_table(context)
otd = self.get_resolved_attribute_value(
'on_target_delete',
schema=schema,
context=context,
)
card = self.get_resolved_attribute_value(
'cardinality',
schema=schema,
context=context,
)
if (otd or card) and not link.is_pure_computable(schema):
self.schedule_endpoint_delete_action_update(
link, orig_schema, schema, context)
return schema
class DeleteLink(LinkMetaCommand, adapts=s_links.DeleteLink):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
delta_root_ctx = context.top()
orig_schema = delta_root_ctx.original_schema
link = schema.get(self.classname)
old_table_name = common.get_backend_name(
schema, link, catenate=False)
schema = LinkMetaCommand.apply(self, schema, context)
schema = s_links.DeleteLink.apply(self, schema, context)
if (
not link.generic(orig_schema)
and has_table(link.get_source(orig_schema), orig_schema)
):
link_name = link.get_shortname(orig_schema).name
ptr_stor_info = types.get_pointer_storage_info(
link, schema=orig_schema)
objtype = context.get(s_objtypes.ObjectTypeCommandContext)
if (not isinstance(objtype.op, s_objtypes.DeleteObjectType)
and ptr_stor_info.table_type == 'ObjectType'
and objtype.scls.maybe_get_ptr(schema, link_name) is None):
# Only drop the column if the parent is not being dropped
# and the link was not reinherited in the same delta.
if objtype.scls.maybe_get_ptr(schema, link_name) is None:
# This must be a separate command so that objects
# depending on this column can be dropped correctly.
alter_table = objtype.op.get_alter_table(
schema, context, manual=True, priority=2)
col = dbops.Column(
name=ptr_stor_info.column_name,
type=common.qname(*ptr_stor_info.column_type))
col = dbops.AlterTableDropColumn(col)
alter_table.add_operation(col)
self.pgops.add(alter_table)
self.schedule_inhviews_update(
schema,
context,
objtype.scls,
update_descendants=True,
)
self.schedule_endpoint_delete_action_update(
link, orig_schema, schema, context)
self.attach_alter_table(context)
self.pgops.add(
self.drop_inhview(orig_schema, context, link, drop_ancestors=True)
)
self.pgops.add(
dbops.DropTable(
name=old_table_name,
priority=1,
conditions=[dbops.TableExists(old_table_name)],
)
)
self.update_base_inhviews(orig_schema, context, link)
self.schedule_inhview_deletion(orig_schema, context, link)
return schema
class PropertyMetaCommand(CompositeObjectMetaCommand, PointerMetaCommand):
@classmethod
def _create_table(
cls, prop, schema, context, conditional=False, create_bases=True,
create_children=True):
new_table_name = common.get_backend_name(schema, prop, catenate=False)
create_c = dbops.CommandGroup()
constraints = []
columns = []
src_col = common.edgedb_name_to_pg_name('source')
columns.append(
dbops.Column(
name=src_col, type='uuid', required=True))
id = sn.QualName(
module=prop.get_name(schema).module, name=str(prop.id))
index_name = common.convert_name(id, 'idx0', catenate=True)
pg_index = dbops.Index(
name=index_name, table_name=new_table_name,
unique=False, columns=[src_col])
ci = dbops.CreateIndex(pg_index)
if not prop.generic(schema):
tgt_cols = cls.get_columns(prop, schema, None)
columns.extend(tgt_cols)
constraints.append(
dbops.UniqueConstraint(
table_name=new_table_name,
columns=[src_col] + [tgt_col.name for tgt_col in tgt_cols]
)
)
table = dbops.Table(name=new_table_name)
table.add_columns(columns)
table.constraints = constraints
ct = dbops.CreateTable(table=table)
if conditional:
c = dbops.CommandGroup(
neg_conditions=[dbops.TableExists(new_table_name)])
else:
c = dbops.CommandGroup()
c.add_command(ct)
c.add_command(ci)
c.add_command(dbops.Comment(table, str(prop.get_name(schema))))
create_c.add_command(c)
if create_children:
for p_descendant in prop.descendants(schema):
if has_table(p_descendant, schema):
pc = PropertyMetaCommand._create_table(
p_descendant, schema, context, conditional=True,
create_bases=False, create_children=False)
create_c.add_command(pc)
return create_c
class CreateProperty(PropertyMetaCommand, adapts=s_props.CreateProperty):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_props.CreateProperty.apply(self, schema, context)
prop = self.scls
propname = prop.get_shortname(schema).name
schema = PropertyMetaCommand.apply(self, schema, context)
src = context.get(s_sources.SourceCommandContext)
self.provide_table(prop, schema, context)
if src and has_table(src.scls, schema):
if isinstance(src.scls, s_links.Link):
src.op.provide_table(src.scls, schema, context)
ptr_stor_info = types.get_pointer_storage_info(
prop, resolve_type=False, schema=schema)
sets_required = bool(
self.get_subcommands(
type=s_pointers.AlterPointerLowerCardinality))
if (
(
not isinstance(src.scls, s_objtypes.ObjectType)
or ptr_stor_info.table_type == 'ObjectType'
)
and (
not isinstance(src.scls, s_links.Link)
or propname not in {'source', 'target'}
)
):
alter_table = src.op.get_alter_table(
schema,
context,
force_new=True,
manual=True,
)
default_value = self.get_pointer_default(prop, schema, context)
cols = self.get_columns(
prop, schema, default_value, sets_required)
for col in cols:
cmd = dbops.AlterTableAddColumn(col)
alter_table.add_operation(cmd)
if col.name == 'id':
constraint = dbops.PrimaryKey(
table_name=alter_table.name,
columns=[col.name],
)
alter_table.add_operation(
dbops.AlterTableAddConstraint(constraint),
)
self.pgops.add(alter_table)
self.update_lineage_inhviews(schema, context, prop)
if has_table(src.op.scls, schema):
self.schedule_inhviews_update(
schema,
context,
src.op.scls,
update_descendants=True,
)
# If we're creating a required multi pointer without a SET
# REQUIRED USING inside, run the alter_pointer_optionality
# path to produce an error if there is existing data.
if (
prop.get_cardinality(schema).is_multi()
and prop.get_required(schema)
and not sets_required
):
self._alter_pointer_optionality(
schema, schema, context, fill_expr=None)
return schema
class RenameProperty(
PropertyMetaCommand, RenameObject, adapts=s_props.RenameProperty):
pass
class RebaseProperty(
PropertyMetaCommand, adapts=s_props.RebaseProperty):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = s_props.RebaseProperty.apply(self, schema, context)
schema = PropertyMetaCommand.apply(self, schema, context)
prop_ctx = context.get(s_props.PropertyCommandContext)
source = prop_ctx.scls
if has_table(source, schema):
self.update_base_inhviews_on_rebase(
schema, orig_schema, context, source)
return schema
class SetPropertyType(
PropertyMetaCommand, adapts=s_props.SetPropertyType):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
pop = self.get_parent_op(context)
orig_schema = schema
schema = s_props.SetPropertyType.apply(self, schema, context)
schema = PropertyMetaCommand.apply(self, schema, context)
orig_type = self.scls.get_target(orig_schema)
new_type = self.scls.get_target(schema)
if (
not pop.maybe_get_object_aux_data('from_alias')
and not self.scls.is_endpoint_pointer(schema)
and (orig_type != new_type or self.cast_expr is not None)
):
self._alter_pointer_type(self.scls, schema, orig_schema, context)
return schema
class AlterPropertyUpperCardinality(
PropertyMetaCommand,
adapts=s_props.AlterPropertyUpperCardinality,
):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
pop = self.get_parent_op(context)
orig_schema = schema
schema = s_props.AlterPropertyUpperCardinality.apply(
self, schema, context)
schema = PropertyMetaCommand.apply(self, schema, context)
if (
not self.scls.generic(schema)
and not self.scls.is_pure_computable(schema)
and not self.scls.is_endpoint_pointer(schema)
and not pop.maybe_get_object_aux_data('from_alias')
):
orig_card = self.scls.get_cardinality(orig_schema)
new_card = self.scls.get_cardinality(schema)
if orig_card != new_card:
self._alter_pointer_cardinality(schema, orig_schema, context)
return schema
class AlterPropertyLowerCardinality(
PropertyMetaCommand,
adapts=s_props.AlterPropertyLowerCardinality,
):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
pop = self.get_parent_op(context)
orig_schema = schema
schema = s_props.AlterPropertyLowerCardinality.apply(
self, schema, context)
schema = PropertyMetaCommand.apply(self, schema, context)
if not self.scls.generic(schema):
orig_required = self.scls.get_required(orig_schema)
new_required = self.scls.get_required(schema)
if (
not pop.maybe_get_object_aux_data('from_alias')
and not self.scls.is_endpoint_pointer(schema)
and orig_required != new_required
):
self._alter_pointer_optionality(
schema, orig_schema, context, fill_expr=self.fill_expr)
return schema
class AlterPropertyOwned(
PropertyMetaCommand,
AlterObject,
adapts=s_props.AlterPropertyOwned,
):
pass
class AlterProperty(
PropertyMetaCommand, adapts=s_props.AlterProperty):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
schema = s_props.AlterProperty.apply(self, schema, context)
prop = self.scls
schema = PropertyMetaCommand.apply(self, schema, context)
if self.metadata_only:
return schema
if prop.is_pure_computable(orig_schema):
return schema
with context(
s_props.PropertyCommandContext(schema, self, prop)) as ctx:
ctx.original_schema = orig_schema
self.provide_table(prop, schema, context)
self.alter_pointer_default(prop, orig_schema, schema, context)
return schema
class DeleteProperty(
PropertyMetaCommand, adapts=s_props.DeleteProperty):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
orig_schema = schema
prop = schema.get(self.classname)
schema = s_props.DeleteProperty.apply(self, schema, context)
schema = PropertyMetaCommand.apply(self, schema, context)
source_ctx = context.get(s_sources.SourceCommandContext)
if source_ctx is not None:
source = source_ctx.scls
source_op = source_ctx.op
else:
source = source_op = None
if (source
and not source.maybe_get_ptr(
schema, prop.get_shortname(orig_schema).name)
and has_table(source, schema)):
self.pgops.add(
self.drop_inhview(schema, context, source, drop_ancestors=True)
)
alter_table = source_op.get_alter_table(
schema, context, force_new=True)
ptr_stor_info = types.get_pointer_storage_info(
prop,
schema=orig_schema,
link_bias=prop.is_link_property(orig_schema),
)
if ptr_stor_info.table_type == 'ObjectType':
col = dbops.AlterTableDropColumn(
dbops.Column(name=ptr_stor_info.column_name,
type=ptr_stor_info.column_type))
alter_table.add_operation(col)
if has_table(prop, orig_schema):
self.pgops.add(
self.drop_inhview(
orig_schema, context, prop, drop_ancestors=True)
)
old_table_name = common.get_backend_name(
orig_schema, prop, catenate=False)
self.pgops.add(dbops.DropTable(name=old_table_name, priority=1))
self.update_base_inhviews(orig_schema, context, prop)
self.schedule_inhview_deletion(orig_schema, context, prop)
if (
source is not None
and not context.is_deleting(source)
):
self.schedule_inhviews_update(
schema,
context,
source,
update_descendants=True,
)
return schema
class UpdateEndpointDeleteActions(MetaCommand):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.link_ops = []
self.changed_targets = set()
def _get_link_table_union(self, schema, links, include_children) -> str:
selects = []
aspect = 'inhview' if include_children else None
for link in links:
selects.append(textwrap.dedent('''\
(SELECT
{id}::uuid AS __sobj_id__,
{src} as source,
{tgt} as target
FROM {table})
''').format(
id=ql(str(link.id)),
src=common.quote_ident('source'),
tgt=common.quote_ident('target'),
table=common.get_backend_name(
schema,
link,
aspect=aspect,
),
))
return '(' + '\nUNION ALL\n '.join(selects) + ') as q'
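# For illustration, with two hypothetical links the string built above has
# roughly this shape (identifiers are schematic placeholders, not real
# backend names):
#
#   ((SELECT '<link-a-id>'::uuid AS __sobj_id__,
#            "source" as source, "target" as target
#     FROM <link-a table or inhview>)
#    UNION ALL
#    (SELECT '<link-b-id>'::uuid AS __sobj_id__,
#            "source" as source, "target" as target
#     FROM <link-b table or inhview>)) as q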
def _get_inline_link_table_union(
self, schema, links, include_children) -> str:
selects = []
aspect = 'inhview' if include_children else None
for link in links:
link_psi = types.get_pointer_storage_info(link, schema=schema)
link_col = link_psi.column_name
selects.append(textwrap.dedent('''\
(SELECT
{id}::uuid AS __sobj_id__,
{src} as source,
{tgt} as target
FROM {table})
''').format(
id=ql(str(link.id)),
src=common.quote_ident('id'),
tgt=common.quote_ident(link_col),
table=common.get_backend_name(
schema,
link.get_source(schema),
aspect=aspect,
),
))
return '(' + '\nUNION ALL\n '.join(selects) + ') as q'
def get_trigger_name(self, schema, target,
disposition, deferred=False, inline=False):
if disposition == 'target':
aspect = 'target-del'
else:
aspect = 'source-del'
if deferred:
aspect += '-def'
else:
aspect += '-imm'
if inline:
aspect += '-inl'
else:
aspect += '-otl'
aspect += '-t'
return common.get_backend_name(
schema, target, catenate=False, aspect=aspect)[1]
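# For example, the aspect assembled above for disposition='target',
# deferred=False, inline=True is 'target-del-imm-inl-t', while
# disposition='source', deferred=True, inline=False yields
# 'source-del-def-otl-t'; get_trigger_proc_name() below builds the same
# aspects but with an '-f' suffix instead of '-t'.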
def get_trigger_proc_name(self, schema, target,
disposition, deferred=False, inline=False):
if disposition == 'target':
aspect = 'target-del'
else:
aspect = 'source-del'
if deferred:
aspect += '-def'
else:
aspect += '-imm'
if inline:
aspect += '-inl'
else:
aspect += '-otl'
aspect += '-f'
return common.get_backend_name(
schema, target, catenate=False, aspect=aspect)
def get_trigger_proc_text(self, target, links, *,
disposition, inline, schema):
if inline:
return self._get_inline_link_trigger_proc_text(
target, links, disposition=disposition, schema=schema)
else:
return self._get_outline_link_trigger_proc_text(
target, links, disposition=disposition, schema=schema)
def _get_outline_link_trigger_proc_text(
self, target, links, *, disposition, schema):
chunks = []
DA = s_links.LinkTargetDeleteAction
if disposition == 'target':
groups = itertools.groupby(
links, lambda l: l.get_on_target_delete(schema))
near_endpoint, far_endpoint = 'target', 'source'
else:
groups = [(DA.Allow, links)]
near_endpoint, far_endpoint = 'source', 'target'
for action, links in groups:
if action is DA.Restrict or action is DA.DeferredRestrict:
# Inherited link targets with restrict actions are
# elided by apply() to enable us to use inhviews here
# when looking for live references.
tables = self._get_link_table_union(
schema, links, include_children=True)
text = textwrap.dedent('''\
SELECT
q.__sobj_id__, q.source, q.target
INTO link_type_id, srcid, tgtid
FROM
{tables}
WHERE
q.{near_endpoint} = OLD.{id}
LIMIT 1;
IF FOUND THEN
SELECT
edgedb.shortname_from_fullname(link.name),
edgedb._get_schema_object_name(link.{far_endpoint})
INTO linkname, endname
FROM
edgedb."_SchemaLink" AS link
WHERE
link.id = link_type_id;
RAISE foreign_key_violation
USING
TABLE = TG_TABLE_NAME,
SCHEMA = TG_TABLE_SCHEMA,
MESSAGE = 'deletion of {tgtname} (' || tgtid
|| ') is prohibited by link target policy',
DETAIL = 'Object is still referenced in link '
|| linkname || ' of ' || endname || ' ('
|| srcid || ').';
END IF;
''').format(
tables=tables,
id='id',
tgtname=target.get_displayname(schema),
near_endpoint=near_endpoint,
far_endpoint=far_endpoint,
)
chunks.append(text)
elif action == s_links.LinkTargetDeleteAction.Allow:
for link in links:
link_table = common.get_backend_name(
schema, link)
# Since 'required' on multi links is enforced manually
# on the query side (not through constraints/triggers of
# its own), we also need to enforce it manually when
# deleting a required multi link.
if link.get_required(schema) and disposition == 'target':
required_text = textwrap.dedent('''\
SELECT q.source INTO srcid
FROM {link_table} as q
WHERE q.target = OLD.{id}
AND NOT EXISTS (
SELECT FROM {link_table} as q2
WHERE q.source = q2.source
AND q2.target != OLD.{id}
);
IF FOUND THEN
RAISE not_null_violation
USING
TABLE = TG_TABLE_NAME,
SCHEMA = TG_TABLE_SCHEMA,
MESSAGE = 'missing value',
COLUMN = '{link_id}';
END IF;
''').format(
link_table=link_table,
link_id=str(link.id),
id='id'
)
chunks.append(required_text)
# Otherwise just delete it from the link table.
text = textwrap.dedent('''\
DELETE FROM
{link_table}
WHERE
{endpoint} = OLD.{id};
''').format(
link_table=link_table,
endpoint=common.quote_ident(near_endpoint),
id='id'
)
chunks.append(text)
elif action == s_links.LinkTargetDeleteAction.DeleteSource:
sources = collections.defaultdict(list)
for link in links:
sources[link.get_source(schema)].append(link)
for source, source_links in sources.items():
tables = self._get_link_table_union(
schema, source_links, include_children=False)
text = textwrap.dedent('''\
DELETE FROM
{source_table}
WHERE
{source_table}.{id} IN (
SELECT source
FROM {tables}
WHERE target = OLD.{id}
);
''').format(
source_table=common.get_backend_name(schema, source),
id='id',
tables=tables,
)
chunks.append(text)
text = textwrap.dedent('''\
DECLARE
link_type_id uuid;
srcid uuid;
tgtid uuid;
linkname text;
endname text;
BEGIN
{chunks}
RETURN OLD;
END;
''').format(chunks='\n\n'.join(chunks))
return text
def _get_inline_link_trigger_proc_text(
self, target, links, *, disposition, schema):
if disposition == 'source':
raise RuntimeError(
'source disposition link target delete action trigger does '
'not make sense for inline links')
chunks = []
DA = s_links.LinkTargetDeleteAction
groups = itertools.groupby(
links, lambda l: l.get_on_target_delete(schema))
near_endpoint, far_endpoint = 'target', 'source'
for action, links in groups:
if action is DA.Restrict or action is DA.DeferredRestrict:
# Inherited link targets with restrict actions are
# elided by apply() to enable us to use inhviews here
# when looking for live references.
tables = self._get_inline_link_table_union(
schema, links, include_children=True)
text = textwrap.dedent('''\
SELECT
q.__sobj_id__, q.source, q.target
INTO link_type_id, srcid, tgtid
FROM
{tables}
WHERE
q.{near_endpoint} = OLD.{id}
LIMIT 1;
IF FOUND THEN
SELECT
edgedb.shortname_from_fullname(link.name),
edgedb._get_schema_object_name(link.{far_endpoint})
INTO linkname, endname
FROM
edgedb."_SchemaLink" AS link
WHERE
link.id = link_type_id;
RAISE foreign_key_violation
USING
TABLE = TG_TABLE_NAME,
SCHEMA = TG_TABLE_SCHEMA,
MESSAGE = 'deletion of {tgtname} (' || tgtid
|| ') is prohibited by link target policy',
DETAIL = 'Object is still referenced in link '
|| linkname || ' of ' || endname || ' ('
|| srcid || ').';
END IF;
''').format(
tables=tables,
id='id',
tgtname=target.get_displayname(schema),
near_endpoint=near_endpoint,
far_endpoint=far_endpoint,
)
chunks.append(text)
elif action == s_links.LinkTargetDeleteAction.Allow:
for link in links:
link_psi = types.get_pointer_storage_info(
link, schema=schema)
link_col = link_psi.column_name
source_table = common.get_backend_name(
schema, link.get_source(schema))
text = textwrap.dedent(f'''\
UPDATE
{source_table}
SET
{qi(link_col)} = NULL
WHERE
{qi(link_col)} = OLD.id;
''')
chunks.append(text)
elif action == s_links.LinkTargetDeleteAction.DeleteSource:
sources = collections.defaultdict(list)
for link in links:
sources[link.get_source(schema)].append(link)
for source, source_links in sources.items():
tables = self._get_inline_link_table_union(
schema, source_links, include_children=False)
text = textwrap.dedent('''\
DELETE FROM
{source_table}
WHERE
{source_table}.{id} IN (
SELECT source
FROM {tables}
WHERE target = OLD.{id}
);
''').format(
source_table=common.get_backend_name(schema, source),
id='id',
tables=tables,
)
chunks.append(text)
text = textwrap.dedent('''\
DECLARE
link_type_id uuid;
srcid uuid;
tgtid uuid;
linkname text;
endname text;
links text[];
BEGIN
{chunks}
RETURN OLD;
END;
''').format(chunks='\n\n'.join(chunks))
return text
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
if not self.link_ops and not self.changed_targets:
return schema
DA = s_links.LinkTargetDeleteAction
affected_sources = set()
affected_targets = {t for _, t in self.changed_targets}
modifications = any(
isinstance(op, RebaseObjectType) and op.removed_bases
for op, _ in self.changed_targets
)
for link_op, link, orig_schema in self.link_ops:
# If our link has a restrict policy, we don't need to update
# the target on changes to inherited links.
# Most importantly, this optimization lets us avoid updating
# the triggers for every schema::Type subtype every time a
# new object type is created containing a __type__ link.
eff_schema = (
orig_schema if isinstance(link_op, DeleteLink) else schema)
action = link.get_on_target_delete(eff_schema)
target_is_affected = not (
(action is DA.Restrict or action is DA.DeferredRestrict)
and link.get_implicit_bases(eff_schema)
)
if (
link.generic(eff_schema)
or link.is_pure_computable(eff_schema)
):
continue
source = link.get_source(eff_schema)
target = link.get_target(eff_schema)
if not isinstance(link_op, CreateLink):
modifications = True
if isinstance(link_op, DeleteLink):
current_source = orig_schema.get_by_id(source.id, None)
if (current_source is not None
and not current_source.is_view(orig_schema)):
affected_sources.add((current_source, orig_schema))
current_target = schema.get_by_id(target.id, None)
if target_is_affected and current_target is not None:
affected_targets.add(current_target)
else:
if source.is_view(schema):
continue
affected_sources.add((source, schema))
if target_is_affected:
affected_targets.add(target)
if isinstance(link_op, AlterLink):
orig_target = link.get_target(orig_schema)
if target != orig_target:
current_orig_target = schema.get_by_id(
orig_target.id, None)
if current_orig_target is not None:
affected_targets.add(current_orig_target)
for source, src_schema in affected_sources:
links = []
for link in source.get_pointers(src_schema).objects(src_schema):
if (not isinstance(link, s_links.Link)
or link.is_pure_computable(src_schema)):
continue
ptr_stor_info = types.get_pointer_storage_info(
link, schema=src_schema)
if ptr_stor_info.table_type != 'link':
continue
links.append(link)
links.sort(
key=lambda l: (l.get_on_target_delete(src_schema),
l.get_name(src_schema)))
if links or modifications:
self._update_action_triggers(
src_schema, source, links, disposition='source')
# All descendants of affected targets also need to have their
# triggers updated, so track them down.
all_affected_targets = set()
for target in affected_targets:
union_of = target.get_union_of(schema)
if union_of:
objtypes = tuple(union_of.objects(schema))
else:
objtypes = (target,)
for objtype in objtypes:
all_affected_targets.add(objtype)
for descendant in objtype.descendants(schema):
if has_table(descendant, schema):
all_affected_targets.add(descendant)
for target in all_affected_targets:
deferred_links = []
deferred_inline_links = []
links = []
inline_links = []
inbound_links = schema.get_referrers(
target, scls_type=s_links.Link, field_name='target')
# We need to look at all inbound links to all ancestors
for ancestor in target.get_ancestors(schema).objects(schema):
inbound_links |= schema.get_referrers(
ancestor, scls_type=s_links.Link, field_name='target')
for link in inbound_links:
if link.is_pure_computable(schema):
continue
action = link.get_on_target_delete(schema)
# Enforcement of link deletion policies on targets is
# handled by looking at the inheritance views when
# restrict is the policy.
# If the policy is allow or delete source, we need to
# actually process this for each link.
if (
(action is DA.Restrict or action is DA.DeferredRestrict)
and link.get_implicit_bases(schema)
):
continue
source = link.get_source(schema)
if source.is_view(schema):
continue
ptr_stor_info = types.get_pointer_storage_info(
link, schema=schema)
if ptr_stor_info.table_type != 'link':
if action is DA.DeferredRestrict:
deferred_inline_links.append(link)
else:
inline_links.append(link)
else:
if action is DA.DeferredRestrict:
deferred_links.append(link)
else:
links.append(link)
links.sort(
key=lambda l: (l.get_on_target_delete(schema),
l.get_name(schema)))
inline_links.sort(
key=lambda l: (l.get_on_target_delete(schema),
l.get_name(schema)))
deferred_links.sort(
key=lambda l: l.get_name(schema))
deferred_inline_links.sort(
key=lambda l: l.get_name(schema))
if links or modifications:
self._update_action_triggers(
schema, target, links, disposition='target')
if inline_links or modifications:
self._update_action_triggers(
schema, target, inline_links,
disposition='target', inline=True)
if deferred_links or modifications:
self._update_action_triggers(
schema, target, deferred_links,
disposition='target', deferred=True)
if deferred_inline_links or modifications:
self._update_action_triggers(
schema, target, deferred_inline_links,
disposition='target', deferred=True,
inline=True)
return schema
def _update_action_triggers(
self,
schema,
objtype: s_objtypes.ObjectType,
links: List[s_links.Link], *,
disposition: str,
deferred: bool=False,
inline: bool=False) -> None:
table_name = common.get_backend_name(
schema, objtype, catenate=False)
trigger_name = self.get_trigger_name(
schema, objtype, disposition=disposition,
deferred=deferred, inline=inline)
proc_name = self.get_trigger_proc_name(
schema, objtype, disposition=disposition,
deferred=deferred, inline=inline)
trigger = dbops.Trigger(
name=trigger_name, table_name=table_name,
events=('delete',), procedure=proc_name,
is_constraint=True, inherit=True, deferred=deferred)
if links:
proc_text = self.get_trigger_proc_text(
objtype, links, disposition=disposition,
inline=inline, schema=schema)
trig_func = dbops.Function(
name=proc_name, text=proc_text, volatility='volatile',
returns='trigger', language='plpgsql')
self.pgops.add(dbops.CreateOrReplaceFunction(trig_func))
self.pgops.add(dbops.CreateTrigger(
trigger, neg_conditions=[dbops.TriggerExists(
trigger_name=trigger_name, table_name=table_name
)]
))
else:
self.pgops.add(
dbops.DropTrigger(
trigger,
conditions=[dbops.TriggerExists(
trigger_name=trigger_name,
table_name=table_name,
)]
)
)
self.pgops.add(
dbops.DropFunction(
name=proc_name,
args=[],
conditions=[dbops.FunctionExists(
name=proc_name,
args=[],
)]
)
)
@dataclasses.dataclass
class InheritanceViewUpdate:
update_ancestors: bool = True
update_descendants: bool = False
class UpdateInheritanceViews(MetaCommand):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.view_updates = {}
self.view_deletions = {}
def apply(self, schema, context):
all_updates = set()
for obj, update_info in self.view_updates.items():
if not schema.has_object(obj.id):
continue
all_updates.add(obj)
if update_info.update_ancestors:
all_updates.update(obj.get_ancestors(schema).objects(schema))
if update_info.update_descendants:
all_updates.update(obj.descendants(schema))
graph = {}
for obj in all_updates:
objname = obj.get_name(schema)
graph[objname] = topological.DepGraphEntry(
item=obj,
deps=obj.get_bases(schema).names(schema),
extra=False,
)
ordered = topological.sort(graph, allow_unresolved=True)
for obj in reversed(list(ordered)):
if has_table(obj, schema):
self.update_inhview(schema, obj)
for obj, obj_schema in self.view_deletions.items():
self.delete_inhview(obj_schema, obj)
def _get_select_from(self, schema, obj, ptrnames):
if isinstance(obj, s_sources.Source):
ptrs = dict(obj.get_pointers(schema).items(schema))
cols = []
for ptrname, alias in ptrnames.items():
ptr = ptrs[ptrname]
ptr_stor_info = types.get_pointer_storage_info(
ptr,
link_bias=isinstance(obj, s_links.Link),
schema=schema,
)
cols.append((ptr_stor_info.column_name, alias))
else:
cols = list(ptrnames.items())
coltext = ',\n'.join(
f'{qi(col)} AS {qi(alias)}' for col, alias in cols)
tabname = common.get_backend_name(
schema,
obj,
catenate=False,
aspect='table',
)
return textwrap.dedent(f'''\
(SELECT
{coltext}
FROM
{q(*tabname)}
)
''')
def update_inhview(self, schema, obj):
inhview_name = common.get_backend_name(
schema, obj, catenate=False, aspect='inhview')
ptrs = {}
if isinstance(obj, s_sources.Source):
pointers = list(obj.get_pointers(schema).items(schema))
pointers.sort(key=lambda p: p[1].id)
for ptrname, ptr in pointers:
ptr_stor_info = types.get_pointer_storage_info(
ptr,
link_bias=isinstance(obj, s_links.Link),
schema=schema,
)
if (
isinstance(obj, s_links.Link)
or ptr_stor_info.table_type == 'ObjectType'
):
ptrs[ptrname] = ptr_stor_info.column_name
else:
# MULTI PROPERTY
ptrs['source'] = 'source'
ptrs['target'] = 'target'
components = [self._get_select_from(schema, obj, ptrs)]
components.extend(
self._get_select_from(schema, descendant, ptrs)
for descendant in obj.descendants(schema)
if has_table(descendant, schema)
)
query = '\nUNION ALL\n'.join(components)
view = dbops.View(
name=inhview_name,
query=query,
)
self.pgops.add(
dbops.DropView(
inhview_name,
priority=1,
conditions=[dbops.ViewExists(inhview_name)],
),
)
self.pgops.add(
dbops.CreateView(
view=view,
priority=1,
),
)
def delete_inhview(self, schema, obj):
inhview_name = common.get_backend_name(
schema, obj, catenate=False, aspect='inhview')
self.pgops.add(
dbops.DropView(
inhview_name,
conditions=[dbops.ViewExists(inhview_name)],
priority=1,
),
)
class ModuleMetaCommand(ObjectMetaCommand):
pass
class CreateModule(ModuleMetaCommand, adapts=s_mod.CreateModule):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = CompositeObjectMetaCommand.apply(self, schema, context)
return s_mod.CreateModule.apply(self, schema, context)
class AlterModule(ModuleMetaCommand, adapts=s_mod.AlterModule):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_mod.AlterModule.apply(self, schema, context=context)
return CompositeObjectMetaCommand.apply(self, schema, context)
class DeleteModule(ModuleMetaCommand, adapts=s_mod.DeleteModule):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = CompositeObjectMetaCommand.apply(self, schema, context)
return s_mod.DeleteModule.apply(self, schema, context)
class CreateDatabase(ObjectMetaCommand, adapts=s_db.CreateDatabase):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_db.CreateDatabase.apply(self, schema, context)
db = self.scls
tenant_id = self._get_tenant_id(context)
db_name = common.get_database_backend_name(
str(self.classname), tenant_id=tenant_id)
tpl_name = common.get_database_backend_name(
self.template or edbdef.EDGEDB_TEMPLATE_DB, tenant_id=tenant_id)
self.pgops.add(
dbops.CreateDatabase(
dbops.Database(
db_name,
metadata=dict(
id=str(db.id),
tenant_id=tenant_id,
builtin=self.get_attribute_value('builtin'),
name=str(self.classname),
),
),
template=tpl_name,
)
)
return schema
class DropDatabase(ObjectMetaCommand, adapts=s_db.DropDatabase):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_db.DropDatabase.apply(self, schema, context)
tenant_id = self._get_tenant_id(context)
db_name = common.get_database_backend_name(
str(self.classname), tenant_id=tenant_id)
self.pgops.add(dbops.DropDatabase(db_name))
return schema
class CreateRole(ObjectMetaCommand, adapts=s_roles.CreateRole):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_roles.CreateRole.apply(self, schema, context)
role = self.scls
schema = ObjectMetaCommand.apply(self, schema, context)
membership = list(role.get_bases(schema).names(schema))
passwd = role.get_password(schema)
superuser_flag = False
members = set()
role_name = str(role.get_name(schema))
backend_params = self._get_backend_params(context)
capabilities = backend_params.instance_params.capabilities
tenant_id = backend_params.instance_params.tenant_id
if role.get_superuser(schema):
membership.append(edbdef.EDGEDB_SUPERGROUP)
# If the cluster is not exposing an explicit superuser role,
# we will make the created Postgres role superuser if we can
if not backend_params.instance_params.base_superuser:
superuser_flag = (
capabilities
& pgcluster.BackendCapabilities.SUPERUSER_ACCESS
)
if backend_params.session_authorization_role is not None:
# When we connect to the backend via a proxy role, we
# must ensure that role is a member of _every_ EdgeDB
# role so that `SET ROLE` can work properly.
members.add(backend_params.session_authorization_role)
role = dbops.Role(
name=common.get_role_backend_name(role_name, tenant_id=tenant_id),
allow_login=True,
superuser=superuser_flag,
password=passwd,
membership=[
common.get_role_backend_name(parent_role, tenant_id=tenant_id)
for parent_role in membership
],
metadata=dict(
id=str(role.id),
name=role_name,
tenant_id=tenant_id,
password_hash=passwd,
builtin=role.get_builtin(schema),
),
)
self.pgops.add(dbops.CreateRole(role))
return schema
class AlterRole(ObjectMetaCommand, adapts=s_roles.AlterRole):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_roles.AlterRole.apply(self, schema, context)
role = self.scls
schema = ObjectMetaCommand.apply(self, schema, context)
backend_params = self._get_backend_params(context)
capabilities = backend_params.instance_params.capabilities
tenant_id = backend_params.instance_params.tenant_id
instance_params = backend_params.instance_params
role_name = str(role.get_name(schema))
kwargs = {}
if self.has_attribute_value('password'):
passwd = self.get_attribute_value('password')
kwargs['password'] = passwd
kwargs['metadata'] = dict(
id=str(role.id),
name=role_name,
tenant_id=tenant_id,
password_hash=passwd,
builtin=role.get_builtin(schema),
)
pg_role_name = common.get_role_backend_name(
role_name, tenant_id=tenant_id)
if self.has_attribute_value('superuser'):
membership = list(role.get_bases(schema).names(schema))
membership.append(edbdef.EDGEDB_SUPERGROUP)
self.pgops.add(
dbops.AlterRoleAddMembership(
name=pg_role_name,
membership=[
common.get_role_backend_name(
parent_role, tenant_id=tenant_id)
for parent_role in membership
],
)
)
superuser_flag = False
# If the cluster is not exposing an explicit superuser role,
# we will make the modified Postgres role superuser if we can
if not instance_params.base_superuser:
superuser_flag = (
capabilities
& pgcluster.BackendCapabilities.SUPERUSER_ACCESS
)
kwargs['superuser'] = superuser_flag
dbrole = dbops.Role(name=pg_role_name, **kwargs)
self.pgops.add(dbops.AlterRole(dbrole))
return schema
class RebaseRole(ObjectMetaCommand, adapts=s_roles.RebaseRole):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_roles.RebaseRole.apply(self, schema, context)
role = self.scls
schema = ObjectMetaCommand.apply(self, schema, context)
tenant_id = self._get_tenant_id(context)
for dropped in self.removed_bases:
self.pgops.add(dbops.AlterRoleDropMember(
name=common.get_role_backend_name(
str(dropped.name), tenant_id=tenant_id),
member=common.get_role_backend_name(
str(role.get_name(schema)), tenant_id=tenant_id),
))
for bases, _pos in self.added_bases:
for added in bases:
self.pgops.add(dbops.AlterRoleAddMember(
name=common.get_role_backend_name(
str(added.name), tenant_id=tenant_id),
member=common.get_role_backend_name(
str(role.get_name(schema)), tenant_id=tenant_id),
))
return schema
class DeleteRole(ObjectMetaCommand, adapts=s_roles.DeleteRole):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_roles.DeleteRole.apply(self, schema, context)
schema = ObjectMetaCommand.apply(self, schema, context)
tenant_id = self._get_tenant_id(context)
self.pgops.add(dbops.DropRole(
common.get_role_backend_name(
str(self.classname), tenant_id=tenant_id)))
return schema
class CreateExtensionPackage(
ObjectMetaCommand,
adapts=s_exts.CreateExtensionPackage,
):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_exts.CreateExtensionPackage.apply(self, schema, context)
schema = ObjectMetaCommand.apply(self, schema, context)
ext_id = str(self.scls.id)
name__internal = str(self.scls.get_name(schema))
name = self.scls.get_displayname(schema)
version = self.scls.get_version(schema)._asdict()
version['stage'] = version['stage'].name.lower()
tenant_id = self._get_tenant_id(context)
tpl_db_name = common.get_database_backend_name(
edbdef.EDGEDB_TEMPLATE_DB, tenant_id=tenant_id)
self.pgops.add(
dbops.UpdateMetadataSection(
dbops.Database(name=tpl_db_name),
section='ExtensionPackage',
metadata={
ext_id: {
'id': ext_id,
'name': name,
'name__internal': name__internal,
'script': self.scls.get_script(schema),
'version': version,
'builtin': self.scls.get_builtin(schema),
'internal': self.scls.get_internal(schema),
}
}
)
)
return schema
class DeleteExtensionPackage(
ObjectMetaCommand,
adapts=s_exts.DeleteExtensionPackage,
):
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
schema = s_exts.DeleteExtensionPackage.apply(self, schema, context)
schema = ObjectMetaCommand.apply(self, schema, context)
tenant_id = self._get_tenant_id(context)
tpl_db_name = common.get_database_backend_name(
edbdef.EDGEDB_TEMPLATE_DB, tenant_id=tenant_id)
ext_id = str(self.scls.id)
self.pgops.add(
dbops.UpdateMetadataSection(
dbops.Database(name=tpl_db_name),
section='ExtensionPackage',
metadata={
ext_id: None
}
)
)
return schema
class CreateExtension(
CreateObject,
adapts=s_exts.CreateExtension,
):
pass
class DeleteExtension(
DeleteObject,
adapts=s_exts.DeleteExtension,
):
pass
class DeltaRoot(MetaCommand, adapts=sd.DeltaRoot):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._renames = {}
def apply(
self,
schema: s_schema.Schema,
context: sd.CommandContext,
) -> s_schema.Schema:
self.update_endpoint_delete_actions = UpdateEndpointDeleteActions()
self.update_inhviews = UpdateInheritanceViews()
schema = sd.DeltaRoot.apply(self, schema, context)
schema = MetaCommand.apply(self, schema, context)
self.update_endpoint_delete_actions.apply(schema, context)
self.pgops.add(self.update_endpoint_delete_actions)
self.update_inhviews.apply(schema, context)
self.pgops.add(self.update_inhviews)
return schema
def is_material(self):
return True
def generate(self, block: dbops.PLBlock) -> None:
for op in self.serialize_ops():
op.generate(block)
def serialize_ops(self):
queues = {}
self._serialize_ops(self, queues)
queues = (i[1] for i in sorted(queues.items(), key=lambda i: i[0]))
return itertools.chain.from_iterable(queues)
def _serialize_ops(self, obj, queues):
for op in obj.pgops:
if isinstance(op, MetaCommand):
self._serialize_ops(op, queues)
else:
queue = queues.get(op.priority)
if not queue:
queues[op.priority] = queue = []
queue.append(op)
class MigrationCommand(ObjectMetaCommand):
pass
class CreateMigration(
MigrationCommand,
CreateObject,
adapts=s_migrations.CreateMigration,
):
pass
class AlterMigration(
MigrationCommand,
AlterObject,
adapts=s_migrations.AlterMigration,
):
pass
class DeleteMigration(
MigrationCommand,
DeleteObject,
adapts=s_migrations.DeleteMigration,
):
pass
| apache-2.0 |
wasade/qiime | qiime/remote.py | 1 | 13012 | #!/usr/bin/env python
from __future__ import division
__author__ = "Jai Ram Rideout"
__copyright__ = "Copyright 2012, The QIIME project"
__credits__ = ["Jai Ram Rideout"]
__license__ = "GPL"
__version__ = "1.8.0-dev"
__maintainer__ = "Jai Ram Rideout"
__email__ = "[email protected]"
"""Contains functionality to interact with remote services."""
from collections import defaultdict
from csv import writer
from re import sub
from socket import gaierror
from StringIO import StringIO
from burrito.util import ApplicationNotFoundError
def raise_gdata_not_found_error(*args, **kwargs):
raise ApplicationNotFoundError("gdata cannot be found.\nIs it installed? "
"Is it in your $PYTHONPATH?\nThis is an optional QIIME "
"dependency, but is required if you plan to use QIIME's remote "
"mapping file features. For more information, please see "
"http://qiime.org/install/install.html.")
# Load gdata if it's available. If it's not, skip it but set up to raise errors
# if the user tries to use it.
try:
from gdata.spreadsheet import SpreadsheetsCellsFeedFromString
# Needed by _export_spreadsheet below when paging through list feeds.
from gdata.spreadsheet import SpreadsheetsListFeedFromString
from gdata.spreadsheet.service import CellQuery
from gdata.spreadsheet.service import SpreadsheetsService
except ImportError:
# Set functions which cannot be imported to raise_gdata_not_found_error.
SpreadsheetsCellsFeedFromString = SpreadsheetsListFeedFromString = \
CellQuery = SpreadsheetsService = raise_gdata_not_found_error
class GoogleSpreadsheetError(Exception):
pass
class GoogleSpreadsheetConnectionError(Exception):
pass
def load_google_spreadsheet(spreadsheet_key, worksheet_name=None):
"""Downloads and exports a Google Spreadsheet in TSV format.
Returns a string containing the spreadsheet contents in TSV format (e.g.
for writing out to a file or parsing).
The first line is assumed to be the spreadsheet header (i.e. containing
column names), which can optionally be followed by one or more comment
lines (starting with '#'). Only the first cell of a comment line will be
parsed (to keep exported spreadsheets consistent with QIIME mapping files'
comments). The (optional) comments section is then followed by the
spreadsheet data.
Some of this code is based on the following websites, as well as the
gdata.spreadsheet.text_db module:
http://www.payne.org/index.php/Reading_Google_Spreadsheets_in_Python
http://stackoverflow.com/a/12031835
Arguments:
spreadsheet_key - the key used to identify the spreadsheet (a string).
Can either be a key or a URL containing the key
worksheet_name - the name of the worksheet to load data from (a
string). If not supplied, will use first worksheet in the
spreadsheet
"""
spreadsheet_key = _extract_spreadsheet_key_from_url(spreadsheet_key)
gd_client = SpreadsheetsService()
try:
worksheets_feed = gd_client.GetWorksheetsFeed(spreadsheet_key,
visibility='public',
projection='basic')
except gaierror:
raise GoogleSpreadsheetConnectionError("Could not establish "
"connection with server. Do "
"you have an active Internet "
"connection?")
if len(worksheets_feed.entry) < 1:
raise GoogleSpreadsheetError("The Google Spreadsheet with key '%s' "
"does not have any worksheets associated "
"with it." % spreadsheet_key)
# Find worksheet that will be exported. If a name has not been provided,
# use the first worksheet.
worksheet = None
if worksheet_name is not None:
for sheet in worksheets_feed.entry:
if sheet.title.text == worksheet_name:
worksheet = sheet
if worksheet is None:
raise GoogleSpreadsheetError("The worksheet name '%s' could not "
"be found in the Google Spreadsheet "
"with key '%s'."
% (worksheet_name, spreadsheet_key))
else:
# Choose the first one.
worksheet = worksheets_feed.entry[0]
# Extract the ID of the worksheet.
worksheet_id = worksheet.id.text.split('/')[-1]
# Now that we have a spreadsheet key and worksheet ID, we can read the
# data. First get the headers (first row). We need this in order to grab
# the rest of the actual data in the correct order (it is returned
# unordered).
headers = _get_spreadsheet_headers(gd_client, spreadsheet_key,
worksheet_id)
if len(headers) < 1:
raise GoogleSpreadsheetError("Could not load spreadsheet header (it "
"appears to be empty). Is your Google "
"Spreadsheet with key '%s' empty?"
% spreadsheet_key)
# Loop through the rest of the rows and build up a list of data (in the
# same row/col order found in the spreadsheet).
spreadsheet_lines = _export_spreadsheet(gd_client, spreadsheet_key,
worksheet_id, headers)
out_lines = StringIO()
tsv_writer = writer(out_lines, delimiter='\t', lineterminator='\n')
tsv_writer.writerows(spreadsheet_lines)
return out_lines.getvalue()
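# Example usage (illustrative; the spreadsheet key below is hypothetical):
#     >>> tsv = load_google_spreadsheet('0AnzomiBiZW0ddFlUW0hVRkp0cGs',
#     ...                               worksheet_name='Fasting_Map')
#     >>> lines = tsv.split('\n')  # header, optional '#' comments, then data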
def _extract_spreadsheet_key_from_url(url):
"""Extracts a key from a URL in the form '...key=some_key&foo=42...
If the URL doesn't look valid, assumes the URL is the key and returns it
unmodified.
"""
result = url
if 'key=' in url:
result = url.split('key=')[-1].split('#')[0].split('&')[0]
return result
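# Example (illustrative):
#     >>> _extract_spreadsheet_key_from_url(
#     ...     'https://docs.google.com/spreadsheet/ccc?key=ABC123&hl=en#gid=0')
#     'ABC123'
#     >>> _extract_spreadsheet_key_from_url('ABC123')   # already a key
#     'ABC123'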
def _get_spreadsheet_headers(client, spreadsheet_key, worksheet_id):
"""Returns a list of headers (the first line of the spreadsheet).
Will be in the order they appear in the spreadsheet.
"""
headers = []
query = CellQuery()
query.max_row = '1'
query.min_row = '1'
feed = client.GetCellsFeed(spreadsheet_key, worksheet_id, query=query,
visibility='public', projection='values')
# Wish python had a do-while...
while True:
for entry in feed.entry:
headers.append(entry.content.text)
# Get the next set of cells if needed.
next_link = feed.GetNextLink()
if next_link:
feed = client.Get(next_link.href,
converter=SpreadsheetsCellsFeedFromString)
else:
break
return headers
def _export_spreadsheet(client, spreadsheet_key, worksheet_id, headers):
"""Returns a list of lists containing the entire spreadsheet.
This will include the header, any comment lines, and the spreadsheet data.
Blank cells are represented as None. Data will only be read up to the first
blank line that is encountered (this is a limitation of the Google
Spreadsheet API).
Comments are only supported after the header and before any real data is
encountered. The lines must start with [optional whitespace] '#' and only
the first cell is kept in that case (to avoid many empty cells after the
comment cell, which mimics QIIME's mapping file format).
Only cell data that falls under the supplied headers will be included.
"""
# Convert the headers into Google's internal "cleaned" representation.
# These will be used as lookups to pull out cell data.
cleaned_headers = _get_cleaned_headers(headers)
# List feed skips header and returns rows in the order they appear in the
# spreadsheet.
spreadsheet_lines = [headers]
rows_feed = client.GetListFeed(spreadsheet_key, worksheet_id,
visibility='public', projection='values')
while True:
found_data = False
for row in rows_feed.entry:
line = []
# Loop through our headers and use the cleaned version to look up
# the cell data. In certain cases (if the original header was blank
# or only contained special characters) we will not be able to map
# our header, so the best we can do is tell the user to change the
# name of their header to be something simple/alphanumeric.
for header_idx, (header, cleaned_header) in \
enumerate(zip(headers, cleaned_headers)):
try:
cell_data = row.custom[cleaned_header].text
except KeyError:
raise GoogleSpreadsheetError("Could not map header '%s' "
"to Google Spreadsheet's internal representation "
"of the header. We suggest changing the name of "
"the header in your Google Spreadsheet to be "
"alphanumeric if possible, as this will likely "
"solve the issue. Note that the name isn't "
"*required* to be alphanumeric, but it may fix "
"issues with converting to Google Spreadsheet's "
"internal format in some cases." % header)
# Special handling of comments (if it's a comment, only keep
# that cell to avoid several blank cells following it).
if not found_data and header_idx == 0 and \
cell_data.lstrip().startswith('#'):
line.append(cell_data)
break
else:
line.append(cell_data)
found_data = True
spreadsheet_lines.append(line)
# Get the next set of rows if necessary.
next_link = rows_feed.GetNextLink()
if next_link:
rows_feed = client.Get(next_link.href,
converter=SpreadsheetsListFeedFromString)
else:
break
return spreadsheet_lines
def _get_cleaned_headers(headers):
"""Creates a list of "cleaned" headers which spreadsheets accept.
A Google Spreadsheet converts the header names into a "cleaned" internal
representation, which must be used to reference a cell at a particular
header/column. They are all lower case and contain no spaces or special
characters. If two columns have the same name after being sanitized, the
columns further to the right have _2, _3 _4, etc. appended to them.
If there are column names which consist of all special characters, or if
the column header is blank, an obfuscated value will be used for a column
name. This method does not handle blank column names or column names with
only special characters.
Taken from gdata.spreadsheet.text_db.ConvertStringsToColumnHeaders and
modified to handle headers with pound signs or that start with numbers, as
well as correctly handle duplicate cleaned headers.
"""
cleaned_headers = []
for header in headers:
# Google strips special characters, whitespace, and underscores first,
# and then strips any *leading* digits. This order is extremely
# important!
sanitized = sub(r'^\d+', '', sub(r'[\W_]', '', header.lower()))
if len(sanitized) > 0:
cleaned_headers.append(sanitized)
else:
raise GoogleSpreadsheetError("Encountered a header '%s' that was "
"either blank or consisted only of special characters. "
"Could not map the header to the internal representation "
"used by the Google Spreadsheet. Please change the header "
"to consist of at least one alphanumeric character."
% header)
# When the same sanitized header appears multiple times in the first row
# of a spreadsheet, _n is appended to the name to make it unique.
header_count = defaultdict(int)
results = []
for header, cleaned_header in zip(headers, cleaned_headers):
new_header = cleaned_header
if header_count[cleaned_header] > 0:
# Google's numbering starts from _2, hence the +1.
new_header = '%s_%d' % (cleaned_header,
header_count[cleaned_header] + 1)
header_count[cleaned_header] += 1
results.append(new_header)
return results
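# Example (illustrative):
#     >>> _get_cleaned_headers(['Sample ID', '#SampleID', '4Foo_Bar'])
#     ['sampleid', 'sampleid_2', 'foobar']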
| gpl-2.0 |
olafhauk/mne-python | mne/utils/numerics.py | 4 | 36095 | # -*- coding: utf-8 -*-
"""Some utility functions."""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
from contextlib import contextmanager
import hashlib
from io import BytesIO, StringIO
from math import sqrt
import numbers
import operator
import os
import os.path as op
from math import ceil
import shutil
import sys
from datetime import datetime, timedelta, timezone
import numpy as np
from scipy import sparse
from ._logging import logger, warn, verbose
from .check import check_random_state, _ensure_int, _validate_type
from ..fixes import _infer_dimension_, svd_flip, stable_cumsum, _safe_svd
from .docs import fill_doc
def split_list(v, n, idx=False):
"""Split list in n (approx) equal pieces, possibly giving indices."""
n = int(n)
tot = len(v)
sz = tot // n
start = stop = 0
for i in range(n - 1):
stop += sz
yield (np.arange(start, stop), v[start:stop]) if idx else v[start:stop]
start += sz
yield (np.arange(start, tot), v[start:]) if idx else v[start:]
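# Example (illustrative):
#     >>> list(split_list([1, 2, 3, 4, 5], 2))
#     [[1, 2], [3, 4, 5]]
#     >>> [(idx.tolist(), chunk) for idx, chunk in
#     ...  split_list([1, 2, 3, 4, 5], 2, idx=True)]
#     [([0, 1], [1, 2]), ([2, 3, 4], [3, 4, 5])]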
def array_split_idx(ary, indices_or_sections, axis=0, n_per_split=1):
"""Do what numpy.array_split does, but add indices."""
# this only works for indices_or_sections as int
indices_or_sections = _ensure_int(indices_or_sections)
ary_split = np.array_split(ary, indices_or_sections, axis=axis)
idx_split = np.array_split(np.arange(ary.shape[axis]), indices_or_sections)
idx_split = (np.arange(sp[0] * n_per_split, (sp[-1] + 1) * n_per_split)
for sp in idx_split)
return zip(idx_split, ary_split)
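# Example (illustrative):
#     >>> [(idx.tolist(), chunk.tolist())
#     ...  for idx, chunk in array_split_idx(np.arange(10, 14), 2)]
#     [([0, 1], [10, 11]), ([2, 3], [12, 13])]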
def create_chunks(sequence, size):
"""Generate chunks from a sequence.
Parameters
----------
sequence : iterable
Any iterable object
size : int
The chunksize to be returned
"""
return (sequence[p:p + size] for p in range(0, len(sequence), size))
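# Example (illustrative):
#     >>> list(create_chunks('abcdefg', 3))
#     ['abc', 'def', 'g']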
def sum_squared(X):
"""Compute norm of an array.
Parameters
----------
X : array
Data whose norm must be found.
Returns
-------
value : float
Sum of squares of the input array X.
"""
X_flat = X.ravel(order='F' if np.isfortran(X) else 'C')
return np.dot(X_flat, X_flat)
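# Example (illustrative):
#     >>> sum_squared(np.arange(4))  # 0 + 1 + 4 + 9
#     14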
def _compute_row_norms(data):
"""Compute scaling based on estimated norm."""
norms = np.sqrt(np.sum(data ** 2, axis=1))
norms[norms == 0] = 1.0
return norms
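# Example (illustrative; all-zero rows get a norm of 1 to avoid
# division by zero downstream):
#     >>> _compute_row_norms(np.array([[3., 4.], [0., 0.]]))
#     array([5., 1.])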
def _reg_pinv(x, reg=0, rank='full', rcond=1e-15):
"""Compute a regularized pseudoinverse of Hermitian matrices.
Regularization is performed by adding a constant value to each diagonal
element of the matrix before inversion. This is known as "diagonal
loading". The loading factor is computed as ``reg * np.trace(x) / len(x)``.
The pseudo-inverse is computed through SVD decomposition and inverting the
singular values. When the matrix is rank deficient, some singular values
will be close to zero and will not be used during the inversion. The number
of singular values to use can either be manually specified or automatically
estimated.
Parameters
----------
x : ndarray, shape (..., n, n)
Square, Hermitian matrices to invert.
reg : float
Regularization parameter. Defaults to 0.
rank : int | None | 'full'
This controls the effective rank of the covariance matrix when
computing the inverse. The rank can be set explicitly by specifying an
integer value. If ``None``, the rank will be automatically estimated.
Since applying regularization will always make the covariance matrix
full rank, the rank is estimated before regularization in this case. If
'full', the rank will be estimated after regularization and hence
will mean using the full rank, unless ``reg=0`` is used.
Defaults to 'full'.
rcond : float | 'auto'
Cutoff for detecting small singular values when attempting to estimate
the rank of the matrix (``rank='auto'``). Singular values smaller than
the cutoff are set to zero. When set to 'auto', a cutoff based on
floating point precision will be used. Defaults to 1e-15.
Returns
-------
x_inv : ndarray, shape (..., n, n)
The inverted matrix.
loading_factor : float
Value added to the diagonal of the matrix during regularization.
rank : int
If ``rank`` was set to an integer value, this value is returned,
else the estimated rank of the matrix, before regularization, is
returned.
"""
from ..rank import _estimate_rank_from_s
if rank is not None and rank != 'full':
rank = int(operator.index(rank))
if x.ndim < 2 or x.shape[-2] != x.shape[-1]:
raise ValueError('Input matrix must be square.')
if not np.allclose(x, x.conj().swapaxes(-2, -1)):
raise ValueError('Input matrix must be Hermitian (symmetric)')
assert x.ndim >= 2 and x.shape[-2] == x.shape[-1]
n = x.shape[-1]
# Decompose the matrix, not necessarily positive semidefinite
from mne.fixes import svd
U, s, Vh = svd(x, hermitian=True)
# Estimate the rank before regularization
tol = 'auto' if rcond == 'auto' else rcond * s[..., :1]
rank_before = _estimate_rank_from_s(s, tol)
# Decompose the matrix again after regularization
loading_factor = reg * np.mean(s, axis=-1)
if reg:
U, s, Vh = svd(
x + loading_factor[..., np.newaxis, np.newaxis] * np.eye(n),
hermitian=True)
# Estimate the rank after regularization
tol = 'auto' if rcond == 'auto' else rcond * s[..., :1]
rank_after = _estimate_rank_from_s(s, tol)
    # Warn the user if all parameters were kept at their defaults and the
    # matrix is rank deficient.
if (rank_after < n).any() and reg == 0 and \
rank == 'full' and rcond == 1e-15:
warn('Covariance matrix is rank-deficient and no regularization is '
'done.')
elif isinstance(rank, int) and rank > n:
raise ValueError('Invalid value for the rank parameter (%d) given '
'the shape of the input matrix (%d x %d).' %
(rank, x.shape[0], x.shape[1]))
# Pick the requested number of singular values
mask = np.arange(s.shape[-1]).reshape((1,) * (x.ndim - 2) + (-1,))
if rank is None:
cmp = ret = rank_before
elif rank == 'full':
cmp = rank_after
ret = rank_before
else:
cmp = ret = rank
mask = mask < np.asarray(cmp)[..., np.newaxis]
mask &= s > 0
# Invert only non-zero singular values
s_inv = np.zeros(s.shape)
s_inv[mask] = 1. / s[mask]
# Compute the pseudo inverse
x_inv = np.matmul(U * s_inv[..., np.newaxis, :], Vh)
return x_inv, loading_factor, ret
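# Hypothetical usage sketch, not part of the original module: invert a
# rank-deficient Hermitian matrix with diagonal loading. With reg=0.1 the
# loading factor is 0.1 * trace(x) / 3 = 0.1 here, and the returned rank is
# the one estimated before regularization (1 for this rank-one matrix).
#
#     >>> x = np.ones((3, 3))  # Hermitian, rank 1
#     >>> x_inv, loading, rank = _reg_pinv(x, reg=0.1)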
def _gen_events(n_epochs):
"""Generate event structure from number of epochs."""
events = np.c_[np.arange(n_epochs), np.zeros(n_epochs, int),
np.ones(n_epochs, int)]
return events
def _reject_data_segments(data, reject, flat, decim, info, tstep):
"""Reject data segments using peak-to-peak amplitude."""
from ..epochs import _is_good
from ..io.pick import channel_indices_by_type
data_clean = np.empty_like(data)
idx_by_type = channel_indices_by_type(info)
step = int(ceil(tstep * info['sfreq']))
if decim is not None:
step = int(ceil(step / float(decim)))
this_start = 0
this_stop = 0
drop_inds = []
for first in range(0, data.shape[1], step):
last = first + step
data_buffer = data[:, first:last]
if data_buffer.shape[1] < (last - first):
break # end of the time segment
if _is_good(data_buffer, info['ch_names'], idx_by_type, reject,
flat, ignore_chs=info['bads']):
this_stop = this_start + data_buffer.shape[1]
data_clean[:, this_start:this_stop] = data_buffer
this_start += data_buffer.shape[1]
else:
logger.info("Artifact detected in [%d, %d]" % (first, last))
drop_inds.append((first, last))
data = data_clean[:, :this_stop]
if not data.any():
raise RuntimeError('No clean segment found. Please '
'consider updating your rejection '
'thresholds.')
return data, drop_inds
def _get_inst_data(inst):
"""Get data view from MNE object instance like Raw, Epochs or Evoked."""
from ..io.base import BaseRaw
from ..epochs import BaseEpochs
from .. import Evoked
from ..time_frequency.tfr import _BaseTFR
_validate_type(inst, (BaseRaw, BaseEpochs, Evoked, _BaseTFR), "Instance")
if not inst.preload:
inst.load_data()
return inst._data
def compute_corr(x, y):
"""Compute pearson correlations between a vector and a matrix."""
if len(x) == 0 or len(y) == 0:
raise ValueError('x or y has zero length')
X = np.array(x, float)
Y = np.array(y, float)
X -= X.mean(0)
Y -= Y.mean(0)
x_sd = X.std(0, ddof=1)
# if covariance matrix is fully expanded, Y needs a
# transpose / broadcasting else Y is correct
y_sd = Y.std(0, ddof=1)[:, None if X.shape == Y.shape else Ellipsis]
return (np.dot(X.T, Y) / float(len(X) - 1)) / (x_sd * y_sd)
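# Illustrative sketch, not part of the original module: correlate one vector
# with every column of a matrix; each entry should agree with np.corrcoef.
#
#     rng = np.random.RandomState(0)
#     x, y = rng.randn(100), rng.randn(100, 3)
#     r = compute_corr(x, y)
#     assert np.allclose(r[0], np.corrcoef(x, y[:, 0])[0, 1])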
@fill_doc
def random_permutation(n_samples, random_state=None):
"""Emulate the randperm matlab function.
It returns a vector containing a random permutation of the
integers between 0 and n_samples-1. It returns the same random numbers
    as the randperm matlab function whenever the random_state is the same
    as matlab's random seed.
This function is useful for comparing against matlab scripts
which use the randperm function.
Note: the randperm(n_samples) matlab function generates a random
sequence between 1 and n_samples, whereas
random_permutation(n_samples, random_state) function generates
a random sequence between 0 and n_samples-1, that is:
randperm(n_samples) = random_permutation(n_samples, random_state) - 1
Parameters
----------
n_samples : int
End point of the sequence to be permuted (excluded, i.e., the end point
is equal to n_samples-1)
%(random_state)s
Returns
-------
randperm : ndarray, int
Randomly permuted sequence between 0 and n-1.
"""
rng = check_random_state(random_state)
# This can't just be rng.permutation(n_samples) because it's not identical
# to what MATLAB produces
idx = rng.uniform(size=n_samples)
randperm = np.argsort(idx)
return randperm
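# Hypothetical usage sketch, not part of the original module: with a fixed
# seed the result matches MATLAB's randperm(5) run after rng(seed), shifted
# down by one (0-based instead of 1-based indices).
#
#     >>> perm = random_permutation(5, random_state=42)
#     >>> np.sort(perm)
#     array([0, 1, 2, 3, 4])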
@verbose
def _apply_scaling_array(data, picks_list, scalings, verbose=None):
"""Scale data type-dependently for estimation."""
scalings = _check_scaling_inputs(data, picks_list, scalings)
if isinstance(scalings, dict):
logger.debug(' Scaling using mapping %s.' % (scalings,))
picks_dict = dict(picks_list)
scalings = [(picks_dict[k], v) for k, v in scalings.items()
if k in picks_dict]
for idx, scaling in scalings:
data[idx, :] *= scaling # F - order
else:
logger.debug(' Scaling using computed norms.')
data *= scalings[:, np.newaxis] # F - order
def _invert_scalings(scalings):
if isinstance(scalings, dict):
scalings = {k: 1. / v for k, v in scalings.items()}
elif isinstance(scalings, np.ndarray):
scalings = 1. / scalings
return scalings
def _undo_scaling_array(data, picks_list, scalings):
scalings = _invert_scalings(_check_scaling_inputs(data, picks_list,
scalings))
return _apply_scaling_array(data, picks_list, scalings, verbose=False)
@contextmanager
def _scaled_array(data, picks_list, scalings):
"""Scale, use, unscale array."""
_apply_scaling_array(data, picks_list=picks_list, scalings=scalings)
try:
yield
finally:
_undo_scaling_array(data, picks_list=picks_list, scalings=scalings)
def _apply_scaling_cov(data, picks_list, scalings):
"""Scale resulting data after estimation."""
scalings = _check_scaling_inputs(data, picks_list, scalings)
scales = None
if isinstance(scalings, dict):
n_channels = len(data)
covinds = list(zip(*picks_list))[1]
assert len(data) == sum(len(k) for k in covinds)
assert list(sorted(np.concatenate(covinds))) == list(range(len(data)))
scales = np.zeros(n_channels)
for ch_t, idx in picks_list:
scales[idx] = scalings[ch_t]
elif isinstance(scalings, np.ndarray):
if len(scalings) != len(data):
raise ValueError('Scaling factors and data are of incompatible '
'shape')
scales = scalings
elif scalings is None:
pass
else:
        raise RuntimeError('Invalid scalings: %r' % (scalings,))
if scales is not None:
assert np.sum(scales == 0.) == 0
data *= (scales[None, :] * scales[:, None])
def _undo_scaling_cov(data, picks_list, scalings):
scalings = _invert_scalings(_check_scaling_inputs(data, picks_list,
scalings))
return _apply_scaling_cov(data, picks_list, scalings)
def _check_scaling_inputs(data, picks_list, scalings):
"""Aux function."""
rescale_dict_ = dict(mag=1e15, grad=1e13, eeg=1e6)
scalings_ = None
if isinstance(scalings, str) and scalings == 'norm':
scalings_ = 1. / _compute_row_norms(data)
elif isinstance(scalings, dict):
rescale_dict_.update(scalings)
scalings_ = rescale_dict_
elif isinstance(scalings, np.ndarray):
scalings_ = scalings
elif scalings is None:
pass
else:
raise NotImplementedError("No way! That's not a rescaling "
'option: %s' % scalings)
return scalings_
def hashfunc(fname, block_size=1048576, hash_type="md5"): # 2 ** 20
"""Calculate the hash for a file.
Parameters
----------
fname : str
Filename.
block_size : int
        Block size to use when reading.
    hash_type : "md5" | "sha1"
        The hash type to use. Defaults to "md5".
Returns
-------
hash_ : str
The hexadecimal digest of the hash.
"""
if hash_type == "md5":
hasher = hashlib.md5()
elif hash_type == "sha1":
hasher = hashlib.sha1()
with open(fname, 'rb') as fid:
while True:
data = fid.read(block_size)
if not data:
break
hasher.update(data)
return hasher.hexdigest()
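# Hypothetical usage sketch, not part of the original module; ``fname`` is an
# assumed path to an existing file.
#
#     md5_digest = hashfunc(fname)                     # default MD5
#     sha1_digest = hashfunc(fname, hash_type="sha1")  # SHA-1 instead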
def _replace_md5(fname):
"""Replace a file based on MD5sum."""
# adapted from sphinx-gallery
assert fname.endswith('.new')
fname_old = fname[:-4]
if op.isfile(fname_old) and hashfunc(fname) == hashfunc(fname_old):
os.remove(fname)
else:
shutil.move(fname, fname_old)
def create_slices(start, stop, step=None, length=1):
"""Generate slices of time indexes.
Parameters
----------
start : int
Index where first slice should start.
stop : int
Index where last slice should maximally end.
    step : int | None
        Number of time samples separating two slices.
        If step is None, step is set to length.
    length : int
        Number of time samples included in a given slice.
Returns
-------
slices : list
List of slice objects.
"""
# default parameters
if step is None:
step = length
# slicing
slices = [slice(t, t + length, 1) for t in
range(start, stop - length + 1, step)]
return slices
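# Illustrative sketch, not part of the original module: non-overlapping
# slices of length 2 covering samples 0..5 (step defaults to length).
#
#     >>> create_slices(0, 6, length=2)
#     [slice(0, 2, 1), slice(2, 4, 1), slice(4, 6, 1)]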
def _time_mask(times, tmin=None, tmax=None, sfreq=None, raise_error=True,
include_tmax=True):
"""Safely find sample boundaries."""
orig_tmin = tmin
orig_tmax = tmax
tmin = -np.inf if tmin is None else tmin
tmax = np.inf if tmax is None else tmax
if not np.isfinite(tmin):
tmin = times[0]
if not np.isfinite(tmax):
tmax = times[-1]
include_tmax = True # ignore this param when tmax is infinite
if sfreq is not None:
# Push to a bit past the nearest sample boundary first
sfreq = float(sfreq)
tmin = int(round(tmin * sfreq)) / sfreq - 0.5 / sfreq
tmax = int(round(tmax * sfreq)) / sfreq
tmax += (0.5 if include_tmax else -0.5) / sfreq
else:
assert include_tmax # can only be used when sfreq is known
if raise_error and tmin > tmax:
raise ValueError('tmin (%s) must be less than or equal to tmax (%s)'
% (orig_tmin, orig_tmax))
mask = (times >= tmin)
mask &= (times <= tmax)
if raise_error and not mask.any():
extra = '' if include_tmax else 'when include_tmax=False '
raise ValueError('No samples remain when using tmin=%s and tmax=%s %s'
'(original time bounds are [%s, %s])'
% (orig_tmin, orig_tmax, extra, times[0], times[-1]))
return mask
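# Illustrative sketch, not part of the original module: keep samples between
# 0.1 s and 0.3 s on a 10 Hz time axis (bounds snap to sample boundaries).
#
#     >>> times = np.arange(10) / 10.
#     >>> _time_mask(times, tmin=0.1, tmax=0.3, sfreq=10.)[:5]
#     array([False,  True,  True,  True, False])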
def _freq_mask(freqs, sfreq, fmin=None, fmax=None, raise_error=True):
"""Safely find frequency boundaries."""
orig_fmin = fmin
orig_fmax = fmax
fmin = -np.inf if fmin is None else fmin
fmax = np.inf if fmax is None else fmax
if not np.isfinite(fmin):
fmin = freqs[0]
if not np.isfinite(fmax):
fmax = freqs[-1]
if sfreq is None:
raise ValueError('sfreq can not be None')
# Push 0.5/sfreq past the nearest frequency boundary first
sfreq = float(sfreq)
fmin = int(round(fmin * sfreq)) / sfreq - 0.5 / sfreq
fmax = int(round(fmax * sfreq)) / sfreq + 0.5 / sfreq
if raise_error and fmin > fmax:
raise ValueError('fmin (%s) must be less than or equal to fmax (%s)'
% (orig_fmin, orig_fmax))
mask = (freqs >= fmin)
mask &= (freqs <= fmax)
if raise_error and not mask.any():
raise ValueError('No frequencies remain when using fmin=%s and '
'fmax=%s (original frequency bounds are [%s, %s])'
% (orig_fmin, orig_fmax, freqs[0], freqs[-1]))
return mask
def grand_average(all_inst, interpolate_bads=True, drop_bads=True):
"""Make grand average of a list of Evoked or AverageTFR data.
For :class:`mne.Evoked` data, the function interpolates bad channels based
on the ``interpolate_bads`` parameter. If ``interpolate_bads`` is True,
the grand average file will contain good channels and the bad channels
interpolated from the good MEG/EEG channels.
For :class:`mne.time_frequency.AverageTFR` data, the function takes the
subset of channels not marked as bad in any of the instances.
The ``grand_average.nave`` attribute will be equal to the number
of evoked datasets used to calculate the grand average.
.. note:: A grand average evoked should not be used for source
localization.
Parameters
----------
all_inst : list of Evoked or AverageTFR
The evoked datasets.
interpolate_bads : bool
If True, bad MEG and EEG channels are interpolated. Ignored for
AverageTFR.
drop_bads : bool
If True, drop all bad channels marked as bad in any data set.
If neither interpolate_bads nor drop_bads is True, in the output file,
every channel marked as bad in at least one of the input files will be
marked as bad, but no interpolation or dropping will be performed.
Returns
-------
grand_average : Evoked | AverageTFR
The grand average data. Same type as input.
Notes
-----
.. versionadded:: 0.11.0
"""
# check if all elements in the given list are evoked data
from ..evoked import Evoked
from ..time_frequency import AverageTFR
from ..channels.channels import equalize_channels
if not all_inst:
raise ValueError('Please pass a list of Evoked or AverageTFR objects.')
elif len(all_inst) == 1:
warn('Only a single dataset was passed to mne.grand_average().')
inst_type = type(all_inst[0])
_validate_type(all_inst[0], (Evoked, AverageTFR), 'All elements')
for inst in all_inst:
_validate_type(inst, inst_type, 'All elements', 'of the same type')
# Copy channels to leave the original evoked datasets intact.
all_inst = [inst.copy() for inst in all_inst]
# Interpolates if necessary
if isinstance(all_inst[0], Evoked):
if interpolate_bads:
all_inst = [inst.interpolate_bads() if len(inst.info['bads']) > 0
else inst for inst in all_inst]
from ..evoked import combine_evoked as combine
else: # isinstance(all_inst[0], AverageTFR):
from ..time_frequency.tfr import combine_tfr as combine
if drop_bads:
bads = list({b for inst in all_inst for b in inst.info['bads']})
if bads:
for inst in all_inst:
inst.drop_channels(bads)
equalize_channels(all_inst, copy=False)
# make grand_average object using combine_[evoked/tfr]
grand_average = combine(all_inst, weights='equal')
# change the grand_average.nave to the number of Evokeds
grand_average.nave = len(all_inst)
# change comment field
grand_average.comment = "Grand average (n = %d)" % grand_average.nave
return grand_average
def object_hash(x, h=None):
"""Hash a reasonable python object.
Parameters
----------
x : object
Object to hash. Can be anything comprised of nested versions of:
{dict, list, tuple, ndarray, str, bytes, float, int, None}.
h : hashlib HASH object | None
Optional, object to add the hash to. None creates an MD5 hash.
Returns
-------
digest : int
The digest resulting from the hash.
"""
if h is None:
h = hashlib.md5()
if hasattr(x, 'keys'):
# dict-like types
keys = _sort_keys(x)
for key in keys:
object_hash(key, h)
object_hash(x[key], h)
elif isinstance(x, bytes):
# must come before "str" below
h.update(x)
elif isinstance(x, (str, float, int, type(None))):
h.update(str(type(x)).encode('utf-8'))
h.update(str(x).encode('utf-8'))
elif isinstance(x, (np.ndarray, np.number, np.bool_)):
x = np.asarray(x)
h.update(str(x.shape).encode('utf-8'))
h.update(str(x.dtype).encode('utf-8'))
h.update(x.tobytes())
elif isinstance(x, datetime):
        object_hash(_dt_to_stamp(x), h)
elif hasattr(x, '__len__'):
# all other list-like types
h.update(str(type(x)).encode('utf-8'))
for xx in x:
object_hash(xx, h)
else:
raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))
return int(h.hexdigest(), 16)
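# Illustrative sketch, not part of the original module: equal nested
# structures hash to the same integer, and any changed value changes it.
#
#     >>> a = dict(x=np.arange(3), y=['foo', 1.5])
#     >>> b = dict(x=np.arange(3), y=['foo', 1.5])
#     >>> object_hash(a) == object_hash(b)
#     True
#     >>> object_hash(a) == object_hash(dict(x=np.arange(3), y=['foo', 2.5]))
#     False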
def object_size(x, memo=None):
"""Estimate the size of a reasonable python object.
Parameters
----------
x : object
Object to approximate the size of.
Can be anything comprised of nested versions of:
{dict, list, tuple, ndarray, str, bytes, float, int, None}.
memo : dict | None
The memodict.
Returns
-------
size : int
The estimated size in bytes of the object.
"""
    # Note: this will not process object arrays properly (since those only
    # hold references)
if memo is None:
memo = dict()
id_ = id(x)
if id_ in memo:
return 0 # do not add already existing ones
if isinstance(x, (bytes, str, int, float, type(None))):
size = sys.getsizeof(x)
elif isinstance(x, np.ndarray):
# On newer versions of NumPy, just doing sys.getsizeof(x) works,
# but on older ones you always get something small :(
size = sys.getsizeof(np.array([]))
if x.base is None or id(x.base) not in memo:
size += x.nbytes
elif isinstance(x, np.generic):
size = x.nbytes
elif isinstance(x, dict):
size = sys.getsizeof(x)
for key, value in x.items():
size += object_size(key, memo)
size += object_size(value, memo)
elif isinstance(x, (list, tuple)):
size = sys.getsizeof(x) + sum(object_size(xx, memo) for xx in x)
elif isinstance(x, datetime):
size = object_size(_dt_to_stamp(x), memo)
elif sparse.isspmatrix_csc(x) or sparse.isspmatrix_csr(x):
size = sum(sys.getsizeof(xx)
for xx in [x, x.data, x.indices, x.indptr])
else:
raise RuntimeError('unsupported type: %s (%s)' % (type(x), x))
memo[id_] = size
return size
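# Illustrative sketch, not part of the original module: containers are
# traversed recursively, so the 8000-byte ndarray payload dominates here.
#
#     >>> x = dict(a=np.zeros(1000), b='abc')
#     >>> object_size(x) > 8000
#     True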
def _sort_keys(x):
"""Sort and return keys of dict."""
keys = list(x.keys()) # note: not thread-safe
idx = np.argsort([str(k) for k in keys])
keys = [keys[ii] for ii in idx]
return keys
def _array_equal_nan(a, b):
try:
np.testing.assert_array_equal(a, b)
except AssertionError:
return False
return True
def object_diff(a, b, pre=''):
"""Compute all differences between two python variables.
Parameters
----------
a : object
Currently supported: dict, list, tuple, ndarray, int, str, bytes,
float, StringIO, BytesIO.
b : object
Must be same type as ``a``.
pre : str
String to prepend to each line.
Returns
-------
diffs : str
A string representation of the differences.
"""
out = ''
if type(a) != type(b):
# Deal with NamedInt and NamedFloat
for sub in (int, float):
if isinstance(a, sub) and isinstance(b, sub):
break
else:
return pre + ' type mismatch (%s, %s)\n' % (type(a), type(b))
if isinstance(a, dict):
k1s = _sort_keys(a)
k2s = _sort_keys(b)
m1 = set(k2s) - set(k1s)
if len(m1):
out += pre + ' left missing keys %s\n' % (m1)
for key in k1s:
if key not in k2s:
out += pre + ' right missing key %s\n' % key
else:
out += object_diff(a[key], b[key],
pre=(pre + '[%s]' % repr(key)))
elif isinstance(a, (list, tuple)):
if len(a) != len(b):
out += pre + ' length mismatch (%s, %s)\n' % (len(a), len(b))
else:
for ii, (xx1, xx2) in enumerate(zip(a, b)):
out += object_diff(xx1, xx2, pre + '[%s]' % ii)
elif isinstance(a, float):
if not _array_equal_nan(a, b):
out += pre + ' value mismatch (%s, %s)\n' % (a, b)
elif isinstance(a, (str, int, bytes, np.generic)):
if a != b:
out += pre + ' value mismatch (%s, %s)\n' % (a, b)
elif a is None:
if b is not None:
out += pre + ' left is None, right is not (%s)\n' % (b)
elif isinstance(a, np.ndarray):
if not _array_equal_nan(a, b):
out += pre + ' array mismatch\n'
elif isinstance(a, (StringIO, BytesIO)):
if a.getvalue() != b.getvalue():
out += pre + ' StringIO mismatch\n'
elif isinstance(a, datetime):
if (a - b).total_seconds() != 0:
out += pre + ' datetime mismatch\n'
elif sparse.isspmatrix(a):
# sparsity and sparse type of b vs a already checked above by type()
if b.shape != a.shape:
out += pre + (' sparse matrix a and b shape mismatch'
'(%s vs %s)' % (a.shape, b.shape))
else:
c = a - b
c.eliminate_zeros()
if c.nnz > 0:
out += pre + (' sparse matrix a and b differ on %s '
'elements' % c.nnz)
elif hasattr(a, '__getstate__'):
out += object_diff(a.__getstate__(), b.__getstate__(), pre)
else:
raise RuntimeError(pre + ': unsupported type %s (%s)' % (type(a), a))
return out
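# Illustrative sketch, not part of the original module: only the differing
# key is reported, prefixed by its path within the structure.
#
#     diff = object_diff(dict(x=1, y=[1, 2]), dict(x=2, y=[1, 2]))
#     # diff == "['x'] value mismatch (1, 2)\n"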
class _PCA(object):
"""Principal component analysis (PCA)."""
# Adapted from sklearn and stripped down to just use linalg.svd
# and make it easier to later provide a "center" option if we want
def __init__(self, n_components=None, whiten=False):
self.n_components = n_components
self.whiten = whiten
def fit_transform(self, X, y=None):
X = X.copy()
U, S, _ = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
            # X_new = X * V / S * sqrt(n_samples - 1) = U * sqrt(n_samples - 1)
U *= sqrt(X.shape[0] - 1)
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
if self.n_components is None:
n_components = min(X.shape)
else:
n_components = self.n_components
n_samples, n_features = X.shape
if n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
elif not 0 <= n_components <= min(n_samples, n_features):
raise ValueError("n_components=%r must be between 0 and "
"min(n_samples, n_features)=%r with "
"svd_solver='full'"
% (n_components, min(n_samples, n_features)))
elif n_components >= 1:
if not isinstance(n_components, (numbers.Integral, np.integer)):
raise ValueError("n_components=%r must be of type int "
"when greater than or equal to 1, "
"was of type=%r"
% (n_components, type(n_components)))
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = _safe_svd(X, full_matrices=False)
# flip eigenvectors' sign to enforce deterministic output
U, V = svd_flip(U, V)
components_ = V
# Get variance explained by singular values
explained_variance_ = (S ** 2) / (n_samples - 1)
total_var = explained_variance_.sum()
explained_variance_ratio_ = explained_variance_ / total_var
singular_values_ = S.copy() # Store the singular values.
# Postprocess the number of components required
if n_components == 'mle':
n_components = \
_infer_dimension_(explained_variance_, n_samples, n_features)
elif 0 < n_components < 1.0:
# number of components for which the cumulated explained
# variance percentage is superior to the desired threshold
ratio_cumsum = stable_cumsum(explained_variance_ratio_)
n_components = np.searchsorted(ratio_cumsum, n_components) + 1
# Compute noise covariance using Probabilistic PCA model
# The sigma2 maximum likelihood (cf. eq. 12.46)
if n_components < min(n_features, n_samples):
self.noise_variance_ = explained_variance_[n_components:].mean()
else:
self.noise_variance_ = 0.
self.n_samples_, self.n_features_ = n_samples, n_features
self.components_ = components_[:n_components]
self.n_components_ = n_components
self.explained_variance_ = explained_variance_[:n_components]
self.explained_variance_ratio_ = \
explained_variance_ratio_[:n_components]
self.singular_values_ = singular_values_[:n_components]
return U, S, V
def _mask_to_onsets_offsets(mask):
"""Group boolean mask into contiguous onset:offset pairs."""
assert mask.dtype == bool and mask.ndim == 1
mask = mask.astype(int)
diff = np.diff(mask)
onsets = np.where(diff > 0)[0] + 1
if mask[0]:
onsets = np.concatenate([[0], onsets])
offsets = np.where(diff < 0)[0] + 1
if mask[-1]:
offsets = np.concatenate([offsets, [len(mask)]])
assert len(onsets) == len(offsets)
return onsets, offsets
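# Illustrative sketch, not part of the original module: contiguous True runs
# come back as half-open [onset, offset) index pairs.
#
#     >>> mask = np.array([True, True, False, False, True, False])
#     >>> _mask_to_onsets_offsets(mask)
#     (array([0, 4]), array([2, 5]))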
def _julian_to_dt(jd):
"""Convert Julian integer to a datetime object.
Parameters
----------
jd : int
Julian date - number of days since julian day 0
Julian day number 0 assigned to the day starting at
noon on January 1, 4713 BC, proleptic Julian calendar
November 24, 4714 BC, in the proleptic Gregorian calendar
Returns
-------
jd_date : datetime
Datetime representation of jd
"""
# https://aa.usno.navy.mil/data/docs/JulianDate.php
# Thursday, A.D. 1970 Jan 1 12:00:00.0 2440588.000000
jd_t0 = 2440588
datetime_t0 = datetime(1970, 1, 1, 12, 0, 0, 0, tzinfo=timezone.utc)
dt = timedelta(days=(jd - jd_t0))
return datetime_t0 + dt
def _dt_to_julian(jd_date):
"""Convert datetime object to a Julian integer.
Parameters
----------
jd_date : datetime
Returns
-------
jd : float
Julian date corresponding to jd_date
- number of days since julian day 0
Julian day number 0 assigned to the day starting at
noon on January 1, 4713 BC, proleptic Julian calendar
November 24, 4714 BC, in the proleptic Gregorian calendar
"""
# https://aa.usno.navy.mil/data/docs/JulianDate.php
# Thursday, A.D. 1970 Jan 1 12:00:00.0 2440588.000000
jd_t0 = 2440588
datetime_t0 = datetime(1970, 1, 1, 12, 0, 0, 0, tzinfo=timezone.utc)
dt = jd_date - datetime_t0
return jd_t0 + dt.days
def _cal_to_julian(year, month, day):
"""Convert calendar date (year, month, day) to a Julian integer.
Parameters
----------
year : int
Year as an integer.
month : int
Month as an integer.
day : int
Day as an integer.
Returns
-------
jd: int
Julian date.
"""
return int(_dt_to_julian(datetime(year, month, day, 12, 0, 0,
tzinfo=timezone.utc)))
def _julian_to_cal(jd):
"""Convert calendar date (year, month, day) to a Julian integer.
Parameters
----------
jd: int, float
Julian date.
Returns
-------
year : int
Year as an integer.
month : int
Month as an integer.
day : int
Day as an integer.
"""
tmp_date = _julian_to_dt(jd)
return tmp_date.year, tmp_date.month, tmp_date.day
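# Illustrative sketch, not part of the original module: calendar dates and
# Julian day numbers round-trip (1970-01-01 is Julian day 2440588).
#
#     >>> _cal_to_julian(1970, 1, 1)
#     2440588
#     >>> _julian_to_cal(2440588)
#     (1970, 1, 1)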
def _check_dt(dt):
if not isinstance(dt, datetime) or dt.tzinfo is None or \
dt.tzinfo is not timezone.utc:
raise ValueError('Date must be datetime object in UTC: %r' % (dt,))
def _dt_to_stamp(inp_date):
"""Convert a datetime object to a timestamp."""
_check_dt(inp_date)
return int(inp_date.timestamp() // 1), inp_date.microsecond
def _stamp_to_dt(utc_stamp):
"""Convert timestamp to datetime object in Windows-friendly way."""
    # on Windows datetime.fromtimestamp can't handle small timestamps (the
    # minimum is 86400), so build from the epoch with an explicit timedelta
stamp = [int(s) for s in utc_stamp]
if len(stamp) == 1: # In case there is no microseconds information
stamp.append(0)
return (datetime.fromtimestamp(0, tz=timezone.utc) +
timedelta(0, stamp[0], stamp[1])) # day, sec, µs
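# Illustrative sketch, not part of the original module: a timezone-aware UTC
# datetime survives the round trip through a (seconds, microseconds) stamp.
#
#     >>> dt = datetime(2020, 5, 17, 12, 0, 0, 123456, tzinfo=timezone.utc)
#     >>> _stamp_to_dt(_dt_to_stamp(dt)) == dt
#     True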
class _ReuseCycle(object):
"""Cycle over a variable, preferring to reuse earlier indices.
Requires the values in ``x`` to be hashable and unique. This holds
nicely for matplotlib's color cycle, which gives HTML hex color strings.
"""
def __init__(self, x):
self.indices = list()
self.popped = dict()
assert len(x) > 0
self.x = x
def __iter__(self):
while True:
yield self.__next__()
def __next__(self):
if not len(self.indices):
self.indices = list(range(len(self.x)))
self.popped = dict()
idx = self.indices.pop(0)
val = self.x[idx]
assert val not in self.popped
self.popped[val] = idx
return val
def restore(self, val):
try:
idx = self.popped.pop(val)
except KeyError:
warn('Could not find value: %s' % (val,))
else:
loc = np.searchsorted(self.indices, idx)
self.indices.insert(loc, idx)
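# Illustrative sketch, not part of the original module: restored values are
# handed out again before the cycle advances, keeping assignments stable.
#
#     >>> cycle = _ReuseCycle(['r', 'g', 'b'])
#     >>> next(cycle), next(cycle)
#     ('r', 'g')
#     >>> cycle.restore('r')
#     >>> next(cycle)
#     'r'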
| bsd-3-clause |
tiefpunkt/thingstore | thingstore/models.py | 1 | 2775 | from django.db import models
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.utils.timezone import now, utc
import calendar, datetime, string, random
# Create your models here.
class Thing(models.Model):
name = models.CharField(max_length=255)
location = models.CharField(max_length=255, blank=True)
description = models.TextField(blank=True)
owner = models.ForeignKey(User)
def __unicode__(self):
        return self.name
def get_absolute_url(self):
return reverse('thingstore.views.thing', args=[str(self.id)])
class Metric(models.Model):
thing = models.ForeignKey(Thing, related_name='metrics')
name = models.CharField(max_length=255)
unit = models.CharField(max_length=64, blank=True)
class Meta:
unique_together = (("name","thing"),)
def __unicode__(self):
        return self.name
""" Return most recent value for metric """
@property
def current_value(self):
try:
return Value.objects.filter(metric = self)[:1].get().value
except Value.DoesNotExist:
return None
""" set current value by adding a new Value with current timestamp"""
@current_value.setter
def current_value(self, value):
v = Value(metric = self, value = value)
v.save()
""" Return datetime of last update """
@property
def last_update(self):
try:
return Value.objects.filter(metric = self)[:1].get().timestamp
except Value.DoesNotExist:
return None
""" Returns a list of Value objects for the last $timeframe_hours
plus the one Value before the timeframe if existing """
def getValues(self, timeframe_hours):
try:
# Get all values within the timeframe
r_values = Value.objects.filter(metric = self, timestamp__gte = now()-datetime.timedelta(hours=timeframe_hours)).order_by('timestamp')
r_list = [ values for values in r_values]
# The invisible Value outside of the Timeframe
inv_value = Value.objects.filter(metric = self, timestamp__lt = now()-datetime.timedelta(hours=timeframe_hours)).order_by('-timestamp')[:1]
if inv_value.count():
vr_list = list(inv_value) + list(r_values)
return vr_list
return r_list
        except Exception:
            return None
class Value(models.Model):
metric = models.ForeignKey(Metric, related_name='values')
value = models.FloatField()
timestamp = models.DateTimeField(default=now)
@property
def js_time(self):
return calendar.timegm(self.timestamp.timetuple())*1000
class Meta:
ordering = ['-timestamp']
class APIKey(models.Model):
token = models.CharField(max_length=255, unique = True)
user = models.ForeignKey(User, related_name='apikeys')
@classmethod
def create(cls, user):
apikey = cls(user=user)
apikey.token = ''.join(random.sample(string.lowercase+string.uppercase+string.digits,32))
return apikey
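# Hypothetical usage sketch, not part of the original file; ``some_user`` is
# an assumed existing django.contrib.auth User instance.
#
#     key = APIKey.create(some_user)  # generates a random 32-character token
#     key.save()                      # create() does not persist the key
#     some_user.apikeys.all()         # reverse relation via related_name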
| mit |