repo_name | path | copies | size | content | license
---|---|---|---|---|---|
2014c2g1/c2g1 | exts/exts/sphinxcontrib/bibtex/latex_lexer.py | 38 | 12344 | # -*- coding: utf-8 -*-
"""
Simple incremental latex lexer
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import codecs
import collections
import re
class Token(collections.namedtuple("Token", "name text")):
"""Stores information about a matched token."""
__slots__ = () # efficiency
def __new__(cls, name=None, text=None):
return tuple.__new__(
cls,
(name if name is not None else 'unknown',
text if text is not None else b''))
def __nonzero__(self):
return bool(self.text)
def __len__(self):
return len(self.text)
def decode(self, encoding):
if self.name == 'control_word':
return self.text.decode(encoding) + u' '
else:
return self.text.decode(encoding)
# implementation note: we derive from IncrementalDecoder because this
# class serves excellently as a base class for incremental decoders,
# but of course we don't decode yet until later
class LatexLexer(codecs.IncrementalDecoder):
"""A very simple lexer for tex/latex code."""
# implementation note: every token **must** be decodable by inputenc
tokens = [
# comment: for ease, and for speed, we handle it as a token
('comment', br'%.*?\n'),
# control tokens
# in latex, some control tokens skip following whitespace
# ('control-word' and 'control-symbol')
# others do not ('control-symbol-x')
# XXX TBT says no control symbols skip whitespace (except '\ ')
# XXX but tests reveal otherwise?
('control_word', br'[\\][a-zA-Z]+'),
('control_symbol', br'[\\][~' br"'" br'"` =^!]'),
('control_symbol_x', br'[\\][^a-zA-Z]'), # TODO should only match ascii
# parameter tokens
# also support a lone hash so we can lex things like b'#a'
('parameter', br'\#[0-9]|\#'),
# any remaining characters; for ease we also handle space and
# newline as tokens
('space', br' '),
('newline', br'\n'),
('mathshift', br'[$]'),
# note: some chars joined together to make it easier to detect
# symbols that have a special function (i.e. --, ---, etc.)
('chars',
br'---|--|-|[`][`]'
br"|['][']"
br'|[?][`]|[!][`]'
# separate chars because brackets are optional
# e.g. fran\\c cais = fran\\c{c}ais in latex
# so only way to detect \\c acting on c only is this way
br'|[0-9a-zA-Z{}]'
# we have to join everything else together to support
# multibyte encodings: every token must be decodable!!
# this means for instance that \\c öké is NOT equivalent to
# \\c{ö}ké
br'|[^ %#$\n\\]+'),
# trailing garbage which we cannot decode otherwise
# (such as a lone '\' at the end of a buffer)
# is never emitted, but used internally by the buffer
('unknown', br'.'),
]
def __init__(self, errors='strict'):
"""Initialize the codec."""
self.errors = errors
# regular expression used for matching
self.regexp = re.compile(
b"|".join(
b"(?P<" + name.encode() + b">" + regexp + b")"
for name, regexp in self.tokens),
re.DOTALL)
# reset state
self.reset()
def reset(self):
"""Reset state (also called by __init__ to initialize the
state).
"""
# buffer for storing last (possibly incomplete) token
self.raw_buffer = Token()
def getstate(self):
return (self.raw_buffer.text, 0)
def setstate(self, state):
self.raw_buffer = Token('unknown', state[0])
def get_raw_tokens(self, bytes_, final=False):
"""Yield tokens without any further processing. Tokens are one of:
- ``\\<word>``: a control word (i.e. a command)
- ``\\<symbol>``: a control symbol (i.e. \\^ etc.)
- ``#<n>``: a parameter
- a series of byte characters
"""
if not isinstance(bytes_, bytes):
raise TypeError(
'expected bytes but got %s'
% bytes_.__class__.__name__)
if self.raw_buffer:
bytes_ = self.raw_buffer.text + bytes_
self.raw_buffer = Token()
for match in self.regexp.finditer(bytes_):
for name, regexp in self.tokens:
text = match.group(name)
if text is not None:
# yield the buffer token(s)
for token in self.flush_raw_tokens():
yield token
# fill buffer with next token
self.raw_buffer = Token(name, text)
break
else:
# should not happen
raise AssertionError("lexer failed on '%s'" % bytes_)
if final:
for token in self.flush_raw_tokens():
yield token
def flush_raw_tokens(self):
"""Flush the raw token buffer."""
if self.raw_buffer:
yield self.raw_buffer
self.raw_buffer = Token()
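# Usage sketch (editorial illustration, not part of the original source):
# get_raw_tokens yields one Token per regexp match, named after the
# pattern in the tokens table that matched:
#
#     lexer = LatexLexer()
#     for tok in lexer.get_raw_tokens(b'\\emph{x} % done\n', final=True):
#         pass  # tok.name is e.g. 'control_word', 'chars', 'space',
#               # 'comment'; tok.text is the matched bytes, e.g. b'\\emph'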
class LatexIncrementalLexer(LatexLexer):
"""A very simple incremental lexer for tex/latex code. Roughly
follows the state machine described in Tex By Topic, Chapter 2.
The generated tokens satisfy:
* no newline characters: paragraphs are separated by '\\par'
* spaces following control tokens are compressed
"""
def reset(self):
"""Reset state (also called by __init__ to initialize the
state).
"""
LatexLexer.reset(self)
# three possible states:
# newline (N), skipping spaces (S), and middle of line (M)
self.state = 'N'
# inline math mode?
self.inline_math = False
def getstate(self):
# state 'M' is most common, so let that be zero
return (
self.raw_buffer,
{'M': 0, 'N': 1, 'S': 2}[self.state]
| (4 if self.inline_math else 0)
)
def setstate(self, state):
self.raw_buffer = state[0]
self.state = {0: 'M', 1: 'N', 2: 'S'}[state[1] & 3]
self.inline_math = bool(state[1] & 4)
def get_tokens(self, bytes_, final=False):
"""Yield tokens while maintaining a state. Also skip
whitespace after control words and (some) control symbols.
Replaces newlines by spaces and \\par commands depending on
the context.
"""
# current position relative to the start of bytes_ in the sequence
# of bytes that have been decoded
pos = -len(self.raw_buffer)
for token in self.get_raw_tokens(bytes_, final=final):
pos = pos + len(token)
assert pos >= 0 # first token includes at least self.raw_buffer
if token.name == 'newline':
if self.state == 'N':
# if state was 'N', generate new paragraph
yield Token('control_word', b'\\par')
elif self.state == 'S':
# switch to 'N' state, do not generate a space
self.state = 'N'
elif self.state == 'M':
# switch to 'N' state, generate a space
self.state = 'N'
yield Token('space', b' ')
else:
raise AssertionError(
"unknown tex state '%s'" % self.state)
elif token.name == 'space':
if self.state == 'N':
# remain in 'N' state, no space token generated
pass
elif self.state == 'S':
# remain in 'S' state, no space token generated
pass
elif self.state == 'M':
# in M mode, generate the space,
# but switch to space skip mode
self.state = 'S'
yield token
else:
raise AssertionError(
"unknown state %s" % repr(self.state))
elif token.name == 'char':
self.state = 'M'
yield token
elif token.name == 'mathshift':
self.inline_math = not self.inline_math
yield token
elif token.name == 'parameter':
self.state = 'M'
yield token
elif token.name == 'control_word':
# go to space skip mode
self.state = 'S'
yield token
elif token.name == 'control_symbol':
# go to space skip mode
self.state = 'S'
yield token
elif token.name == 'control_symbol_x':
# don't skip following space, so go to M mode
self.state = 'M'
yield token
elif token.name == 'comment':
# go to newline mode, no token is generated
# note: comment includes the newline
self.state = 'N'
elif token.name == 'chars':
self.state = 'M'
yield token
elif token.name == 'unknown':
if self.errors == 'strict':
# current position within bytes_
# this is the position right after the unknown token
raise UnicodeDecodeError(
"latex", # codec
bytes_, # problematic input
pos - len(token), # start of problematic token
pos, # end of it
"unknown token %s" % repr(token.text))
elif self.errors == 'ignore':
# do nothing
pass
elif self.errors == 'replace':
yield Token('chars', b'?' * len(token))
else:
raise NotImplementedError(
"error mode %s not supported" % repr(self.errors))
class LatexIncrementalDecoder(LatexIncrementalLexer):
"""Simple incremental decoder. Transforms lexed latex tokens into
unicode.
To customize decoding, subclass and override
:meth:`get_unicode_tokens`.
"""
inputenc = "ascii"
"""Input encoding. **Must** extend ascii."""
def get_unicode_tokens(self, bytes_, final=False):
""":meth:`decode` calls this function to produce the final
sequence of unicode strings. This implementation simply
decodes every sequence in *inputenc* encoding. Override to
process the tokens in some other way (for example, for token
translation).
"""
for token in self.get_tokens(bytes_, final=final):
yield token.decode(self.inputenc)
def decode(self, bytes_, final=False):
try:
return u''.join(self.get_unicode_tokens(bytes_, final=final))
except UnicodeDecodeError as e:
# API requires that the encode method raises a ValueError
# in this case
raise ValueError(e)
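# Usage sketch (editorial illustration): the decoder plugs into Python's
# incremental codec machinery, so latex bytes may arrive in chunks:
#
#     decoder = LatexIncrementalDecoder()
#     text = decoder.decode(b'hello ', final=False)
#     text += decoder.decode(b'world', final=True)
#     # text == u'hello world'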
class LatexIncrementalEncoder(codecs.IncrementalEncoder):
"""Simple incremental encoder for latex."""
inputenc = "ascii"
"""Input encoding. **Must** extend ascii."""
    def get_latex_bytes(self, unicode_, final=False):
""":meth:`encode` calls this function to produce the final
sequence of latex bytes. This implementation simply
encodes every sequence in *inputenc* encoding. Override to
process the unicode in some other way (for example, for character
translation).
"""
if not isinstance(unicode_, basestring):
raise TypeError(
"expected unicode for encode input, but got {0} instead"
.format(unicode_.__class__.__name__))
for c in unicode_:
            yield c.encode(self.inputenc, self.errors)
def encode(self, unicode_, final=False):
"""Encode unicode string into a latex byte sequence."""
try:
return b''.join(self.get_latex_bytes(unicode_, final=final))
        except UnicodeDecodeError as e:
# API requires that the encode method raises a ValueError
# in this case
raise ValueError(e)
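# Usage sketch (editorial illustration): the base encoder encodes one
# character at a time with self.inputenc; subclasses are expected to
# override get_latex_bytes to translate non-ascii characters into latex
# macros.
#
#     encoder = LatexIncrementalEncoder()
#     data = encoder.encode(u'hello', final=True)
#     # data == b'hello'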
| gpl-2.0 |
jwinzer/OpenSlides | server/openslides/mediafiles/migrations/0004_directories_and_permissions_1.py | 7 | 2062 | # Generated by Django 2.2.2 on 2019-06-28 06:06
from django.db import migrations, models
import openslides.mediafiles.models
import openslides.utils.models
class Migration(migrations.Migration):
dependencies = [("mediafiles", "0003_auto_20190119_1425")]
operations = [
migrations.AlterModelOptions(
name="mediafile",
options={
"default_permissions": (),
"ordering": ("title",),
"permissions": (
("can_see", "Can see the list of files"),
("can_manage", "Can manage files"),
),
},
),
migrations.RenameField(
model_name="mediafile", old_name="timestamp", new_name="create_timestamp"
),
migrations.AddField(
model_name="mediafile",
name="access_groups",
field=models.ManyToManyField(blank=True, to="users.Group"),
),
migrations.AddField(
model_name="mediafile",
name="is_directory",
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name="mediafile",
name="parent",
field=models.ForeignKey(
blank=True,
null=True,
on_delete=models.deletion.SET_NULL,
related_name="children",
to="mediafiles.Mediafile",
),
),
migrations.AddField(
model_name="mediafile",
name="original_filename",
field=models.CharField(default="", max_length=255),
preserve_default=False,
),
migrations.AlterField(
model_name="mediafile",
name="mediafile",
field=models.FileField(
null=True, upload_to=openslides.mediafiles.models.get_file_path
),
),
migrations.AlterField(
model_name="mediafile", name="title", field=models.CharField(max_length=255)
),
]
| mit |
walkers-mv/luigi | luigi/util.py | 22 | 8150 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import functools
import logging
from luigi import six
from luigi import task
from luigi import parameter
from luigi.deprecate_kwarg import deprecate_kwarg # NOQA: removing this breaks code
if six.PY3:
xrange = range
logger = logging.getLogger('luigi-interface')
def common_params(task_instance, task_cls):
"""
Grab all the values in task_instance that are found in task_cls.
"""
if not isinstance(task_cls, task.Register):
raise TypeError("task_cls must be an uninstantiated Task")
task_instance_param_names = dict(task_instance.get_params()).keys()
task_cls_param_names = dict(task_cls.get_params()).keys()
common_param_names = list(set.intersection(set(task_instance_param_names), set(task_cls_param_names)))
common_param_vals = [(key, dict(task_cls.get_params())[key]) for key in common_param_names]
common_kwargs = dict([(key, task_instance.param_kwargs[key]) for key in common_param_names])
vals = dict(task_instance.get_param_values(common_param_vals, [], common_kwargs))
return vals
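# Usage sketch (editorial, class names are illustrative): given two task
# classes that share a parameter, common_params extracts the shared
# values from an instance of one so it can instantiate the other:
#
#     class A(luigi.Task):
#         n = luigi.IntParameter()
#
#     class B(luigi.Task):
#         n = luigi.IntParameter()
#         m = luigi.IntParameter(default=0)
#
#     common_params(B(n=3, m=1), A)  # -> {'n': 3}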
def task_wraps(P):
# In order to make the behavior of a wrapper class nicer, we set the name of the
# new class to the wrapped class, and copy over the docstring and module as well.
# This makes it possible to pickle the wrapped class etc.
# Btw, this is a slight abuse of functools.wraps. It's meant to be used only for
# functions, but it works for classes too, if you pass updated=[]
return functools.wraps(P, updated=[])
class inherits(object):
"""
Task inheritance.
Usage:
.. code-block:: python
class AnotherTask(luigi.Task):
n = luigi.IntParameter()
# ...
        @inherits(AnotherTask)
class MyTask(luigi.Task):
def requires(self):
return self.clone_parent()
def run(self):
print self.n # this will be defined
# ...
"""
def __init__(self, task_to_inherit):
super(inherits, self).__init__()
self.task_to_inherit = task_to_inherit
def __call__(self, task_that_inherits):
for param_name, param_obj in self.task_to_inherit.get_params():
if not hasattr(task_that_inherits, param_name):
setattr(task_that_inherits, param_name, param_obj)
# Modify task_that_inherits by subclassing it and adding methods
@task_wraps(task_that_inherits)
class Wrapped(task_that_inherits):
def clone_parent(_self, **args):
return _self.clone(cls=self.task_to_inherit, **args)
return Wrapped
class requires(object):
"""
Same as @inherits, but also auto-defines the requires method.
"""
def __init__(self, task_to_require):
super(requires, self).__init__()
self.inherit_decorator = inherits(task_to_require)
def __call__(self, task_that_requires):
task_that_requires = self.inherit_decorator(task_that_requires)
        # Modify task_that_requires by subclassing it and adding methods
@task_wraps(task_that_requires)
class Wrapped(task_that_requires):
def requires(_self):
return _self.clone_parent()
return Wrapped
class copies(object):
"""
Auto-copies a task.
Usage:
.. code-block:: python
        @copies(MyTask)
class CopyOfMyTask(luigi.Task):
def output(self):
return LocalTarget(self.date.strftime('/var/xyz/report-%Y-%m-%d'))
"""
def __init__(self, task_to_copy):
super(copies, self).__init__()
self.requires_decorator = requires(task_to_copy)
def __call__(self, task_that_copies):
task_that_copies = self.requires_decorator(task_that_copies)
# Modify task_that_copies by subclassing it and adding methods
@task_wraps(task_that_copies)
class Wrapped(task_that_copies):
def run(_self):
i, o = _self.input(), _self.output()
f = o.open('w') # TODO: assert that i, o are Target objects and not complex datastructures
for line in i.open('r'):
f.write(line)
f.close()
return Wrapped
def delegates(task_that_delegates):
""" Lets a task call methods on subtask(s).
The way this works is that the subtask is run as a part of the task, but
the task itself doesn't have to care about the requirements of the subtasks.
The subtask doesn't exist from the scheduler's point of view, and
its dependencies are instead required by the main task.
Example:
.. code-block:: python
class PowersOfN(luigi.Task):
n = luigi.IntParameter()
def f(self, x): return x ** self.n
@delegates
class T(luigi.Task):
def subtasks(self): return PowersOfN(5)
def run(self): print self.subtasks().f(42)
"""
if not hasattr(task_that_delegates, 'subtasks'):
# This method can (optionally) define a couple of delegate tasks that
# will be accessible as interfaces, meaning that the task can access
# those tasks and run methods defined on them, etc
raise AttributeError('%s needs to implement the method "subtasks"' % task_that_delegates)
@task_wraps(task_that_delegates)
class Wrapped(task_that_delegates):
def deps(self):
# Overrides method in base class
return task.flatten(self.requires()) + task.flatten([t.deps() for t in task.flatten(self.subtasks())])
def run(self):
for t in task.flatten(self.subtasks()):
t.run()
task_that_delegates.run(self)
return Wrapped
def previous(task):
"""
Return a previous Task of the same family.
By default checks if this task family only has one non-global parameter and if
it is a DateParameter, DateHourParameter or DateIntervalParameter in which case
it returns with the time decremented by 1 (hour, day or interval)
"""
params = task.get_params()
previous_params = {}
previous_date_params = {}
for param_name, param_obj in params:
param_value = getattr(task, param_name)
if isinstance(param_obj, parameter.DateParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(days=1)
elif isinstance(param_obj, parameter.DateMinuteParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(minutes=1)
elif isinstance(param_obj, parameter.DateHourParameter):
previous_date_params[param_name] = param_value - datetime.timedelta(hours=1)
elif isinstance(param_obj, parameter.DateIntervalParameter):
previous_date_params[param_name] = param_value.prev()
else:
previous_params[param_name] = param_value
previous_params.update(previous_date_params)
if len(previous_date_params) == 0:
raise NotImplementedError("No task parameter - can't determine previous task")
elif len(previous_date_params) > 1:
raise NotImplementedError("Too many date-related task parameters - can't determine previous task")
else:
return task.clone(**previous_params)
def get_previous_completed(task, max_steps=10):
prev = task
for _ in xrange(max_steps):
prev = previous(prev)
logger.debug("Checking if %s is complete", prev.task_id)
if prev.complete():
return prev
return None
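# Usage sketch (editorial, the Report class is illustrative): for a task
# whose only parameter is a DateParameter, these helpers step backwards
# one day at a time:
#
#     class Report(luigi.Task):
#         date = luigi.DateParameter()
#
#     previous(Report(date=datetime.date(2015, 1, 2)))
#     # -> Report(date=datetime.date(2015, 1, 1))
#     get_previous_completed(Report(date=datetime.date(2015, 1, 2)))
#     # -> the most recent already-complete Report, or None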
| apache-2.0 |
blaggacao/server-tools | __unported__/fetchmail_attach_from_folder/wizard/attach_mail_manually.py | 6 | 5137 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2013 Therp BV (<http://therp.nl>)
# All Rights Reserved
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging

from openerp.osv import fields
from openerp.osv.orm import TransientModel

logger = logging.getLogger(__name__)
class attach_mail_manually(TransientModel):
_name = 'fetchmail.attach.mail.manually'
_columns = {
'folder_id': fields.many2one('fetchmail.server.folder', 'Folder',
readonly=True),
'mail_ids': fields.one2many(
'fetchmail.attach.mail.manually.mail', 'wizard_id', 'Emails'),
}
def default_get(self, cr, uid, fields_list, context=None):
if context is None:
context = {}
defaults = super(attach_mail_manually, self).default_get(cr, uid,
fields_list, context)
for folder in self.pool.get('fetchmail.server.folder').browse(cr, uid,
[context.get('default_folder_id')], context):
            defaults['mail_ids'] = []
connection = folder.server_id.connect()
connection.select(folder.path)
result, msgids = connection.search(None,
'FLAGGED' if folder.flag_nonmatching else 'UNDELETED')
if result != 'OK':
logger.error('Could not search mailbox %s on %s' % (
                    folder.path, folder.server_id.server))
continue
            attach_mail_manually_mail._columns['object_id'].selection = [
(folder.model_id.model, folder.model_id.name)]
for msgid in msgids[0].split():
result, msgdata = connection.fetch(msgid, '(RFC822)')
if result != 'OK':
logger.error('Could not fetch %s in %s on %s' % (
                        msgid, folder.path, folder.server_id.server))
continue
mail_message = self.pool.get('mail.thread').message_parse(
cr, uid, msgdata[0][1],
save_original=folder.server_id.original,
context=context)
defaults['mail_ids'].append((0, 0, {
'msgid': msgid,
'subject': mail_message.get('subject', ''),
'date': mail_message.get('date', ''),
'object_id': folder.model_id.model+',False'
}))
connection.close()
return defaults
def attach_mails(self, cr, uid, ids, context=None):
for this in self.browse(cr, uid, ids, context):
for mail in this.mail_ids:
connection = this.folder_id.server_id.connect()
connection.select(this.folder_id.path)
result, msgdata = connection.fetch(mail.msgid, '(RFC822)')
if result != 'OK':
logger.error('Could not fetch %s in %s on %s' % (
                        mail.msgid, this.folder_id.path,
                        this.folder_id.server_id.server))
continue
mail_message = self.pool.get('mail.thread').message_parse(
cr, uid, msgdata[0][1],
save_original=this.folder_id.server_id.original,
context=context)
this.folder_id.server_id.attach_mail(connection,
mail.object_id.id, this.folder_id, mail_message,
mail.msgid)
connection.close()
return {'type': 'ir.actions.act_window_close'}
class attach_mail_manually_mail(TransientModel):
_name = 'fetchmail.attach.mail.manually.mail'
_columns = {
'wizard_id': fields.many2one('fetchmail.attach.mail.manually',
readonly=True),
'msgid': fields.char('Message id', size=16, readonly=True),
'subject': fields.char('Subject', size=128, readonly=True),
'date': fields.datetime('Date', readonly=True),
'object_id': fields.reference('Object',
selection=lambda self, cr, uid, context:
[(m.model, m.name) for m in
self.pool.get('ir.model').browse(cr, uid,
self.pool.get('ir.model').search(cr, uid, []),
context)], size=128),
}
| agpl-3.0 |
Venturi/cms | env/lib/python2.7/site-packages/phonenumbers/shortdata/region_IQ.py | 11 | 1035 | """Auto-generated file, do not edit by hand. IQ metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_IQ = PhoneMetadata(id='IQ', country_code=None, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='[1479]\\d{2,4}', possible_number_pattern='\\d{3,5}'),
toll_free=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
premium_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
emergency=PhoneNumberDesc(national_number_pattern='1(?:0[04]|15|22)', possible_number_pattern='\\d{3}', example_number='122'),
short_code=PhoneNumberDesc(national_number_pattern='1(?:0[04]|15|22)|4432|71117|9988', possible_number_pattern='\\d{3,5}', example_number='4432'),
standard_rate=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
carrier_specific=PhoneNumberDesc(national_number_pattern='4432|71117|9988', possible_number_pattern='\\d{3,5}'),
short_data=True)
| gpl-2.0 |
MaheshBhosale/totem-tosha | distribute-0.6.35/setuptools/command/install_egg_info.py | 357 | 3833 | from setuptools import Command
from setuptools.archive_util import unpack_archive
from distutils import log, dir_util
import os, shutil, pkg_resources
class install_egg_info(Command):
"""Install an .egg-info directory for the package"""
description = "Install an .egg-info directory for the package"
user_options = [
('install-dir=', 'd', "directory to install to"),
]
def initialize_options(self):
self.install_dir = None
def finalize_options(self):
self.set_undefined_options('install_lib',('install_dir','install_dir'))
ei_cmd = self.get_finalized_command("egg_info")
basename = pkg_resources.Distribution(
None, None, ei_cmd.egg_name, ei_cmd.egg_version
).egg_name()+'.egg-info'
self.source = ei_cmd.egg_info
self.target = os.path.join(self.install_dir, basename)
self.outputs = [self.target]
def run(self):
self.run_command('egg_info')
target = self.target
if os.path.isdir(self.target) and not os.path.islink(self.target):
dir_util.remove_tree(self.target, dry_run=self.dry_run)
elif os.path.exists(self.target):
self.execute(os.unlink,(self.target,),"Removing "+self.target)
if not self.dry_run:
pkg_resources.ensure_directory(self.target)
self.execute(self.copytree, (),
"Copying %s to %s" % (self.source, self.target)
)
self.install_namespaces()
def get_outputs(self):
return self.outputs
def copytree(self):
# Copy the .egg-info tree to site-packages
def skimmer(src,dst):
# filter out source-control directories; note that 'src' is always
# a '/'-separated path, regardless of platform. 'dst' is a
# platform-specific path.
for skip in '.svn/','CVS/':
if src.startswith(skip) or '/'+skip in src:
return None
self.outputs.append(dst)
log.debug("Copying %s to %s", src, dst)
return dst
unpack_archive(self.source, self.target, skimmer)
def install_namespaces(self):
nsp = self._get_all_ns_packages()
if not nsp: return
filename,ext = os.path.splitext(self.target)
filename += '-nspkg.pth'; self.outputs.append(filename)
log.info("Installing %s",filename)
if not self.dry_run:
f = open(filename,'wt')
for pkg in nsp:
# ensure pkg is not a unicode string under Python 2.7
pkg = str(pkg)
pth = tuple(pkg.split('.'))
trailer = '\n'
if '.' in pkg:
trailer = (
"; m and setattr(sys.modules[%r], %r, m)\n"
% ('.'.join(pth[:-1]), pth[-1])
)
f.write(
"import sys,types,os; "
"p = os.path.join(sys._getframe(1).f_locals['sitedir'], "
"*%(pth)r); "
"ie = os.path.exists(os.path.join(p,'__init__.py')); "
"m = not ie and "
"sys.modules.setdefault(%(pkg)r,types.ModuleType(%(pkg)r)); "
"mp = (m or []) and m.__dict__.setdefault('__path__',[]); "
"(p not in mp) and mp.append(p)%(trailer)s"
% locals()
)
f.close()
def _get_all_ns_packages(self):
nsp = {}
for pkg in self.distribution.namespace_packages or []:
pkg = pkg.split('.')
while pkg:
nsp['.'.join(pkg)] = 1
pkg.pop()
nsp=list(nsp)
nsp.sort() # set up shorter names first
return nsp
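# Editorial note (not part of the original source): for a distribution
# declaring namespace_packages=['zope'], install_namespaces writes a
# *-nspkg.pth file containing one line built from the template above;
# at interpreter start-up that line creates the 'zope' module if no real
# __init__.py exists and appends this installation's package directory
# to its __path__, letting several distributions share the namespace.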
| apache-2.0 |
Reflexe/doc_to_pdf | Windows/program/python-core-3.5.0/lib/mailbox.py | 5 | 78418 | """Read/write support for Maildir, mbox, MH, Babyl, and MMDF mailboxes."""
# Notes for authors of new mailbox subclasses:
#
# Remember to fsync() changes to disk before closing a modified file
# or returning from a flush() method. See functions _sync_flush() and
# _sync_close().
import os
import time
import calendar
import socket
import errno
import copy
import warnings
import email
import email.message
import email.generator
import io
import contextlib
try:
import fcntl
except ImportError:
fcntl = None
__all__ = [ 'Mailbox', 'Maildir', 'mbox', 'MH', 'Babyl', 'MMDF',
'Message', 'MaildirMessage', 'mboxMessage', 'MHMessage',
'BabylMessage', 'MMDFMessage']
linesep = os.linesep.encode('ascii')
class Mailbox:
"""A group of messages in a particular place."""
def __init__(self, path, factory=None, create=True):
"""Initialize a Mailbox instance."""
self._path = os.path.abspath(os.path.expanduser(path))
self._factory = factory
def add(self, message):
"""Add message and return assigned key."""
raise NotImplementedError('Method must be implemented by subclass')
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
raise NotImplementedError('Method must be implemented by subclass')
def __delitem__(self, key):
self.remove(key)
def discard(self, key):
"""If the keyed message exists, remove it."""
try:
self.remove(key)
except KeyError:
pass
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
raise NotImplementedError('Method must be implemented by subclass')
def get(self, key, default=None):
"""Return the keyed message, or default if it doesn't exist."""
try:
return self.__getitem__(key)
except KeyError:
return default
def __getitem__(self, key):
"""Return the keyed message; raise KeyError if it doesn't exist."""
if not self._factory:
return self.get_message(key)
else:
with contextlib.closing(self.get_file(key)) as file:
return self._factory(file)
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def get_string(self, key):
"""Return a string representation or raise a KeyError.
Uses email.message.Message to create a 7bit clean string
representation of the message."""
return email.message_from_bytes(self.get_bytes(key)).as_string()
def get_bytes(self, key):
"""Return a byte string representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
raise NotImplementedError('Method must be implemented by subclass')
def iterkeys(self):
"""Return an iterator over keys."""
raise NotImplementedError('Method must be implemented by subclass')
def keys(self):
"""Return a list of keys."""
return list(self.iterkeys())
def itervalues(self):
"""Return an iterator over all messages."""
for key in self.iterkeys():
try:
value = self[key]
except KeyError:
continue
yield value
def __iter__(self):
return self.itervalues()
def values(self):
"""Return a list of messages. Memory intensive."""
return list(self.itervalues())
def iteritems(self):
"""Return an iterator over (key, message) tuples."""
for key in self.iterkeys():
try:
value = self[key]
except KeyError:
continue
yield (key, value)
def items(self):
"""Return a list of (key, message) tuples. Memory intensive."""
return list(self.iteritems())
def __contains__(self, key):
"""Return True if the keyed message exists, False otherwise."""
raise NotImplementedError('Method must be implemented by subclass')
def __len__(self):
"""Return a count of messages in the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def clear(self):
"""Delete all messages."""
for key in self.keys():
self.discard(key)
def pop(self, key, default=None):
"""Delete the keyed message and return it, or default."""
try:
result = self[key]
except KeyError:
return default
self.discard(key)
return result
def popitem(self):
"""Delete an arbitrary (key, message) pair and return it."""
for key in self.iterkeys():
return (key, self.pop(key)) # This is only run once.
else:
raise KeyError('No messages in mailbox')
def update(self, arg=None):
"""Change the messages that correspond to certain keys."""
if hasattr(arg, 'iteritems'):
source = arg.iteritems()
elif hasattr(arg, 'items'):
source = arg.items()
else:
source = arg
bad_key = False
for key, message in source:
try:
self[key] = message
except KeyError:
bad_key = True
if bad_key:
raise KeyError('No message with key(s)')
def flush(self):
"""Write any pending changes to the disk."""
raise NotImplementedError('Method must be implemented by subclass')
def lock(self):
"""Lock the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def unlock(self):
"""Unlock the mailbox if it is locked."""
raise NotImplementedError('Method must be implemented by subclass')
def close(self):
"""Flush and close the mailbox."""
raise NotImplementedError('Method must be implemented by subclass')
def _string_to_bytes(self, message):
# If a message is not 7bit clean, we refuse to handle it since it
# likely came from reading invalid messages in text mode, and that way
# lies mojibake.
try:
return message.encode('ascii')
except UnicodeError:
raise ValueError("String input must be ASCII-only; "
"use bytes or a Message instead")
# Whether each message must end in a newline
_append_newline = False
def _dump_message(self, message, target, mangle_from_=False):
# This assumes the target file is open in binary mode.
"""Dump message contents to target file."""
if isinstance(message, email.message.Message):
buffer = io.BytesIO()
gen = email.generator.BytesGenerator(buffer, mangle_from_, 0)
gen.flatten(message)
buffer.seek(0)
data = buffer.read()
data = data.replace(b'\n', linesep)
target.write(data)
if self._append_newline and not data.endswith(linesep):
# Make sure the message ends with a newline
target.write(linesep)
elif isinstance(message, (str, bytes, io.StringIO)):
if isinstance(message, io.StringIO):
warnings.warn("Use of StringIO input is deprecated, "
"use BytesIO instead", DeprecationWarning, 3)
message = message.getvalue()
if isinstance(message, str):
message = self._string_to_bytes(message)
if mangle_from_:
message = message.replace(b'\nFrom ', b'\n>From ')
message = message.replace(b'\n', linesep)
target.write(message)
if self._append_newline and not message.endswith(linesep):
# Make sure the message ends with a newline
target.write(linesep)
elif hasattr(message, 'read'):
if hasattr(message, 'buffer'):
warnings.warn("Use of text mode files is deprecated, "
"use a binary mode file instead", DeprecationWarning, 3)
message = message.buffer
lastline = None
while True:
line = message.readline()
# Universal newline support.
if line.endswith(b'\r\n'):
line = line[:-2] + b'\n'
elif line.endswith(b'\r'):
line = line[:-1] + b'\n'
if not line:
break
if mangle_from_ and line.startswith(b'From '):
line = b'>From ' + line[5:]
line = line.replace(b'\n', linesep)
target.write(line)
lastline = line
if self._append_newline and lastline and not lastline.endswith(linesep):
# Make sure the message ends with a newline
target.write(linesep)
else:
raise TypeError('Invalid message type: %s' % type(message))
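# Usage sketch (editorial, not part of the module): the concrete
# subclasses below supply the storage format; the dict-like API defined
# here is then uniform across all of them:
#
#     box = mbox('/tmp/sample.mbox')      # or Maildir, MH, MMDF, Babyl
#     key = box.add('From: a@example.com\n\nhello\n')
#     msg = box[key]
#     box.flush()
#     box.close()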
class Maildir(Mailbox):
"""A qmail-style Maildir mailbox."""
colon = ':'
def __init__(self, dirname, factory=None, create=True):
"""Initialize a Maildir instance."""
Mailbox.__init__(self, dirname, factory, create)
self._paths = {
'tmp': os.path.join(self._path, 'tmp'),
'new': os.path.join(self._path, 'new'),
'cur': os.path.join(self._path, 'cur'),
}
if not os.path.exists(self._path):
if create:
os.mkdir(self._path, 0o700)
for path in self._paths.values():
os.mkdir(path, 0o700)
else:
raise NoSuchMailboxError(self._path)
self._toc = {}
self._toc_mtimes = {'cur': 0, 'new': 0}
self._last_read = 0 # Records last time we read cur/new
self._skewfactor = 0.1 # Adjust if os/fs clocks are skewing
def add(self, message):
"""Add message and return assigned key."""
tmp_file = self._create_tmp()
try:
self._dump_message(message, tmp_file)
except BaseException:
tmp_file.close()
os.remove(tmp_file.name)
raise
_sync_close(tmp_file)
if isinstance(message, MaildirMessage):
subdir = message.get_subdir()
suffix = self.colon + message.get_info()
if suffix == self.colon:
suffix = ''
else:
subdir = 'new'
suffix = ''
uniq = os.path.basename(tmp_file.name).split(self.colon)[0]
dest = os.path.join(self._path, subdir, uniq + suffix)
if isinstance(message, MaildirMessage):
os.utime(tmp_file.name,
(os.path.getatime(tmp_file.name), message.get_date()))
# No file modification should be done after the file is moved to its
# final position in order to prevent race conditions with changes
# from other programs
try:
if hasattr(os, 'link'):
os.link(tmp_file.name, dest)
os.remove(tmp_file.name)
else:
os.rename(tmp_file.name, dest)
except OSError as e:
os.remove(tmp_file.name)
if e.errno == errno.EEXIST:
raise ExternalClashError('Name clash with existing message: %s'
% dest)
else:
raise
return uniq
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
os.remove(os.path.join(self._path, self._lookup(key)))
def discard(self, key):
"""If the keyed message exists, remove it."""
# This overrides an inapplicable implementation in the superclass.
try:
self.remove(key)
except (KeyError, FileNotFoundError):
pass
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
old_subpath = self._lookup(key)
temp_key = self.add(message)
temp_subpath = self._lookup(temp_key)
if isinstance(message, MaildirMessage):
# temp's subdir and suffix were specified by message.
dominant_subpath = temp_subpath
else:
# temp's subdir and suffix were defaults from add().
dominant_subpath = old_subpath
subdir = os.path.dirname(dominant_subpath)
if self.colon in dominant_subpath:
suffix = self.colon + dominant_subpath.split(self.colon)[-1]
else:
suffix = ''
self.discard(key)
tmp_path = os.path.join(self._path, temp_subpath)
new_path = os.path.join(self._path, subdir, key + suffix)
if isinstance(message, MaildirMessage):
os.utime(tmp_path,
(os.path.getatime(tmp_path), message.get_date()))
# No file modification should be done after the file is moved to its
# final position in order to prevent race conditions with changes
# from other programs
os.rename(tmp_path, new_path)
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
subpath = self._lookup(key)
with open(os.path.join(self._path, subpath), 'rb') as f:
if self._factory:
msg = self._factory(f)
else:
msg = MaildirMessage(f)
subdir, name = os.path.split(subpath)
msg.set_subdir(subdir)
if self.colon in name:
msg.set_info(name.split(self.colon)[-1])
msg.set_date(os.path.getmtime(os.path.join(self._path, subpath)))
return msg
def get_bytes(self, key):
"""Return a bytes representation or raise a KeyError."""
with open(os.path.join(self._path, self._lookup(key)), 'rb') as f:
return f.read().replace(linesep, b'\n')
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
f = open(os.path.join(self._path, self._lookup(key)), 'rb')
return _ProxyFile(f)
def iterkeys(self):
"""Return an iterator over keys."""
self._refresh()
for key in self._toc:
try:
self._lookup(key)
except KeyError:
continue
yield key
def __contains__(self, key):
"""Return True if the keyed message exists, False otherwise."""
self._refresh()
return key in self._toc
def __len__(self):
"""Return a count of messages in the mailbox."""
self._refresh()
return len(self._toc)
def flush(self):
"""Write any pending changes to disk."""
# Maildir changes are always written immediately, so there's nothing
# to do.
pass
def lock(self):
"""Lock the mailbox."""
return
def unlock(self):
"""Unlock the mailbox if it is locked."""
return
def close(self):
"""Flush and close the mailbox."""
return
def list_folders(self):
"""Return a list of folder names."""
result = []
for entry in os.listdir(self._path):
if len(entry) > 1 and entry[0] == '.' and \
os.path.isdir(os.path.join(self._path, entry)):
result.append(entry[1:])
return result
def get_folder(self, folder):
"""Return a Maildir instance for the named folder."""
return Maildir(os.path.join(self._path, '.' + folder),
factory=self._factory,
create=False)
def add_folder(self, folder):
"""Create a folder and return a Maildir instance representing it."""
path = os.path.join(self._path, '.' + folder)
result = Maildir(path, factory=self._factory)
maildirfolder_path = os.path.join(path, 'maildirfolder')
if not os.path.exists(maildirfolder_path):
os.close(os.open(maildirfolder_path, os.O_CREAT | os.O_WRONLY,
0o666))
return result
def remove_folder(self, folder):
"""Delete the named folder, which must be empty."""
path = os.path.join(self._path, '.' + folder)
for entry in os.listdir(os.path.join(path, 'new')) + \
os.listdir(os.path.join(path, 'cur')):
if len(entry) < 1 or entry[0] != '.':
raise NotEmptyError('Folder contains message(s): %s' % folder)
for entry in os.listdir(path):
if entry != 'new' and entry != 'cur' and entry != 'tmp' and \
os.path.isdir(os.path.join(path, entry)):
raise NotEmptyError("Folder contains subdirectory '%s': %s" %
(folder, entry))
for root, dirs, files in os.walk(path, topdown=False):
for entry in files:
os.remove(os.path.join(root, entry))
for entry in dirs:
os.rmdir(os.path.join(root, entry))
os.rmdir(path)
def clean(self):
"""Delete old files in "tmp"."""
now = time.time()
for entry in os.listdir(os.path.join(self._path, 'tmp')):
path = os.path.join(self._path, 'tmp', entry)
if now - os.path.getatime(path) > 129600: # 60 * 60 * 36
os.remove(path)
_count = 1 # This is used to generate unique file names.
def _create_tmp(self):
"""Create a file in the tmp subdirectory and open and return it."""
now = time.time()
hostname = socket.gethostname()
if '/' in hostname:
hostname = hostname.replace('/', r'\057')
if ':' in hostname:
hostname = hostname.replace(':', r'\072')
uniq = "%s.M%sP%sQ%s.%s" % (int(now), int(now % 1 * 1e6), os.getpid(),
Maildir._count, hostname)
path = os.path.join(self._path, 'tmp', uniq)
try:
os.stat(path)
except FileNotFoundError:
Maildir._count += 1
try:
return _create_carefully(path)
except FileExistsError:
pass
# Fall through to here if stat succeeded or open raised EEXIST.
raise ExternalClashError('Name clash prevented file creation: %s' %
path)
def _refresh(self):
"""Update table of contents mapping."""
# If it has been less than two seconds since the last _refresh() call,
# we have to unconditionally re-read the mailbox just in case it has
# been modified, because os.path.mtime() has a 2 sec resolution in the
# most common worst case (FAT) and a 1 sec resolution typically. This
# results in a few unnecessary re-reads when _refresh() is called
# multiple times in that interval, but once the clock ticks over, we
# will only re-read as needed. Because the filesystem might be being
# served by an independent system with its own clock, we record and
# compare with the mtimes from the filesystem. Because the other
# system's clock might be skewing relative to our clock, we add an
# extra delta to our wait. The default is one tenth second, but is an
# instance variable and so can be adjusted if dealing with a
# particularly skewed or irregular system.
if time.time() - self._last_read > 2 + self._skewfactor:
refresh = False
for subdir in self._toc_mtimes:
mtime = os.path.getmtime(self._paths[subdir])
if mtime > self._toc_mtimes[subdir]:
refresh = True
self._toc_mtimes[subdir] = mtime
if not refresh:
return
# Refresh toc
self._toc = {}
for subdir in self._toc_mtimes:
path = self._paths[subdir]
for entry in os.listdir(path):
p = os.path.join(path, entry)
if os.path.isdir(p):
continue
uniq = entry.split(self.colon)[0]
self._toc[uniq] = os.path.join(subdir, entry)
self._last_read = time.time()
def _lookup(self, key):
"""Use TOC to return subpath for given key, or raise a KeyError."""
try:
if os.path.exists(os.path.join(self._path, self._toc[key])):
return self._toc[key]
except KeyError:
pass
self._refresh()
try:
return self._toc[key]
except KeyError:
raise KeyError('No message with key: %s' % key)
# This method is for backward compatibility only.
def next(self):
"""Return the next message in a one-time iteration."""
if not hasattr(self, '_onetime_keys'):
self._onetime_keys = self.iterkeys()
while True:
try:
return self[next(self._onetime_keys)]
except StopIteration:
return None
except KeyError:
continue
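# Usage sketch (editorial): Maildir keeps one file per message under the
# tmp/, new/ and cur/ subdirectories, so add() needs no locking:
#
#     md = Maildir('~/Maildir', create=True)
#     key = md.add(b'From: a@example.com\n\nhi\n')
#     md[key].get_subdir()    # 'new' until a mail reader moves it to 'cur'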
class _singlefileMailbox(Mailbox):
"""A single-file mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize a single-file mailbox."""
Mailbox.__init__(self, path, factory, create)
try:
f = open(self._path, 'rb+')
except OSError as e:
if e.errno == errno.ENOENT:
if create:
f = open(self._path, 'wb+')
else:
raise NoSuchMailboxError(self._path)
elif e.errno in (errno.EACCES, errno.EROFS):
f = open(self._path, 'rb')
else:
raise
self._file = f
self._toc = None
self._next_key = 0
self._pending = False # No changes require rewriting the file.
self._pending_sync = False # No need to sync the file
self._locked = False
self._file_length = None # Used to record mailbox size
def add(self, message):
"""Add message and return assigned key."""
self._lookup()
self._toc[self._next_key] = self._append_message(message)
self._next_key += 1
# _append_message appends the message to the mailbox file. We
# don't need a full rewrite + rename, sync is enough.
self._pending_sync = True
return self._next_key - 1
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
self._lookup(key)
del self._toc[key]
self._pending = True
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
self._lookup(key)
self._toc[key] = self._append_message(message)
self._pending = True
def iterkeys(self):
"""Return an iterator over keys."""
self._lookup()
yield from self._toc.keys()
def __contains__(self, key):
"""Return True if the keyed message exists, False otherwise."""
self._lookup()
return key in self._toc
def __len__(self):
"""Return a count of messages in the mailbox."""
self._lookup()
return len(self._toc)
def lock(self):
"""Lock the mailbox."""
if not self._locked:
_lock_file(self._file)
self._locked = True
def unlock(self):
"""Unlock the mailbox if it is locked."""
if self._locked:
_unlock_file(self._file)
self._locked = False
def flush(self):
"""Write any pending changes to disk."""
if not self._pending:
if self._pending_sync:
# Messages have only been added, so syncing the file
# is enough.
_sync_flush(self._file)
self._pending_sync = False
return
# In order to be writing anything out at all, self._toc must
# already have been generated (and presumably has been modified
# by adding or deleting an item).
assert self._toc is not None
# Check length of self._file; if it's changed, some other process
# has modified the mailbox since we scanned it.
self._file.seek(0, 2)
cur_len = self._file.tell()
if cur_len != self._file_length:
raise ExternalClashError('Size of mailbox file changed '
'(expected %i, found %i)' %
(self._file_length, cur_len))
new_file = _create_temporary(self._path)
try:
new_toc = {}
self._pre_mailbox_hook(new_file)
for key in sorted(self._toc.keys()):
start, stop = self._toc[key]
self._file.seek(start)
self._pre_message_hook(new_file)
new_start = new_file.tell()
while True:
buffer = self._file.read(min(4096,
stop - self._file.tell()))
if not buffer:
break
new_file.write(buffer)
new_toc[key] = (new_start, new_file.tell())
self._post_message_hook(new_file)
self._file_length = new_file.tell()
except:
new_file.close()
os.remove(new_file.name)
raise
_sync_close(new_file)
# self._file is about to get replaced, so no need to sync.
self._file.close()
# Make sure the new file's mode is the same as the old file's
mode = os.stat(self._path).st_mode
os.chmod(new_file.name, mode)
try:
os.rename(new_file.name, self._path)
except FileExistsError:
os.remove(self._path)
os.rename(new_file.name, self._path)
self._file = open(self._path, 'rb+')
self._toc = new_toc
self._pending = False
self._pending_sync = False
if self._locked:
_lock_file(self._file, dotlock=False)
def _pre_mailbox_hook(self, f):
"""Called before writing the mailbox to file f."""
return
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
return
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
return
def close(self):
"""Flush and close the mailbox."""
try:
self.flush()
finally:
try:
if self._locked:
self.unlock()
finally:
self._file.close() # Sync has been done by self.flush() above.
def _lookup(self, key=None):
"""Return (start, stop) or raise KeyError."""
if self._toc is None:
self._generate_toc()
if key is not None:
try:
return self._toc[key]
except KeyError:
raise KeyError('No message with key: %s' % key)
def _append_message(self, message):
"""Append message to mailbox and return (start, stop) offsets."""
self._file.seek(0, 2)
before = self._file.tell()
if len(self._toc) == 0 and not self._pending:
# This is the first message, and the _pre_mailbox_hook
# hasn't yet been called. If self._pending is True,
# messages have been removed, so _pre_mailbox_hook must
# have been called already.
self._pre_mailbox_hook(self._file)
try:
self._pre_message_hook(self._file)
offsets = self._install_message(message)
self._post_message_hook(self._file)
except BaseException:
self._file.truncate(before)
raise
self._file.flush()
self._file_length = self._file.tell() # Record current length of mailbox
return offsets
class _mboxMMDF(_singlefileMailbox):
"""An mbox or MMDF mailbox."""
_mangle_from_ = True
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
from_line = self._file.readline().replace(linesep, b'')
string = self._file.read(stop - self._file.tell())
msg = self._message_factory(string.replace(linesep, b'\n'))
msg.set_from(from_line[5:].decode('ascii'))
return msg
def get_string(self, key, from_=False):
"""Return a string representation or raise a KeyError."""
return email.message_from_bytes(
self.get_bytes(key)).as_string(unixfrom=from_)
def get_bytes(self, key, from_=False):
"""Return a string representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
if not from_:
self._file.readline()
string = self._file.read(stop - self._file.tell())
return string.replace(linesep, b'\n')
def get_file(self, key, from_=False):
"""Return a file-like representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
if not from_:
self._file.readline()
return _PartialFile(self._file, self._file.tell(), stop)
def _install_message(self, message):
"""Format a message and blindly write to self._file."""
from_line = None
if isinstance(message, str):
message = self._string_to_bytes(message)
if isinstance(message, bytes) and message.startswith(b'From '):
newline = message.find(b'\n')
if newline != -1:
from_line = message[:newline]
message = message[newline + 1:]
else:
from_line = message
message = b''
elif isinstance(message, _mboxMMDFMessage):
author = message.get_from().encode('ascii')
from_line = b'From ' + author
elif isinstance(message, email.message.Message):
from_line = message.get_unixfrom() # May be None.
if from_line is not None:
from_line = from_line.encode('ascii')
if from_line is None:
from_line = b'From MAILER-DAEMON ' + time.asctime(time.gmtime()).encode()
start = self._file.tell()
self._file.write(from_line + linesep)
self._dump_message(message, self._file, self._mangle_from_)
stop = self._file.tell()
return (start, stop)
class mbox(_mboxMMDF):
"""A classic mbox mailbox."""
_mangle_from_ = True
# All messages must end in a newline character, and
# _post_message_hooks outputs an empty line between messages.
_append_newline = True
def __init__(self, path, factory=None, create=True):
"""Initialize an mbox mailbox."""
self._message_factory = mboxMessage
_mboxMMDF.__init__(self, path, factory, create)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(linesep)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
last_was_empty = False
self._file.seek(0)
while True:
line_pos = self._file.tell()
line = self._file.readline()
if line.startswith(b'From '):
if len(stops) < len(starts):
if last_was_empty:
stops.append(line_pos - len(linesep))
else:
# The last line before the "From " line wasn't
# blank, but we consider it a start of a
# message anyway.
stops.append(line_pos)
starts.append(line_pos)
last_was_empty = False
elif not line:
if last_was_empty:
stops.append(line_pos - len(linesep))
else:
stops.append(line_pos)
break
elif line == linesep:
last_was_empty = True
else:
last_was_empty = False
self._toc = dict(enumerate(zip(starts, stops)))
self._next_key = len(self._toc)
self._file_length = self._file.tell()
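# Usage sketch (editorial): mbox keeps all messages in a single file, so
# modifications should happen under the mailbox lock and are written back
# atomically by flush():
#
#     box = mbox('/tmp/inbox')
#     box.lock()
#     try:
#         for key in box.keys():
#             box.discard(key)
#         box.flush()
#     finally:
#         box.unlock()
#         box.close()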
class MMDF(_mboxMMDF):
"""An MMDF mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize an MMDF mailbox."""
self._message_factory = MMDFMessage
_mboxMMDF.__init__(self, path, factory, create)
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
f.write(b'\001\001\001\001' + linesep)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(linesep + b'\001\001\001\001' + linesep)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
self._file.seek(0)
next_pos = 0
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line.startswith(b'\001\001\001\001' + linesep):
starts.append(next_pos)
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line == b'\001\001\001\001' + linesep:
stops.append(line_pos - len(linesep))
break
elif not line:
stops.append(line_pos)
break
elif not line:
break
self._toc = dict(enumerate(zip(starts, stops)))
self._next_key = len(self._toc)
self._file.seek(0, 2)
self._file_length = self._file.tell()
class MH(Mailbox):
"""An MH mailbox."""
def __init__(self, path, factory=None, create=True):
"""Initialize an MH instance."""
Mailbox.__init__(self, path, factory, create)
if not os.path.exists(self._path):
if create:
os.mkdir(self._path, 0o700)
os.close(os.open(os.path.join(self._path, '.mh_sequences'),
os.O_CREAT | os.O_EXCL | os.O_WRONLY, 0o600))
else:
raise NoSuchMailboxError(self._path)
self._locked = False
def add(self, message):
"""Add message and return assigned key."""
keys = self.keys()
if len(keys) == 0:
new_key = 1
else:
new_key = max(keys) + 1
new_path = os.path.join(self._path, str(new_key))
f = _create_carefully(new_path)
closed = False
try:
if self._locked:
_lock_file(f)
try:
try:
self._dump_message(message, f)
except BaseException:
# Unlock and close so it can be deleted on Windows
if self._locked:
_unlock_file(f)
_sync_close(f)
closed = True
os.remove(new_path)
raise
if isinstance(message, MHMessage):
self._dump_sequences(message, new_key)
finally:
if self._locked:
_unlock_file(f)
finally:
if not closed:
_sync_close(f)
return new_key
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
path = os.path.join(self._path, str(key))
try:
f = open(path, 'rb+')
except OSError as e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
else:
f.close()
os.remove(path)
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
path = os.path.join(self._path, str(key))
try:
f = open(path, 'rb+')
except OSError as e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
try:
if self._locked:
_lock_file(f)
try:
os.close(os.open(path, os.O_WRONLY | os.O_TRUNC))
self._dump_message(message, f)
if isinstance(message, MHMessage):
self._dump_sequences(message, key)
finally:
if self._locked:
_unlock_file(f)
finally:
_sync_close(f)
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
try:
if self._locked:
f = open(os.path.join(self._path, str(key)), 'rb+')
else:
f = open(os.path.join(self._path, str(key)), 'rb')
except OSError as e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
with f:
if self._locked:
_lock_file(f)
try:
msg = MHMessage(f)
finally:
if self._locked:
_unlock_file(f)
for name, key_list in self.get_sequences().items():
if key in key_list:
msg.add_sequence(name)
return msg
def get_bytes(self, key):
"""Return a bytes representation or raise a KeyError."""
try:
if self._locked:
f = open(os.path.join(self._path, str(key)), 'rb+')
else:
f = open(os.path.join(self._path, str(key)), 'rb')
except OSError as e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
with f:
if self._locked:
_lock_file(f)
try:
return f.read().replace(linesep, b'\n')
finally:
if self._locked:
_unlock_file(f)
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
try:
f = open(os.path.join(self._path, str(key)), 'rb')
except OSError as e:
if e.errno == errno.ENOENT:
raise KeyError('No message with key: %s' % key)
else:
raise
return _ProxyFile(f)
def iterkeys(self):
"""Return an iterator over keys."""
return iter(sorted(int(entry) for entry in os.listdir(self._path)
if entry.isdigit()))
def __contains__(self, key):
"""Return True if the keyed message exists, False otherwise."""
return os.path.exists(os.path.join(self._path, str(key)))
def __len__(self):
"""Return a count of messages in the mailbox."""
return len(list(self.iterkeys()))
def lock(self):
"""Lock the mailbox."""
if not self._locked:
self._file = open(os.path.join(self._path, '.mh_sequences'), 'rb+')
_lock_file(self._file)
self._locked = True
def unlock(self):
"""Unlock the mailbox if it is locked."""
if self._locked:
_unlock_file(self._file)
_sync_close(self._file)
del self._file
self._locked = False
def flush(self):
"""Write any pending changes to the disk."""
return
def close(self):
"""Flush and close the mailbox."""
if self._locked:
self.unlock()
def list_folders(self):
"""Return a list of folder names."""
result = []
for entry in os.listdir(self._path):
if os.path.isdir(os.path.join(self._path, entry)):
result.append(entry)
return result
def get_folder(self, folder):
"""Return an MH instance for the named folder."""
return MH(os.path.join(self._path, folder),
factory=self._factory, create=False)
def add_folder(self, folder):
"""Create a folder and return an MH instance representing it."""
return MH(os.path.join(self._path, folder),
factory=self._factory)
def remove_folder(self, folder):
"""Delete the named folder, which must be empty."""
path = os.path.join(self._path, folder)
entries = os.listdir(path)
if entries == ['.mh_sequences']:
os.remove(os.path.join(path, '.mh_sequences'))
elif entries == []:
pass
else:
            raise NotEmptyError('Folder not empty: %s' % path)
os.rmdir(path)
def get_sequences(self):
"""Return a name-to-key-list dictionary to define each sequence."""
results = {}
with open(os.path.join(self._path, '.mh_sequences'), 'r', encoding='ASCII') as f:
all_keys = set(self.keys())
for line in f:
try:
name, contents = line.split(':')
keys = set()
for spec in contents.split():
if spec.isdigit():
keys.add(int(spec))
else:
start, stop = (int(x) for x in spec.split('-'))
keys.update(range(start, stop + 1))
                    results[name] = [key for key in sorted(keys)
                                     if key in all_keys]
if len(results[name]) == 0:
del results[name]
except ValueError:
raise FormatError('Invalid sequence specification: %s' %
line.rstrip())
return results
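    # Illustrative sketch of the '.mh_sequences' format parsed above (the
    # names and keys are hypothetical):
    #
    #     flagged: 2 5-7
    #     replied: 5
    #
    # parses to {'flagged': [2, 5, 6, 7], 'replied': [5]}, after discarding
    # any keys that no longer correspond to existing messages.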
def set_sequences(self, sequences):
"""Set sequences using the given name-to-key-list dictionary."""
f = open(os.path.join(self._path, '.mh_sequences'), 'r+', encoding='ASCII')
try:
os.close(os.open(f.name, os.O_WRONLY | os.O_TRUNC))
for name, keys in sequences.items():
if len(keys) == 0:
continue
f.write(name + ':')
prev = None
completing = False
for key in sorted(set(keys)):
if key - 1 == prev:
if not completing:
completing = True
f.write('-')
elif completing:
completing = False
f.write('%s %s' % (prev, key))
else:
f.write(' %s' % key)
prev = key
if completing:
f.write(str(prev) + '\n')
else:
f.write('\n')
finally:
_sync_close(f)
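    # Illustrative sketch of the run-length encoding above: for a sequence
    # named 'flagged' with keys [1, 2, 3, 5], the loop emits 'flagged: 1-3 5',
    # collapsing consecutive runs into 'start-stop' ranges and writing
    # isolated keys individually.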
def pack(self):
"""Re-name messages to eliminate numbering gaps. Invalidates keys."""
sequences = self.get_sequences()
prev = 0
changes = []
for key in self.iterkeys():
if key - 1 != prev:
changes.append((key, prev + 1))
if hasattr(os, 'link'):
os.link(os.path.join(self._path, str(key)),
os.path.join(self._path, str(prev + 1)))
os.unlink(os.path.join(self._path, str(key)))
else:
os.rename(os.path.join(self._path, str(key)),
os.path.join(self._path, str(prev + 1)))
prev += 1
self._next_key = prev + 1
if len(changes) == 0:
return
for name, key_list in sequences.items():
for old, new in changes:
if old in key_list:
key_list[key_list.index(old)] = new
self.set_sequences(sequences)
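    # Illustrative sketch (hypothetical keys): packing a mailbox whose keys
    # are [1, 3, 6] renames the message files to [1, 2, 3] and then rewrites
    # every sequence so that the old keys 3 and 6 become 2 and 3.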
def _dump_sequences(self, message, key):
"""Inspect a new MHMessage and update sequences appropriately."""
pending_sequences = message.get_sequences()
all_sequences = self.get_sequences()
for name, key_list in all_sequences.items():
if name in pending_sequences:
key_list.append(key)
elif key in key_list:
del key_list[key_list.index(key)]
for sequence in pending_sequences:
if sequence not in all_sequences:
all_sequences[sequence] = [key]
self.set_sequences(all_sequences)
class Babyl(_singlefileMailbox):
"""An Rmail-style Babyl mailbox."""
_special_labels = frozenset({'unseen', 'deleted', 'filed', 'answered',
'forwarded', 'edited', 'resent'})
def __init__(self, path, factory=None, create=True):
"""Initialize a Babyl mailbox."""
_singlefileMailbox.__init__(self, path, factory, create)
self._labels = {}
def add(self, message):
"""Add message and return assigned key."""
key = _singlefileMailbox.add(self, message)
if isinstance(message, BabylMessage):
self._labels[key] = message.get_labels()
return key
def remove(self, key):
"""Remove the keyed message; raise KeyError if it doesn't exist."""
_singlefileMailbox.remove(self, key)
if key in self._labels:
del self._labels[key]
def __setitem__(self, key, message):
"""Replace the keyed message; raise KeyError if it doesn't exist."""
_singlefileMailbox.__setitem__(self, key, message)
if isinstance(message, BabylMessage):
self._labels[key] = message.get_labels()
def get_message(self, key):
"""Return a Message representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
self._file.readline() # Skip b'1,' line specifying labels.
original_headers = io.BytesIO()
while True:
line = self._file.readline()
if line == b'*** EOOH ***' + linesep or not line:
break
original_headers.write(line.replace(linesep, b'\n'))
visible_headers = io.BytesIO()
while True:
line = self._file.readline()
if line == linesep or not line:
break
visible_headers.write(line.replace(linesep, b'\n'))
# Read up to the stop, or to the end
n = stop - self._file.tell()
assert n >= 0
body = self._file.read(n)
body = body.replace(linesep, b'\n')
msg = BabylMessage(original_headers.getvalue() + body)
msg.set_visible(visible_headers.getvalue())
if key in self._labels:
msg.set_labels(self._labels[key])
return msg
def get_bytes(self, key):
"""Return a string representation or raise a KeyError."""
start, stop = self._lookup(key)
self._file.seek(start)
self._file.readline() # Skip b'1,' line specifying labels.
original_headers = io.BytesIO()
while True:
line = self._file.readline()
if line == b'*** EOOH ***' + linesep or not line:
break
original_headers.write(line.replace(linesep, b'\n'))
while True:
line = self._file.readline()
if line == linesep or not line:
break
headers = original_headers.getvalue()
n = stop - self._file.tell()
assert n >= 0
data = self._file.read(n)
data = data.replace(linesep, b'\n')
return headers + data
def get_file(self, key):
"""Return a file-like representation or raise a KeyError."""
return io.BytesIO(self.get_bytes(key).replace(b'\n', linesep))
def get_labels(self):
"""Return a list of user-defined labels in the mailbox."""
self._lookup()
labels = set()
for label_list in self._labels.values():
labels.update(label_list)
labels.difference_update(self._special_labels)
return list(labels)
def _generate_toc(self):
"""Generate key-to-(start, stop) table of contents."""
starts, stops = [], []
self._file.seek(0)
next_pos = 0
label_lists = []
while True:
line_pos = next_pos
line = self._file.readline()
next_pos = self._file.tell()
if line == b'\037\014' + linesep:
if len(stops) < len(starts):
stops.append(line_pos - len(linesep))
starts.append(next_pos)
labels = [label.strip() for label
in self._file.readline()[1:].split(b',')
if label.strip()]
label_lists.append(labels)
elif line == b'\037' or line == b'\037' + linesep:
if len(stops) < len(starts):
stops.append(line_pos - len(linesep))
elif not line:
stops.append(line_pos - len(linesep))
break
self._toc = dict(enumerate(zip(starts, stops)))
self._labels = dict(enumerate(label_lists))
self._next_key = len(self._toc)
self._file.seek(0, 2)
self._file_length = self._file.tell()
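    # Illustrative sketch of the Babyl layout parsed above (content is
    # hypothetical): each message begins after a b'\037\014' separator line,
    # the following line carries its labels (e.g. b'1, answered,, todo,'),
    # and a lone b'\037' marks the end of the mailbox.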
def _pre_mailbox_hook(self, f):
"""Called before writing the mailbox to file f."""
babyl = b'BABYL OPTIONS:' + linesep
babyl += b'Version: 5' + linesep
labels = self.get_labels()
labels = (label.encode() for label in labels)
babyl += b'Labels:' + b','.join(labels) + linesep
babyl += b'\037'
f.write(babyl)
def _pre_message_hook(self, f):
"""Called before writing each message to file f."""
f.write(b'\014' + linesep)
def _post_message_hook(self, f):
"""Called after writing each message to file f."""
f.write(linesep + b'\037')
def _install_message(self, message):
"""Write message contents and return (start, stop)."""
start = self._file.tell()
if isinstance(message, BabylMessage):
special_labels = []
labels = []
for label in message.get_labels():
if label in self._special_labels:
special_labels.append(label)
else:
labels.append(label)
self._file.write(b'1')
for label in special_labels:
self._file.write(b', ' + label.encode())
self._file.write(b',,')
for label in labels:
self._file.write(b' ' + label.encode() + b',')
self._file.write(linesep)
else:
self._file.write(b'1,,' + linesep)
if isinstance(message, email.message.Message):
orig_buffer = io.BytesIO()
orig_generator = email.generator.BytesGenerator(orig_buffer, False, 0)
orig_generator.flatten(message)
orig_buffer.seek(0)
while True:
line = orig_buffer.readline()
self._file.write(line.replace(b'\n', linesep))
if line == b'\n' or not line:
break
self._file.write(b'*** EOOH ***' + linesep)
if isinstance(message, BabylMessage):
vis_buffer = io.BytesIO()
vis_generator = email.generator.BytesGenerator(vis_buffer, False, 0)
vis_generator.flatten(message.get_visible())
while True:
line = vis_buffer.readline()
self._file.write(line.replace(b'\n', linesep))
if line == b'\n' or not line:
break
else:
orig_buffer.seek(0)
while True:
line = orig_buffer.readline()
self._file.write(line.replace(b'\n', linesep))
if line == b'\n' or not line:
break
while True:
buffer = orig_buffer.read(4096) # Buffer size is arbitrary.
if not buffer:
break
self._file.write(buffer.replace(b'\n', linesep))
elif isinstance(message, (bytes, str, io.StringIO)):
if isinstance(message, io.StringIO):
warnings.warn("Use of StringIO input is deprecated, "
"use BytesIO instead", DeprecationWarning, 3)
message = message.getvalue()
if isinstance(message, str):
message = self._string_to_bytes(message)
body_start = message.find(b'\n\n') + 2
if body_start - 2 != -1:
self._file.write(message[:body_start].replace(b'\n', linesep))
self._file.write(b'*** EOOH ***' + linesep)
self._file.write(message[:body_start].replace(b'\n', linesep))
self._file.write(message[body_start:].replace(b'\n', linesep))
else:
self._file.write(b'*** EOOH ***' + linesep + linesep)
self._file.write(message.replace(b'\n', linesep))
elif hasattr(message, 'readline'):
if hasattr(message, 'buffer'):
warnings.warn("Use of text mode files is deprecated, "
"use a binary mode file instead", DeprecationWarning, 3)
message = message.buffer
original_pos = message.tell()
first_pass = True
while True:
line = message.readline()
# Universal newline support.
if line.endswith(b'\r\n'):
line = line[:-2] + b'\n'
elif line.endswith(b'\r'):
line = line[:-1] + b'\n'
self._file.write(line.replace(b'\n', linesep))
if line == b'\n' or not line:
if first_pass:
first_pass = False
self._file.write(b'*** EOOH ***' + linesep)
message.seek(original_pos)
else:
break
while True:
line = message.readline()
if not line:
break
# Universal newline support.
if line.endswith(b'\r\n'):
line = line[:-2] + linesep
elif line.endswith(b'\r'):
line = line[:-1] + linesep
elif line.endswith(b'\n'):
line = line[:-1] + linesep
self._file.write(line)
else:
raise TypeError('Invalid message type: %s' % type(message))
stop = self._file.tell()
return (start, stop)
class Message(email.message.Message):
"""Message with mailbox-format-specific properties."""
def __init__(self, message=None):
"""Initialize a Message instance."""
if isinstance(message, email.message.Message):
self._become_message(copy.deepcopy(message))
if isinstance(message, Message):
message._explain_to(self)
elif isinstance(message, bytes):
self._become_message(email.message_from_bytes(message))
elif isinstance(message, str):
self._become_message(email.message_from_string(message))
elif isinstance(message, io.TextIOWrapper):
self._become_message(email.message_from_file(message))
elif hasattr(message, "read"):
self._become_message(email.message_from_binary_file(message))
elif message is None:
email.message.Message.__init__(self)
else:
raise TypeError('Invalid message type: %s' % type(message))
def _become_message(self, message):
"""Assume the non-format-specific state of message."""
type_specific = getattr(message, '_type_specific_attributes', [])
for name in message.__dict__:
if name not in type_specific:
self.__dict__[name] = message.__dict__[name]
def _explain_to(self, message):
"""Copy format-specific state to message insofar as possible."""
if isinstance(message, Message):
return # There's nothing format-specific to explain.
else:
raise TypeError('Cannot convert to specified type')
class MaildirMessage(Message):
"""Message with Maildir-specific properties."""
_type_specific_attributes = ['_subdir', '_info', '_date']
def __init__(self, message=None):
"""Initialize a MaildirMessage instance."""
self._subdir = 'new'
self._info = ''
self._date = time.time()
Message.__init__(self, message)
def get_subdir(self):
"""Return 'new' or 'cur'."""
return self._subdir
def set_subdir(self, subdir):
"""Set subdir to 'new' or 'cur'."""
if subdir == 'new' or subdir == 'cur':
self._subdir = subdir
else:
raise ValueError("subdir must be 'new' or 'cur': %s" % subdir)
def get_flags(self):
"""Return as a string the flags that are set."""
if self._info.startswith('2,'):
return self._info[2:]
else:
return ''
def set_flags(self, flags):
"""Set the given flags and unset all others."""
self._info = '2,' + ''.join(sorted(flags))
def add_flag(self, flag):
"""Set the given flag(s) without changing others."""
self.set_flags(''.join(set(self.get_flags()) | set(flag)))
def remove_flag(self, flag):
"""Unset the given string flag(s) without changing others."""
if self.get_flags():
self.set_flags(''.join(set(self.get_flags()) - set(flag)))
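    # Illustrative sketch of the info string handled above: set_flags('SR')
    # stores '2,RS' (flags are kept sorted after the '2,' prefix), so
    # get_flags() then returns 'RS' and add_flag('F') updates it to '2,FRS'.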
def get_date(self):
"""Return delivery date of message, in seconds since the epoch."""
return self._date
def set_date(self, date):
"""Set delivery date of message, in seconds since the epoch."""
try:
self._date = float(date)
except ValueError:
raise TypeError("can't convert to float: %s" % date)
def get_info(self):
"""Get the message's "info" as a string."""
return self._info
def set_info(self, info):
"""Set the message's "info" string."""
if isinstance(info, str):
self._info = info
else:
raise TypeError('info must be a string: %s' % type(info))
def _explain_to(self, message):
"""Copy Maildir-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
message.set_flags(self.get_flags())
message.set_subdir(self.get_subdir())
message.set_date(self.get_date())
elif isinstance(message, _mboxMMDFMessage):
flags = set(self.get_flags())
if 'S' in flags:
message.add_flag('R')
if self.get_subdir() == 'cur':
message.add_flag('O')
if 'T' in flags:
message.add_flag('D')
if 'F' in flags:
message.add_flag('F')
if 'R' in flags:
message.add_flag('A')
message.set_from('MAILER-DAEMON', time.gmtime(self.get_date()))
elif isinstance(message, MHMessage):
flags = set(self.get_flags())
if 'S' not in flags:
message.add_sequence('unseen')
if 'R' in flags:
message.add_sequence('replied')
if 'F' in flags:
message.add_sequence('flagged')
elif isinstance(message, BabylMessage):
flags = set(self.get_flags())
if 'S' not in flags:
message.add_label('unseen')
if 'T' in flags:
message.add_label('deleted')
if 'R' in flags:
message.add_label('answered')
if 'P' in flags:
message.add_label('forwarded')
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
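    # Summary of the flag translation implemented above when converting to
    # mbox/MMDF: Maildir 'S' -> 'R', subdir 'cur' -> 'O', 'T' -> 'D',
    # 'F' -> 'F', and 'R' -> 'A'.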
class _mboxMMDFMessage(Message):
"""Message with mbox- or MMDF-specific properties."""
_type_specific_attributes = ['_from']
def __init__(self, message=None):
"""Initialize an mboxMMDFMessage instance."""
self.set_from('MAILER-DAEMON', True)
if isinstance(message, email.message.Message):
unixfrom = message.get_unixfrom()
if unixfrom is not None and unixfrom.startswith('From '):
self.set_from(unixfrom[5:])
Message.__init__(self, message)
def get_from(self):
"""Return contents of "From " line."""
return self._from
def set_from(self, from_, time_=None):
"""Set "From " line, formatting and appending time_ if specified."""
if time_ is not None:
if time_ is True:
time_ = time.gmtime()
from_ += ' ' + time.asctime(time_)
self._from = from_
def get_flags(self):
"""Return as a string the flags that are set."""
return self.get('Status', '') + self.get('X-Status', '')
def set_flags(self, flags):
"""Set the given flags and unset all others."""
flags = set(flags)
status_flags, xstatus_flags = '', ''
for flag in ('R', 'O'):
if flag in flags:
status_flags += flag
flags.remove(flag)
for flag in ('D', 'F', 'A'):
if flag in flags:
xstatus_flags += flag
flags.remove(flag)
xstatus_flags += ''.join(sorted(flags))
try:
self.replace_header('Status', status_flags)
except KeyError:
self.add_header('Status', status_flags)
try:
self.replace_header('X-Status', xstatus_flags)
except KeyError:
self.add_header('X-Status', xstatus_flags)
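    # Illustrative sketch: set_flags('ROFA') stores 'RO' in the Status header
    # and 'FA' in X-Status, so get_flags() returns 'ROFA'; any flags outside
    # the conventional sets are appended, sorted, to X-Status.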
def add_flag(self, flag):
"""Set the given flag(s) without changing others."""
self.set_flags(''.join(set(self.get_flags()) | set(flag)))
def remove_flag(self, flag):
"""Unset the given string flag(s) without changing others."""
if 'Status' in self or 'X-Status' in self:
self.set_flags(''.join(set(self.get_flags()) - set(flag)))
def _explain_to(self, message):
"""Copy mbox- or MMDF-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
flags = set(self.get_flags())
if 'O' in flags:
message.set_subdir('cur')
if 'F' in flags:
message.add_flag('F')
if 'A' in flags:
message.add_flag('R')
if 'R' in flags:
message.add_flag('S')
if 'D' in flags:
message.add_flag('T')
del message['status']
del message['x-status']
maybe_date = ' '.join(self.get_from().split()[-5:])
try:
message.set_date(calendar.timegm(time.strptime(maybe_date,
'%a %b %d %H:%M:%S %Y')))
except (ValueError, OverflowError):
pass
elif isinstance(message, _mboxMMDFMessage):
message.set_flags(self.get_flags())
message.set_from(self.get_from())
elif isinstance(message, MHMessage):
flags = set(self.get_flags())
if 'R' not in flags:
message.add_sequence('unseen')
if 'A' in flags:
message.add_sequence('replied')
if 'F' in flags:
message.add_sequence('flagged')
del message['status']
del message['x-status']
elif isinstance(message, BabylMessage):
flags = set(self.get_flags())
if 'R' not in flags:
message.add_label('unseen')
if 'D' in flags:
message.add_label('deleted')
if 'A' in flags:
message.add_label('answered')
del message['status']
del message['x-status']
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class mboxMessage(_mboxMMDFMessage):
"""Message with mbox-specific properties."""
class MHMessage(Message):
"""Message with MH-specific properties."""
_type_specific_attributes = ['_sequences']
def __init__(self, message=None):
"""Initialize an MHMessage instance."""
self._sequences = []
Message.__init__(self, message)
def get_sequences(self):
"""Return a list of sequences that include the message."""
return self._sequences[:]
def set_sequences(self, sequences):
"""Set the list of sequences that include the message."""
self._sequences = list(sequences)
def add_sequence(self, sequence):
"""Add sequence to list of sequences including the message."""
if isinstance(sequence, str):
if not sequence in self._sequences:
self._sequences.append(sequence)
else:
raise TypeError('sequence type must be str: %s' % type(sequence))
def remove_sequence(self, sequence):
"""Remove sequence from the list of sequences including the message."""
try:
self._sequences.remove(sequence)
except ValueError:
pass
def _explain_to(self, message):
"""Copy MH-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
sequences = set(self.get_sequences())
if 'unseen' in sequences:
message.set_subdir('cur')
else:
message.set_subdir('cur')
message.add_flag('S')
if 'flagged' in sequences:
message.add_flag('F')
if 'replied' in sequences:
message.add_flag('R')
elif isinstance(message, _mboxMMDFMessage):
sequences = set(self.get_sequences())
if 'unseen' not in sequences:
message.add_flag('RO')
else:
message.add_flag('O')
if 'flagged' in sequences:
message.add_flag('F')
if 'replied' in sequences:
message.add_flag('A')
elif isinstance(message, MHMessage):
for sequence in self.get_sequences():
message.add_sequence(sequence)
elif isinstance(message, BabylMessage):
sequences = set(self.get_sequences())
if 'unseen' in sequences:
message.add_label('unseen')
if 'replied' in sequences:
message.add_label('answered')
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class BabylMessage(Message):
"""Message with Babyl-specific properties."""
_type_specific_attributes = ['_labels', '_visible']
def __init__(self, message=None):
"""Initialize a BabylMessage instance."""
self._labels = []
self._visible = Message()
Message.__init__(self, message)
def get_labels(self):
"""Return a list of labels on the message."""
return self._labels[:]
def set_labels(self, labels):
"""Set the list of labels on the message."""
self._labels = list(labels)
def add_label(self, label):
"""Add label to list of labels on the message."""
if isinstance(label, str):
if label not in self._labels:
self._labels.append(label)
else:
raise TypeError('label must be a string: %s' % type(label))
def remove_label(self, label):
"""Remove label from the list of labels on the message."""
try:
self._labels.remove(label)
except ValueError:
pass
def get_visible(self):
"""Return a Message representation of visible headers."""
return Message(self._visible)
def set_visible(self, visible):
"""Set the Message representation of visible headers."""
self._visible = Message(visible)
def update_visible(self):
"""Update and/or sensibly generate a set of visible headers."""
for header in self._visible.keys():
if header in self:
self._visible.replace_header(header, self[header])
else:
del self._visible[header]
for header in ('Date', 'From', 'Reply-To', 'To', 'CC', 'Subject'):
if header in self and header not in self._visible:
self._visible[header] = self[header]
def _explain_to(self, message):
"""Copy Babyl-specific state to message insofar as possible."""
if isinstance(message, MaildirMessage):
labels = set(self.get_labels())
if 'unseen' in labels:
message.set_subdir('cur')
else:
message.set_subdir('cur')
message.add_flag('S')
if 'forwarded' in labels or 'resent' in labels:
message.add_flag('P')
if 'answered' in labels:
message.add_flag('R')
if 'deleted' in labels:
message.add_flag('T')
elif isinstance(message, _mboxMMDFMessage):
labels = set(self.get_labels())
if 'unseen' not in labels:
message.add_flag('RO')
else:
message.add_flag('O')
if 'deleted' in labels:
message.add_flag('D')
if 'answered' in labels:
message.add_flag('A')
elif isinstance(message, MHMessage):
labels = set(self.get_labels())
if 'unseen' in labels:
message.add_sequence('unseen')
if 'answered' in labels:
message.add_sequence('replied')
elif isinstance(message, BabylMessage):
message.set_visible(self.get_visible())
for label in self.get_labels():
message.add_label(label)
elif isinstance(message, Message):
pass
else:
raise TypeError('Cannot convert to specified type: %s' %
type(message))
class MMDFMessage(_mboxMMDFMessage):
"""Message with MMDF-specific properties."""
class _ProxyFile:
"""A read-only wrapper of a file."""
def __init__(self, f, pos=None):
"""Initialize a _ProxyFile."""
self._file = f
if pos is None:
self._pos = f.tell()
else:
self._pos = pos
def read(self, size=None):
"""Read bytes."""
return self._read(size, self._file.read)
def read1(self, size=None):
"""Read bytes."""
return self._read(size, self._file.read1)
def readline(self, size=None):
"""Read a line."""
return self._read(size, self._file.readline)
def readlines(self, sizehint=None):
"""Read multiple lines."""
result = []
for line in self:
result.append(line)
if sizehint is not None:
sizehint -= len(line)
if sizehint <= 0:
break
return result
def __iter__(self):
"""Iterate over lines."""
while True:
line = self.readline()
if not line:
return
yield line
def tell(self):
"""Return the position."""
return self._pos
def seek(self, offset, whence=0):
"""Change position."""
if whence == 1:
self._file.seek(self._pos)
self._file.seek(offset, whence)
self._pos = self._file.tell()
def close(self):
"""Close the file."""
if hasattr(self, '_file'):
try:
if hasattr(self._file, 'close'):
self._file.close()
finally:
del self._file
def _read(self, size, read_method):
"""Read size bytes using read_method."""
if size is None:
size = -1
self._file.seek(self._pos)
result = read_method(size)
self._pos = self._file.tell()
return result
def __enter__(self):
"""Context management protocol support."""
return self
def __exit__(self, *exc):
self.close()
def readable(self):
return self._file.readable()
def writable(self):
return self._file.writable()
def seekable(self):
return self._file.seekable()
def flush(self):
return self._file.flush()
@property
def closed(self):
if not hasattr(self, '_file'):
return True
if not hasattr(self._file, 'closed'):
return False
return self._file.closed
class _PartialFile(_ProxyFile):
"""A read-only wrapper of part of a file."""
def __init__(self, f, start=None, stop=None):
"""Initialize a _PartialFile."""
_ProxyFile.__init__(self, f, start)
self._start = start
self._stop = stop
def tell(self):
"""Return the position with respect to start."""
return _ProxyFile.tell(self) - self._start
def seek(self, offset, whence=0):
"""Change position, possibly with respect to start or stop."""
if whence == 0:
self._pos = self._start
whence = 1
elif whence == 2:
self._pos = self._stop
whence = 1
_ProxyFile.seek(self, offset, whence)
def _read(self, size, read_method):
"""Read size bytes using read_method, honoring start and stop."""
remaining = self._stop - self._pos
if remaining <= 0:
return b''
if size is None or size < 0 or size > remaining:
size = remaining
return _ProxyFile._read(self, size, read_method)
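    # Illustrative sketch (hypothetical offsets): with start=100 and
    # stop=150, tell() reports 0 at the beginning of the region, seek(0)
    # returns to underlying offset 100, and a read(4096) is clamped to the
    # 50 bytes available before stop.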
def close(self):
# do *not* close the underlying file object for partial files,
# since it's global to the mailbox object
if hasattr(self, '_file'):
del self._file
def _lock_file(f, dotlock=True):
"""Lock file f using lockf and dot locking."""
dotlock_done = False
try:
if fcntl:
try:
fcntl.lockf(f, fcntl.LOCK_EX | fcntl.LOCK_NB)
except OSError as e:
if e.errno in (errno.EAGAIN, errno.EACCES, errno.EROFS):
raise ExternalClashError('lockf: lock unavailable: %s' %
f.name)
else:
raise
if dotlock:
try:
pre_lock = _create_temporary(f.name + '.lock')
pre_lock.close()
except OSError as e:
if e.errno in (errno.EACCES, errno.EROFS):
return # Without write access, just skip dotlocking.
else:
raise
try:
if hasattr(os, 'link'):
os.link(pre_lock.name, f.name + '.lock')
dotlock_done = True
os.unlink(pre_lock.name)
else:
os.rename(pre_lock.name, f.name + '.lock')
dotlock_done = True
except FileExistsError:
os.remove(pre_lock.name)
raise ExternalClashError('dot lock unavailable: %s' %
f.name)
except:
if fcntl:
fcntl.lockf(f, fcntl.LOCK_UN)
if dotlock_done:
os.remove(f.name + '.lock')
raise
def _unlock_file(f):
"""Unlock file f using lockf and dot locking."""
if fcntl:
fcntl.lockf(f, fcntl.LOCK_UN)
if os.path.exists(f.name + '.lock'):
os.remove(f.name + '.lock')
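# A minimal usage sketch of the two helpers above (the mailbox path is
# hypothetical):
#
#     f = open('/var/mail/example.mbox', 'rb+')
#     _lock_file(f)        # lockf (when fcntl is available) plus a dot lock
#     try:
#         ...              # read or modify the mailbox
#     finally:
#         _unlock_file(f)  # release lockf and remove 'example.mbox.lock'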
def _create_carefully(path):
"""Create a file if it doesn't exist and open for reading and writing."""
fd = os.open(path, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o666)
try:
return open(path, 'rb+')
finally:
os.close(fd)
def _create_temporary(path):
"""Create a temp file based on path and open for reading and writing."""
return _create_carefully('%s.%s.%s.%s' % (path, int(time.time()),
socket.gethostname(),
os.getpid()))
def _sync_flush(f):
"""Ensure changes to file f are physically on disk."""
f.flush()
if hasattr(os, 'fsync'):
os.fsync(f.fileno())
def _sync_close(f):
"""Close file f, ensuring all changes are physically on disk."""
_sync_flush(f)
f.close()
class Error(Exception):
"""Raised for module-specific errors."""
class NoSuchMailboxError(Error):
"""The specified mailbox does not exist and won't be created."""
class NotEmptyError(Error):
"""The specified mailbox is not empty and deletion was requested."""
class ExternalClashError(Error):
"""Another process caused an action to fail."""
class FormatError(Error):
"""A file appears to have an invalid format."""
| mpl-2.0 |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_1_0/collection_schedule_broker.py | 17 | 28107 | from ..broker import Broker
class CollectionScheduleBroker(Broker):
controller = "collection_schedules"
def index(self, **kwargs):
"""Lists the available collection schedules. Any of the inputs listed may be be used to narrow the list; other inputs will be ignored. Of the various ways to query lists, using this method is most efficient.
**Inputs**
| ``api version min:`` 2.2
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param device_group_id: The internal NetMRI identifier for device group.
:type device_group_id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param device_group_id: The internal NetMRI identifier for device group.
:type device_group_id: Array of Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
            :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, device_group_id, device_id, schedule, schedule_type.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each CollectionSchedule. Valid values are id, device_group_id, device_id, schedule, schedule_type. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return collection_schedules: An array of the CollectionSchedule objects that match the specified input criteria.
:rtype collection_schedules: Array of CollectionSchedule
"""
return self.api_list_request(self._get_method_fullname("index"), kwargs)
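    # Illustrative usage sketch (assumes an InfobloxNetMRI client instance
    # named 'client' configured elsewhere; values are hypothetical):
    #
    #     broker = client.get_broker('CollectionSchedule')
    #     schedules = broker.index(device_group_id=[1], limit=100)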
def show(self, **kwargs):
"""Shows the details for the specified collection schedule.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for schedule.
:type id: Integer
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return collection_schedule: The collection schedule identified by the specified id.
:rtype collection_schedule: CollectionSchedule
"""
return self.api_request(self._get_method_fullname("show"), kwargs)
def search(self, **kwargs):
"""Lists the available collection schedules matching the input criteria. This method provides a more flexible search interface than the index method, but searching using this method is more demanding on the system and will not perform to the same level as the index method. The input fields listed below will be used as in the index method, to filter the result, along with the optional query string and XML filter described below.
**Inputs**
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param device_group_id: The internal NetMRI identifier for device group.
:type device_group_id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param device_group_id: The internal NetMRI identifier for device group.
:type device_group_id: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param device_id: The internal NetMRI identifier for device.
:type device_id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param device_id: The internal NetMRI identifier for device.
:type device_id: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for schedule.
:type id: Integer
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param id: The internal NetMRI identifier for schedule.
:type id: Array of Integer
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param schedule: Schedule name.
:type schedule: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param schedule: Schedule name.
:type schedule: Array of String
| ``api version min:`` 2.3
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param schedule_type: The type of schedule.
:type schedule_type: String
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param schedule_type: The type of schedule.
:type schedule_type: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
            :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, device_group_id, device_id, schedule, schedule_type.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each CollectionSchedule. Valid values are id, device_group_id, device_id, schedule, schedule_type. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against collection schedules, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: device_group_id, device_id, id, schedule, schedule_type.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if it is not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return collection_schedules: An array of the CollectionSchedule objects that match the specified input criteria.
:rtype collection_schedules: Array of CollectionSchedule
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available collection schedules matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: device_group_id, device_id, id, schedule, schedule_type.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_device_group_id: The operator to apply to the field device_group_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. device_group_id: The internal NetMRI identifier for device group. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_device_group_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_device_group_id: If op_device_group_id is specified, the field named in this input will be compared to the value in device_group_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_device_group_id must be specified if op_device_group_id is specified.
:type val_f_device_group_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_device_group_id: If op_device_group_id is specified, this value will be compared to the value in device_group_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_device_group_id must be specified if op_device_group_id is specified.
:type val_c_device_group_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_device_id: The operator to apply to the field device_id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. device_id: The internal NetMRI identifier for device. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_device_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_device_id: If op_device_id is specified, the field named in this input will be compared to the value in device_id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_device_id must be specified if op_device_id is specified.
:type val_f_device_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_device_id: If op_device_id is specified, this value will be compared to the value in device_id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_device_id must be specified if op_device_id is specified.
:type val_c_device_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_id: The operator to apply to the field id. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. id: The internal NetMRI identifier for schedule. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_id: If op_id is specified, the field named in this input will be compared to the value in id using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_id must be specified if op_id is specified.
:type val_f_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_id: If op_id is specified, this value will be compared to the value in id using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_id must be specified if op_id is specified.
:type val_c_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_schedule: The operator to apply to the field schedule. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. schedule: Schedule name. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_schedule: If op_schedule is specified, the field named in this input will be compared to the value in schedule using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_schedule must be specified if op_schedule is specified.
:type val_f_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_schedule: If op_schedule is specified, this value will be compared to the value in schedule using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_schedule must be specified if op_schedule is specified.
:type val_c_schedule: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_schedule_type: The operator to apply to the field schedule_type. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. schedule_type: The type of schedule. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_schedule_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_schedule_type: If op_schedule_type is specified, the field named in this input will be compared to the value in schedule_type using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_schedule_type must be specified if op_schedule_type is specified.
:type val_f_schedule_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_schedule_type: If op_schedule_type is specified, this value will be compared to the value in schedule_type using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_schedule_type must be specified if op_schedule_type is specified.
:type val_c_schedule_type: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
            :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit parameter for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, device_group_id, device_id, schedule, schedule_type.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each CollectionSchedule. Valid values are id, device_group_id, device_id, schedule, schedule_type. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Note that this kind of filter may be costly and inefficient if it is not combined with database-level filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return collection_schedules: An array of the CollectionSchedule objects that match the specified input criteria.
:rtype collection_schedules: Array of CollectionSchedule
"""
return self.api_list_request(self._get_method_fullname("find"), kwargs)
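    # Illustrative sketch of the find() convention documented above (values
    # are hypothetical): to select schedules whose id is at least 10,
    #
    #     broker.find(op_id='>=', val_c_id='10')
    #
    # where op_<field> names the operator, val_c_<field> supplies a constant
    # operand, and val_f_<field> would name another field instead.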
def destroy(self, **kwargs):
"""Deletes the specified collection schedule from NetMRI.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param id: The internal NetMRI identifier for schedule.
:type id: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("destroy"), kwargs)
def update(self, **kwargs):
"""Updates a device group specific polling schedule.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
            :param id: ID of the polling schedule to update.
:type id: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param schedule_cron: Either a cron schedule string, or a number denoting how often (in minutes) polling should run.
:type schedule_cron: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param schedule_json: Internal NetMRI parameters generated by the 'cronscheduler' form, transmitted in JSON format, used to set the cron schedule string.
:type schedule_json: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("update"), kwargs)
def create(self, **kwargs):
"""Creates device group specific polling schedule.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param device_group_id: ID of the device group the polling schedule belongs to.
:type device_group_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param device_id: ID of the device the polling schedule belongs to.
:type device_id: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param schedule_cron: Either a cron schedule string, or a number denoting how often (in minutes) polling should run.
:type schedule_cron: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
            :param schedule_json: Internal NetMRI parameters generated by the 'cronscheduler' form, transmitted in JSON format, used to set the cron schedule string.
:type schedule_json: String
**Outputs**
"""
return self.api_request(self._get_method_fullname("create"), kwargs)
def spm_schedules(self, **kwargs):
"""Lists the Switch Port Management polling schedule entries for a device group.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` True
| ``default:`` None
:param device_group_id: Device group ID for the requested polling schedules.
:type device_group_id: Integer
**Outputs**
"""
return self.api_request(self._get_method_fullname("spm_schedules"), kwargs)
| apache-2.0 |
pk-sam/crosswalk-test-suite | wrt/tct-rt02-wrt-tests/inst.apk.py | 903 | 3180 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARAMETERS = None
ADB_CMD = "adb"
def doCMD(cmd):
    # No need to handle timeouts in this short script; let the calling tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
        if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s uninstall org.xwalk.%s" % (
ADB_CMD, PARAMETERS.device, os.path.basename(os.path.splitext(file)[0]))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def instPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s install %s" % (ADB_CMD,
PARAMETERS.device, os.path.join(root, file))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
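# Illustrative sketch (device name and file are hypothetical): for 'foo.apk'
# on device 'emulator-5554', instPKGs() runs
#
#     adb -s emulator-5554 install <dir>/foo.apk
#
# and treats any output line containing 'Failure' as a failed install.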
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.device:
(return_code, output) = doCMD("adb devices")
for line in output:
            if line.find("\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
if not PARAMETERS.device:
print "No device found"
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
| bsd-3-clause |
akosel/servo | tests/wpt/css-tests/tools/pywebsocket/src/example/bench_wsh.py | 495 | 2322 | # Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A simple load tester for WebSocket clients.
A client program sends a message formatted as "<time> <count> <message>" to
this handler. This handler starts sending total <count> WebSocket messages
containing <message> every <time> seconds. <time> can be a floating point
value. <count> must be an integer value.
"""
import time
def web_socket_do_extra_handshake(request):
pass # Always accept.
def web_socket_transfer_data(request):
line = request.ws_stream.receive_message()
parts = line.split(' ')
if len(parts) != 3:
raise ValueError('Bad parameter format')
wait = float(parts[0])
count = int(parts[1])
message = parts[2]
for i in xrange(count):
request.ws_stream.send_message(message)
time.sleep(wait)
# vi:sts=4 sw=4 et
| mpl-2.0 |
Radagast-red/golem | golem/interface/client/environments.py | 1 | 1641 | from golem.interface.command import group, CommandHelper, Argument, command, CommandResult
@group(name="envs", help="Manage environments")
class Environments(object):
name = Argument('name', help="Environment name")
table_headers = ['name', 'supported', 'active', 'performance', 'description']
sort = Argument(
'--sort',
choices=table_headers,
optional=True,
default=None,
help="Sort environments"
)
@command(argument=sort, help="Show environments")
def show(self, sort):
deferred = Environments.client.get_environments_perf()
result = CommandHelper.wait_for(deferred) or []
values = []
for env in result:
values.append([
env['id'],
str(env['supported']),
str(env['active']),
str(env['performance']),
env['description']
])
return CommandResult.to_tabular(Environments.table_headers, values, sort=sort)
@command(argument=name, help="Enable environment")
def enable(self, name):
deferred = Environments.client.enable_environment(name)
return CommandHelper.wait_for(deferred)
@command(argument=name, help="Disable environment")
def disable(self, name):
deferred = Environments.client.disable_environment(name)
return CommandHelper.wait_for(deferred)
@command(argument=name, help="Recount performance for an environment")
def recount(self, name):
deferred = Environments.client.run_benchmark(name)
return CommandHelper.wait_for(deferred, timeout=1800)
| gpl-3.0 |
pymedusa/SickRage | ext/tornado/platform/auto.py | 4 | 1877 | #
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of platform-specific functionality.
For each function or class described in `tornado.platform.interface`,
the appropriate platform-specific implementation exists in this module.
Most code that needs access to this functionality should do e.g.::
from tornado.platform.auto import set_close_exec
"""
from __future__ import absolute_import, division, print_function
import os
if 'APPENGINE_RUNTIME' in os.environ:
from tornado.platform.common import Waker
def set_close_exec(fd):
pass
elif os.name == 'nt':
from tornado.platform.common import Waker
from tornado.platform.windows import set_close_exec
else:
from tornado.platform.posix import set_close_exec, Waker
try:
# monotime monkey-patches the time module to have a monotonic function
# in versions of python before 3.3.
import monotime
# Silence pyflakes warning about this unused import
monotime
except ImportError:
pass
try:
# monotonic can provide a monotonic function in versions of python before
# 3.3, too.
from monotonic import monotonic as monotonic_time
except ImportError:
try:
from time import monotonic as monotonic_time
except ImportError:
monotonic_time = None
__all__ = ['Waker', 'set_close_exec', 'monotonic_time']
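# Usage sketch (assumption: the caller tolerates a non-monotonic fallback
# when no monotonic clock is available on this interpreter):
#
#   import time
#   now = monotonic_time() if monotonic_time is not None else time.time()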
| gpl-3.0 |
google/llvm-propeller | llvm/bindings/python/llvm/tests/test_object.py | 36 | 2201 | from numbers import Integral
from .base import TestBase
from ..object import ObjectFile
from ..object import Relocation
from ..object import Section
from ..object import Symbol
class TestObjectFile(TestBase):
def get_object_file(self):
source = self.get_test_binary()
return ObjectFile(filename=source)
def test_create_from_file(self):
self.get_object_file()
def test_get_sections(self):
o = self.get_object_file()
count = 0
for section in o.get_sections():
count += 1
assert isinstance(section, Section)
assert isinstance(section.name, str)
assert isinstance(section.size, Integral)
assert isinstance(section.contents, str)
assert isinstance(section.address, Integral)
assert len(section.contents) == section.size
self.assertGreater(count, 0)
for section in o.get_sections():
section.cache()
def test_get_symbols(self):
o = self.get_object_file()
count = 0
for symbol in o.get_symbols():
count += 1
assert isinstance(symbol, Symbol)
assert isinstance(symbol.name, str)
assert isinstance(symbol.address, Integral)
assert isinstance(symbol.size, Integral)
self.assertGreater(count, 0)
for symbol in o.get_symbols():
symbol.cache()
def test_symbol_section_accessor(self):
o = self.get_object_file()
for symbol in o.get_symbols():
section = symbol.section
assert isinstance(section, Section)
break
def test_get_relocations(self):
o = self.get_object_file()
for section in o.get_sections():
for relocation in section.get_relocations():
assert isinstance(relocation, Relocation)
assert isinstance(relocation.address, Integral)
assert isinstance(relocation.offset, Integral)
assert isinstance(relocation.type_number, Integral)
assert isinstance(relocation.type_name, str)
assert isinstance(relocation.value_string, str)
| apache-2.0 |
wavewave/madgraph-auto-model | modelrepo/ADMXQLD333/particles.py | 8 | 24125 | # This file was automatically created by FeynRules $Revision: 510 $
# Mathematica version: 7.0 for Linux x86 (64-bit) (February 18, 2009)
# Date: Wed 2 Mar 2011 15:09:55
from __future__ import division
from object_library import all_particles, Particle
import parameters as Param
a = Particle(pdg_code = 22,
name = 'a',
antiname = 'a',
spin = 3,
color = 1,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'a',
antitexname = 'a',
charge = 0,
Y = 0,
GhostNumber = 0)
Z = Particle(pdg_code = 23,
name = 'Z',
antiname = 'Z',
spin = 3,
color = 1,
mass = Param.MZ,
width = Param.WZ,
texname = 'Z',
antitexname = 'Z',
charge = 0,
Y = 0,
GhostNumber = 0)
W__plus__ = Particle(pdg_code = 24,
name = 'W+',
antiname = 'W-',
spin = 3,
color = 1,
mass = Param.MW,
width = Param.WW,
texname = 'W+',
antitexname = 'W+',
charge = 1,
Y = 0,
GhostNumber = 0)
W__minus__ = W__plus__.anti()
g = Particle(pdg_code = 21,
name = 'g',
antiname = 'g',
spin = 3,
color = 8,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'g',
antitexname = 'g',
charge = 0,
Y = 0,
GhostNumber = 0)
n1 = Particle(pdg_code = 1000022,
name = 'n1',
antiname = 'n1',
spin = 2,
color = 1,
mass = Param.Mneu1,
width = Param.Wneu1,
texname = 'n1',
antitexname = 'n1',
charge = 0,
Y = 0,
GhostNumber = 0)
n2 = Particle(pdg_code = 1000023,
name = 'n2',
antiname = 'n2',
spin = 2,
color = 1,
mass = Param.Mneu2,
width = Param.Wneu2,
texname = 'n2',
antitexname = 'n2',
charge = 0,
Y = 0,
GhostNumber = 0)
n3 = Particle(pdg_code = 1000025,
name = 'n3',
antiname = 'n3',
spin = 2,
color = 1,
mass = Param.Mneu3,
width = Param.Wneu3,
texname = 'n3',
antitexname = 'n3',
charge = 0,
Y = 0,
GhostNumber = 0)
n4 = Particle(pdg_code = 1000035,
name = 'n4',
antiname = 'n4',
spin = 2,
color = 1,
mass = Param.Mneu4,
width = Param.Wneu4,
texname = 'n4',
antitexname = 'n4',
charge = 0,
Y = 0,
GhostNumber = 0)
x1__plus__ = Particle(pdg_code = 1000024,
name = 'x1+',
antiname = 'x1-',
spin = 2,
color = 1,
mass = Param.Mch1,
width = Param.Wch1,
texname = 'x1+',
antitexname = 'x1+',
charge = 1,
Y = 0,
GhostNumber = 0)
x1__minus__ = x1__plus__.anti()
x2__plus__ = Particle(pdg_code = 1000037,
name = 'x2+',
antiname = 'x2-',
spin = 2,
color = 1,
mass = Param.Mch2,
width = Param.Wch2,
texname = 'x2+',
antitexname = 'x2+',
charge = 1,
Y = 0,
GhostNumber = 0)
x2__minus__ = x2__plus__.anti()
go = Particle(pdg_code = 1000021,
name = 'go',
antiname = 'go',
spin = 2,
color = 8,
mass = Param.Mgo,
width = Param.Wgo,
texname = 'go',
antitexname = 'go',
charge = 0,
Y = 0,
GhostNumber = 0)
h01 = Particle(pdg_code = 25,
name = 'h1',
antiname = 'h1',
spin = 1,
color = 1,
mass = Param.MH01,
width = Param.WH01,
texname = 'h1',
antitexname = 'h1',
charge = 0,
Y = 0,
GhostNumber = 0)
h02 = Particle(pdg_code = 35,
name = 'h02',
antiname = 'h02',
spin = 1,
color = 1,
mass = Param.MH02,
width = Param.WH02,
texname = 'h02',
antitexname = 'h02',
charge = 0,
Y = 0,
GhostNumber = 0)
A0 = Particle(pdg_code = 36,
name = 'A0',
antiname = 'A0',
spin = 1,
color = 1,
mass = Param.MA0,
width = Param.WA0,
texname = 'A0',
antitexname = 'A0',
charge = 0,
Y = 0,
GhostNumber = 0)
H__plus__ = Particle(pdg_code = 37,
name = 'H+',
antiname = 'H-',
spin = 1,
color = 1,
mass = Param.MH,
width = Param.WH,
texname = 'H+',
antitexname = 'H+',
charge = 1,
Y = 0,
GhostNumber = 0)
H__minus__ = H__plus__.anti()
G0 = Particle(pdg_code = 250,
name = 'G0',
antiname = 'G0',
spin = 1,
color = 1,
mass = Param.MZ,
width = Param.ZERO,
texname = 'G0',
antitexname = 'G0',
GoldstoneBoson = True,
charge = 0,
Y = 0,
GhostNumber = 0)
G__plus__ = Particle(pdg_code = 251,
name = 'G+',
antiname = 'G-',
spin = 1,
color = 1,
mass = Param.MW,
width = Param.ZERO,
texname = 'G+',
antitexname = 'G+',
GoldstoneBoson = True,
charge = 1,
Y = 0,
GhostNumber = 0)
G__minus__ = G__plus__.anti()
ve = Particle(pdg_code = 12,
name = 've',
antiname = 've~',
spin = 2,
color = 1,
mass = Param.Mve,
width = Param.ZERO,
texname = 've',
antitexname = 've',
charge = 0,
Y = 0,
GhostNumber = 0)
ve__tilde__ = ve.anti()
vm = Particle(pdg_code = 14,
name = 'vm',
antiname = 'vm~',
spin = 2,
color = 1,
mass = Param.Mvm,
width = Param.ZERO,
texname = 'vm',
antitexname = 'vm',
charge = 0,
Y = 0,
GhostNumber = 0)
vm__tilde__ = vm.anti()
vt = Particle(pdg_code = 16,
name = 'vt',
antiname = 'vt~',
spin = 2,
color = 1,
mass = Param.Mvt,
width = Param.ZERO,
texname = 'vt',
antitexname = 'vt',
charge = 0,
Y = 0,
GhostNumber = 0)
vt__tilde__ = vt.anti()
e__minus__ = Particle(pdg_code = 11,
name = 'e-',
antiname = 'e+',
spin = 2,
color = 1,
mass = Param.Me,
width = Param.ZERO,
texname = 'e-',
antitexname = 'e-',
charge = -1,
Y = 0,
GhostNumber = 0)
e__plus__ = e__minus__.anti()
mu__minus__ = Particle(pdg_code = 13,
name = 'mu-',
antiname = 'mu+',
spin = 2,
color = 1,
mass = Param.Mm,
width = Param.ZERO,
texname = 'mu-',
antitexname = 'mu-',
charge = -1,
Y = 0,
GhostNumber = 0)
mu__plus__ = mu__minus__.anti()
tau__minus__ = Particle(pdg_code = 15,
name = 'tau-',
antiname = 'tau+',
spin = 2,
color = 1,
mass = Param.Mta,
width = Param.ZERO,
texname = 'tau-',
antitexname = 'tau-',
charge = -1,
Y = 0,
GhostNumber = 0)
tau__plus__ = tau__minus__.anti()
u = Particle(pdg_code = 2,
name = 'u',
antiname = 'u~',
spin = 2,
color = 3,
mass = Param.MU,
width = Param.ZERO,
texname = 'u',
antitexname = 'u',
charge = 2/3,
Y = 0,
GhostNumber = 0)
u__tilde__ = u.anti()
c = Particle(pdg_code = 4,
name = 'c',
antiname = 'c~',
spin = 2,
color = 3,
mass = Param.MC,
width = Param.ZERO,
texname = 'c',
antitexname = 'c',
charge = 2/3,
Y = 0,
GhostNumber = 0)
c__tilde__ = c.anti()
t = Particle(pdg_code = 6,
name = 't',
antiname = 't~',
spin = 2,
color = 3,
mass = Param.MT,
width = Param.WT,
texname = 't',
antitexname = 't',
charge = 2/3,
Y = 0,
GhostNumber = 0)
t__tilde__ = t.anti()
d = Particle(pdg_code = 1,
name = 'd',
antiname = 'd~',
spin = 2,
color = 3,
mass = Param.MD,
width = Param.ZERO,
texname = 'd',
antitexname = 'd',
charge = -1/3,
Y = 0,
GhostNumber = 0)
d__tilde__ = d.anti()
s = Particle(pdg_code = 3,
name = 's',
antiname = 's~',
spin = 2,
color = 3,
mass = Param.MS,
width = Param.ZERO,
texname = 's',
antitexname = 's',
charge = -1/3,
Y = 0,
GhostNumber = 0)
s__tilde__ = s.anti()
b = Particle(pdg_code = 5,
name = 'b',
antiname = 'b~',
spin = 2,
color = 3,
mass = Param.MB,
width = Param.ZERO,
texname = 'b',
antitexname = 'b',
charge = -1/3,
Y = 0,
GhostNumber = 0)
b__tilde__ = b.anti()
sv1 = Particle(pdg_code = 1000012,
name = 'sv1',
antiname = 'sv1~',
spin = 1,
color = 1,
mass = Param.Msn1,
width = Param.Wsn1,
texname = 'sv1',
antitexname = 'sv1',
charge = 0,
Y = 0,
GhostNumber = 0)
sv1__tilde__ = sv1.anti()
sv2 = Particle(pdg_code = 1000014,
name = 'sv2',
antiname = 'sv2~',
spin = 1,
color = 1,
mass = Param.Msn2,
width = Param.Wsn2,
texname = 'sv2',
antitexname = 'sv2',
charge = 0,
Y = 0,
GhostNumber = 0)
sv2__tilde__ = sv2.anti()
sv3 = Particle(pdg_code = 1000016,
name = 'sv3',
antiname = 'sv3~',
spin = 1,
color = 1,
mass = Param.Msn3,
width = Param.Wsn3,
texname = 'sv3',
antitexname = 'sv3',
charge = 0,
Y = 0,
GhostNumber = 0)
sv3__tilde__ = sv3.anti()
sl1__minus__ = Particle(pdg_code = 1000011,
name = 'sl1-',
antiname = 'sl1+',
spin = 1,
color = 1,
mass = Param.Msl1,
width = Param.Wsl1,
texname = 'sl1-',
antitexname = 'sl1-',
charge = -1,
Y = 0,
GhostNumber = 0)
sl1__plus__ = sl1__minus__.anti()
sl2__minus__ = Particle(pdg_code = 1000013,
name = 'sl2-',
antiname = 'sl2+',
spin = 1,
color = 1,
mass = Param.Msl2,
width = Param.Wsl2,
texname = 'sl2-',
antitexname = 'sl2-',
charge = -1,
Y = 0,
GhostNumber = 0)
sl2__plus__ = sl2__minus__.anti()
sl3__minus__ = Particle(pdg_code = 1000015,
name = 'sl3-',
antiname = 'sl3+',
spin = 1,
color = 1,
mass = Param.Msl3,
width = Param.Wsl3,
texname = 'sl3-',
antitexname = 'sl3-',
charge = -1,
Y = 0,
GhostNumber = 0)
sl3__plus__ = sl3__minus__.anti()
sl4__minus__ = Particle(pdg_code = 2000011,
name = 'sl4-',
antiname = 'sl4+',
spin = 1,
color = 1,
mass = Param.Msl4,
width = Param.Wsl4,
texname = 'sl4-',
antitexname = 'sl4-',
charge = -1,
Y = 0,
GhostNumber = 0)
sl4__plus__ = sl4__minus__.anti()
sl5__minus__ = Particle(pdg_code = 2000013,
name = 'sl5-',
antiname = 'sl5+',
spin = 1,
color = 1,
mass = Param.Msl5,
width = Param.Wsl5,
texname = 'sl5-',
antitexname = 'sl5-',
charge = -1,
Y = 0,
GhostNumber = 0)
sl5__plus__ = sl5__minus__.anti()
sl6__minus__ = Particle(pdg_code = 2000015,
name = 'sl6-',
antiname = 'sl6+',
spin = 1,
color = 1,
mass = Param.Msl6,
width = Param.Wsl6,
texname = 'sl6-',
antitexname = 'sl6-',
charge = -1,
Y = 0,
GhostNumber = 0)
sl6__plus__ = sl6__minus__.anti()
su1 = Particle(pdg_code = 1000002,
name = 'su1',
antiname = 'su1~',
spin = 1,
color = 3,
mass = Param.Msu1,
width = Param.Wsu1,
texname = 'su1',
antitexname = 'su1',
charge = 2/3,
Y = 0,
GhostNumber = 0)
su1__tilde__ = su1.anti()
su2 = Particle(pdg_code = 1000004,
name = 'su2',
antiname = 'su2~',
spin = 1,
color = 3,
mass = Param.Msu2,
width = Param.Wsu2,
texname = 'su2',
antitexname = 'su2',
charge = 2/3,
Y = 0,
GhostNumber = 0)
su2__tilde__ = su2.anti()
su3 = Particle(pdg_code = 1000006,
name = 'su3',
antiname = 'su3~',
spin = 1,
color = 3,
mass = Param.Msu3,
width = Param.Wsu3,
texname = 'su3',
antitexname = 'su3',
charge = 2/3,
Y = 0,
GhostNumber = 0)
su3__tilde__ = su3.anti()
su4 = Particle(pdg_code = 2000002,
name = 'su4',
antiname = 'su4~',
spin = 1,
color = 3,
mass = Param.Msu4,
width = Param.Wsu4,
texname = 'su4',
antitexname = 'su4',
charge = 2/3,
Y = 0,
GhostNumber = 0)
su4__tilde__ = su4.anti()
su5 = Particle(pdg_code = 2000004,
name = 'su5',
antiname = 'su5~',
spin = 1,
color = 3,
mass = Param.Msu5,
width = Param.Wsu5,
texname = 'su5',
antitexname = 'su5',
charge = 2/3,
Y = 0,
GhostNumber = 0)
su5__tilde__ = su5.anti()
su6 = Particle(pdg_code = 2000006,
name = 'su6',
antiname = 'su6~',
spin = 1,
color = 3,
mass = Param.Msu6,
width = Param.Wsu6,
texname = 'su6',
antitexname = 'su6',
charge = 2/3,
Y = 0,
GhostNumber = 0)
su6__tilde__ = su6.anti()
sd1 = Particle(pdg_code = 1000001,
name = 'sd1',
antiname = 'sd1~',
spin = 1,
color = 3,
mass = Param.Msd1,
width = Param.Wsd1,
texname = 'sd1',
antitexname = 'sd1',
charge = -1/3,
Y = 0,
GhostNumber = 0)
sd1__tilde__ = sd1.anti()
sd2 = Particle(pdg_code = 1000003,
name = 'sd2',
antiname = 'sd2~',
spin = 1,
color = 3,
mass = Param.Msd2,
width = Param.Wsd2,
texname = 'sd2',
antitexname = 'sd2',
charge = -1/3,
Y = 0,
GhostNumber = 0)
sd2__tilde__ = sd2.anti()
sd3 = Particle(pdg_code = 1000005,
name = 'sd3',
antiname = 'sd3~',
spin = 1,
color = 3,
mass = Param.Msd3,
width = Param.Wsd3,
texname = 'sd3',
antitexname = 'sd3',
charge = -1/3,
Y = 0,
GhostNumber = 0)
sd3__tilde__ = sd3.anti()
sd4 = Particle(pdg_code = 2000001,
name = 'sd4',
antiname = 'sd4~',
spin = 1,
color = 3,
mass = Param.Msd4,
width = Param.Wsd4,
texname = 'sd4',
antitexname = 'sd4',
charge = -1/3,
Y = 0,
GhostNumber = 0)
sd4__tilde__ = sd4.anti()
sd5 = Particle(pdg_code = 2000003,
name = 'sd5',
antiname = 'sd5~',
spin = 1,
color = 3,
mass = Param.Msd5,
width = Param.Wsd5,
texname = 'sd5',
antitexname = 'sd5',
charge = -1/3,
Y = 0,
GhostNumber = 0)
sd5__tilde__ = sd5.anti()
sd6 = Particle(pdg_code = 2000005,
name = 'sd6',
antiname = 'sd6~',
spin = 1,
color = 3,
mass = Param.Msd6,
width = Param.Wsd6,
texname = 'sd6',
antitexname = 'sd6',
charge = -1/3,
Y = 0,
GhostNumber = 0)
sd6__tilde__ = sd6.anti()
ghG = Particle(pdg_code = 9000001,
name = 'ghG',
antiname = 'ghG~',
spin = -1,
color = 8,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'ghG',
antitexname = 'ghG',
charge = 0,
Y = 0,
GhostNumber = 1)
ghG__tilde__ = ghG.anti()
ghA = Particle(pdg_code = 9000002,
name = 'ghA',
antiname = 'ghA~',
spin = -1,
color = 1,
mass = Param.ZERO,
width = Param.ZERO,
texname = 'ghA',
antitexname = 'ghA',
charge = 0,
Y = 0,
GhostNumber = 1)
ghA__tilde__ = ghA.anti()
ghZ = Particle(pdg_code = 9000003,
name = 'ghZ',
antiname = 'ghZ~',
spin = -1,
color = 1,
mass = Param.MZ,
width = Param.WZ,
texname = 'ghZ',
antitexname = 'ghZ',
charge = 0,
Y = 0,
GhostNumber = 1)
ghZ__tilde__ = ghZ.anti()
ghWp = Particle(pdg_code = 9000004,
name = 'ghWp',
antiname = 'ghWp~',
spin = -1,
color = 1,
mass = Param.MW,
width = Param.WW,
texname = 'ghWp',
antitexname = 'ghWp',
charge = 1,
Y = 0,
GhostNumber = 1)
ghWp__tilde__ = ghWp.anti()
ghWm = Particle(pdg_code = 9000005,
name = 'ghWm',
antiname = 'ghWm~',
spin = -1,
color = 1,
mass = Param.MW,
width = Param.WW,
texname = 'ghWm',
antitexname = 'ghWm',
charge = -1,
Y = 0,
GhostNumber = 1)
ghWm__tilde__ = ghWm.anti()
#
# added for ADM
#
# by Deshpreet Singh Bedi and Ian-Woo Kim
# date: Apr 6, 2012
# updated: May 16, 2012
#
# fermion
xx = Particle( pdg_code = 9000101,
name = 'xx',
antiname = 'xx~',
spin = 2,
color = 1,
mass = Param.mxx, # we should change
width = Param.ZERO, # Param.wxx, # we should change
texname = 'xx',
antitexname = 'xx',
charge = 0,
Y = 0,
GhostNumber = 0)
xx__tilde__ = xx.anti()
# scalar
sxxp = Particle(pdg_code = 9000201,
name = 'sxxp',
antiname = 'sxxp~',
spin = 1,
color = 1,
mass = Param.msxxp, # we should change
width = Param.ZERO, # we should change
texname = 'sxp',
antitexname = 's\\tilde{xp}',
charge = 0,
Y = 0,
GhostNumber = 0)
sxxp__tilde__ = sxxp.anti()
sxxn = Particle(pdg_code = 9000202,
name = 'sxxn',
antiname = 'sxxn~',
spin = 1,
color = 1,
mass = Param.msxxn, # we should change
width = Param.ZERO, # we should change
texname = 'sxn',
antitexname = 's\\tilde{xn}',
charge = 0,
Y = 0,
GhostNumber = 0)
sxxn__tilde__ = sxxn.anti()
| bsd-2-clause |
mpenning/exscript | tests/Exscript/FileLoggerTest.py | 6 | 3087 | import sys, unittest, re, os.path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'src'))
from tempfile import mkdtemp
from shutil import rmtree
from Exscript import Host
from Exscript.FileLogger import FileLogger
from LoggerTest import LoggerTest, FakeJob
class FakeError(Exception):
pass
class FileLoggerTest(LoggerTest):
CORRELATE = FileLogger
def setUp(self):
self.tempdir = mkdtemp()
self.logdir = os.path.join(self.tempdir, 'non-existent')
self.logger = FileLogger(self.logdir, clearmem = False)
self.job = FakeJob('fake')
self.logfile = os.path.join(self.logdir, 'fake.log')
self.errfile = self.logfile + '.error'
def tearDown(self):
LoggerTest.tearDown(self)
rmtree(self.tempdir)
def testConstructor(self):
self.assert_(os.path.isdir(self.tempdir))
self.failIf(os.path.exists(self.logfile))
self.failIf(os.path.exists(self.errfile))
def testAddLog(self):
log = LoggerTest.testAddLog(self)
self.assert_(os.path.isfile(self.logfile), 'No such file: ' + self.logfile)
self.failIf(os.path.exists(self.errfile))
return log
def testLog(self):
log = LoggerTest.testLog(self)
self.assert_(os.path.isfile(self.logfile))
self.failIf(os.path.exists(self.errfile))
return log
def testLogAborted(self):
log = LoggerTest.testLogAborted(self)
self.assert_(os.path.isfile(self.logfile))
self.assert_(os.path.isfile(self.errfile))
return log
def testLogSucceeded(self):
log = LoggerTest.testLogSucceeded(self)
self.assert_(os.path.isfile(self.logfile))
self.failIf(os.path.isfile(self.errfile))
return log
def testAddLog2(self):
# Like testAddLog(), but with attempt = 2.
self.logfile = os.path.join(self.logdir, self.job.name + '_retry1.log')
self.errfile = self.logfile + '.error'
self.failIf(os.path.exists(self.logfile))
self.failIf(os.path.exists(self.errfile))
self.logger.add_log(id(self.job), self.job.name, 2)
self.assert_(os.path.isfile(self.logfile))
self.failIf(os.path.exists(self.errfile))
content = open(self.logfile).read()
self.assertEqual(content, '')
def testLog2(self):
# Like testLog(), but with attempt = 2.
self.testAddLog2()
self.logger.log(id(self.job), 'hello world')
self.assert_(os.path.isfile(self.logfile))
self.failIf(os.path.exists(self.errfile))
content = open(self.logfile).read()
self.assertEqual(content, 'hello world')
def testLogSucceeded2(self):
# With attempt = 2.
self.testLog2()
self.logger.log_succeeded(id(self.job))
self.assert_(os.path.isfile(self.logfile))
self.failIf(os.path.exists(self.errfile))
def suite():
return unittest.TestLoader().loadTestsFromTestCase(FileLoggerTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity = 2).run(suite())
| gpl-2.0 |
Suninus/NewsBlur | apps/profile/migrations/0019_dashboard_date.py | 18 | 6206 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Profile.dashboard_date'
db.add_column('profile_profile', 'dashboard_date', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now), keep_default=False)
def backwards(self, orm):
# Deleting field 'Profile.dashboard_date'
db.delete_column('profile_profile', 'dashboard_date')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profile.profile': {
'Meta': {'object_name': 'Profile'},
'collapsed_folders': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'dashboard_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feed_pane_size': ('django.db.models.fields.IntegerField', [], {'default': '240'}),
'has_found_friends': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'has_setup_feeds': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'has_trained_intelligence': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'hide_getting_started': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'hide_mobile': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_premium': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_seen_ip': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'last_seen_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'preferences': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'secret_token': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True', 'blank': 'True'}),
'send_emails': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'stripe_4_digits': ('django.db.models.fields.CharField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'stripe_id': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
'timezone': ('vendor.timezones.fields.TimeZoneField', [], {'default': "'America/New_York'", 'max_length': '100'}),
'tutorial_finished': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': "orm['auth.User']"}),
'view_settings': ('django.db.models.fields.TextField', [], {'default': "'{}'"})
}
}
complete_apps = ['profile']
| mit |
gianina-ingenuity/titanium-branch-deep-linking | testbed/x/mobilesdk/osx/5.5.1.GA/common/markdown/preprocessors.py | 112 | 7128 |
"""
PRE-PROCESSORS
=============================================================================
Preprocessors work on source text before we start doing anything too
complicated.
"""
import re
import markdown
HTML_PLACEHOLDER_PREFIX = markdown.STX+"wzxhzdk:"
HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%d" + markdown.ETX
class Processor:
def __init__(self, markdown_instance=None):
if markdown_instance:
self.markdown = markdown_instance
class Preprocessor (Processor):
"""
Preprocessors are run after the text is broken into lines.
Each preprocessor implements a "run" method that takes a pointer to a
list of lines of the document, modifies it as necessary and returns
either the same pointer or a pointer to a new list.
Preprocessors must extend markdown.Preprocessor.
"""
def run(self, lines):
"""
Each subclass of Preprocessor should override the `run` method, which
takes the document as a list of strings split by newlines and returns
the (possibly modified) list of lines.
"""
pass
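# Minimal sketch of the Preprocessor contract described above (hypothetical
# subclass, not part of the markdown distribution): it receives the document
# as a list of lines and returns a possibly modified list of lines.
#
#   class StripTrailingWhitespace(Preprocessor):
#       def run(self, lines):
#           return [line.rstrip() for line in lines]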
class HtmlStash:
"""
This class is used for stashing HTML objects that we extract
in the beginning and replace with place-holders.
"""
def __init__ (self):
""" Create a HtmlStash. """
self.html_counter = 0 # for counting inline html segments
self.rawHtmlBlocks=[]
def store(self, html, safe=False):
"""
Saves an HTML segment for later reinsertion. Returns a
placeholder string that needs to be inserted into the
document.
Keyword arguments:
* html: an html segment
* safe: label an html segment as safe for safemode
Returns : a placeholder string
"""
self.rawHtmlBlocks.append((html, safe))
placeholder = HTML_PLACEHOLDER % self.html_counter
self.html_counter += 1
return placeholder
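    # Sketch of the store/placeholder round trip (hypothetical values):
    #
    #   stash = HtmlStash()
    #   placeholder = stash.store("<div>raw</div>")  # STX + "wzxhzdk:0" + ETX
    #   html, safe = stash.rawHtmlBlocks[0]          # ("<div>raw</div>", False)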
def reset(self):
self.html_counter = 0
self.rawHtmlBlocks = []
class HtmlBlockPreprocessor(Preprocessor):
"""Remove html blocks from the text and store them for later retrieval."""
right_tag_patterns = ["</%s>", "%s>"]
def _get_left_tag(self, block):
return block[1:].replace(">", " ", 1).split()[0].lower()
def _get_right_tag(self, left_tag, block):
for p in self.right_tag_patterns:
tag = p % left_tag
i = block.rfind(tag)
if i > 2:
return tag.lstrip("<").rstrip(">"), i + len(p)-2 + len(left_tag)
return block.rstrip()[-len(left_tag)-2:-1].lower(), len(block)
def _equal_tags(self, left_tag, right_tag):
if left_tag == 'div' or left_tag[0] in ['?', '@', '%']: # handle PHP, etc.
return True
if ("/" + left_tag) == right_tag:
return True
if (right_tag == "--" and left_tag == "--"):
return True
elif left_tag == right_tag[1:] \
and right_tag[0] != "<":
return True
else:
return False
def _is_oneliner(self, tag):
return (tag in ['hr', 'hr/'])
def run(self, lines):
text = "\n".join(lines)
new_blocks = []
text = text.split("\n\n")
items = []
left_tag = ''
right_tag = ''
in_tag = False # flag
while text:
block = text[0]
if block.startswith("\n"):
block = block[1:]
text = text[1:]
if block.startswith("\n"):
block = block[1:]
if not in_tag:
if block.startswith("<"):
left_tag = self._get_left_tag(block)
right_tag, data_index = self._get_right_tag(left_tag, block)
if block[1] == "!":
# is a comment block
left_tag = "--"
right_tag, data_index = self._get_right_tag(left_tag, block)
# keep checking conditions below and maybe just append
if data_index < len(block) \
and markdown.isBlockLevel(left_tag):
text.insert(0, block[data_index:])
block = block[:data_index]
if not (markdown.isBlockLevel(left_tag) \
or block[1] in ["!", "?", "@", "%"]):
new_blocks.append(block)
continue
if self._is_oneliner(left_tag):
new_blocks.append(block.strip())
continue
if block.rstrip().endswith(">") \
and self._equal_tags(left_tag, right_tag):
new_blocks.append(
self.markdown.htmlStash.store(block.strip()))
continue
else: #if not block[1] == "!":
# if is block level tag and is not complete
if markdown.isBlockLevel(left_tag) or left_tag == "--" \
and not block.rstrip().endswith(">"):
items.append(block.strip())
in_tag = True
else:
new_blocks.append(
self.markdown.htmlStash.store(block.strip()))
continue
new_blocks.append(block)
else:
items.append(block.strip())
right_tag, data_index = self._get_right_tag(left_tag, block)
if self._equal_tags(left_tag, right_tag):
# if find closing tag
in_tag = False
new_blocks.append(
self.markdown.htmlStash.store('\n\n'.join(items)))
items = []
if items:
new_blocks.append(self.markdown.htmlStash.store('\n\n'.join(items)))
new_blocks.append('\n')
new_text = "\n\n".join(new_blocks)
return new_text.split("\n")
class ReferencePreprocessor(Preprocessor):
""" Remove reference definitions from text and store for later use. """
RE = re.compile(r'^(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)$', re.DOTALL)
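    # For illustration (hypothetical reference definition this RE matches):
    #
    #   [example]: http://example.com/  "Optional Title"
    #
    # which is stored as references['example'] =
    # ('http://example.com/', 'Optional Title') and removed from the output.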
def run (self, lines):
new_text = []
for line in lines:
m = self.RE.match(line)
if m:
id = m.group(2).strip().lower()
t = m.group(4).strip() # potential title
if not t:
self.markdown.references[id] = (m.group(3), t)
elif (len(t) >= 2
and (t[0] == t[-1] == "\""
or t[0] == t[-1] == "\'"
or (t[0] == "(" and t[-1] == ")") ) ):
self.markdown.references[id] = (m.group(3), t[1:-1])
else:
new_text.append(line)
else:
new_text.append(line)
return new_text #+ "\n"
| mit |
r-mart/scikit-learn | examples/tree/plot_tree_regression.py | 206 | 1476 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
A :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision tree learns overly fine
details of the training data, i.e. it fits the noise and overfits.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
arista-eosplus/ansible | test/units/module_utils/basic/test_exit_json.py | 18 | 7252 | # -*- coding: utf-8 -*-
# (c) 2015, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import copy
import json
import sys
from ansible.compat.tests import unittest
from ansible.module_utils import basic
from units.mock.procenv import swap_stdin_and_argv, swap_stdout
empty_invocation = {u'module_args': {}}
class TestAnsibleModuleExitJson(unittest.TestCase):
def setUp(self):
args = json.dumps(dict(ANSIBLE_MODULE_ARGS={}))
self.stdin_swap_ctx = swap_stdin_and_argv(stdin_data=args)
self.stdin_swap_ctx.__enter__()
# since we can't use context managers and "with" without overriding run(), call them directly
self.stdout_swap_ctx = swap_stdout()
self.fake_stream = self.stdout_swap_ctx.__enter__()
basic._ANSIBLE_ARGS = None
self.module = basic.AnsibleModule(argument_spec=dict())
def tearDown(self):
# since we can't use context managers and "with" without overriding run(), call them directly to clean up
self.stdin_swap_ctx.__exit__(None, None, None)
self.stdout_swap_ctx.__exit__(None, None, None)
def test_exit_json_no_args_exits(self):
with self.assertRaises(SystemExit) as ctx:
self.module.exit_json()
if isinstance(ctx.exception, int):
# Python2.6... why does sys.exit behave this way?
self.assertEquals(ctx.exception, 0)
else:
self.assertEquals(ctx.exception.code, 0)
return_val = json.loads(self.fake_stream.getvalue())
self.assertEquals(return_val, dict(changed=False, invocation=empty_invocation))
def test_exit_json_args_exits(self):
with self.assertRaises(SystemExit) as ctx:
self.module.exit_json(msg='message')
if isinstance(ctx.exception, int):
# Python2.6... why does sys.exit behave this way?
self.assertEquals(ctx.exception, 0)
else:
self.assertEquals(ctx.exception.code, 0)
return_val = json.loads(self.fake_stream.getvalue())
self.assertEquals(return_val, dict(msg="message", changed=False, invocation=empty_invocation))
def test_fail_json_exits(self):
with self.assertRaises(SystemExit) as ctx:
self.module.fail_json(msg='message')
if isinstance(ctx.exception, int):
# Python2.6... why does sys.exit behave this way?
self.assertEquals(ctx.exception, 1)
else:
self.assertEquals(ctx.exception.code, 1)
return_val = json.loads(self.fake_stream.getvalue())
self.assertEquals(return_val, dict(msg="message", changed=False, failed=True, invocation=empty_invocation))
def test_exit_json_proper_changed(self):
with self.assertRaises(SystemExit) as ctx:
self.module.exit_json(changed=True, msg='success')
return_val = json.loads(self.fake_stream.getvalue())
self.assertEquals(return_val, dict(changed=True, msg='success', invocation=empty_invocation))
class TestAnsibleModuleExitValuesRemoved(unittest.TestCase):
OMIT = 'VALUE_SPECIFIED_IN_NO_LOG_PARAMETER'
dataset = (
(
dict(username='person', password='$ecret k3y'),
dict(one=1, pwd='$ecret k3y', url='https://username:[email protected]/login/',
not_secret='following the leader', msg='here'),
dict(one=1, pwd=OMIT, url='https://username:[email protected]/login/',
not_secret='following the leader', changed=False, msg='here',
invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
),
(
dict(username='person', password='password12345'),
dict(one=1, pwd='$ecret k3y', url='https://username:[email protected]/login/',
not_secret='following the leader', msg='here'),
dict(one=1, pwd='$ecret k3y', url='https://username:********@foo.com/login/',
not_secret='following the leader', changed=False, msg='here',
invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
),
(
dict(username='person', password='$ecret k3y'),
dict(one=1, pwd='$ecret k3y', url='https://username:$ecret [email protected]/login/',
not_secret='following the leader', msg='here'),
dict(one=1, pwd=OMIT, url='https://username:********@foo.com/login/',
not_secret='following the leader', changed=False, msg='here',
invocation=dict(module_args=dict(password=OMIT, token=None, username='person'))),
),
)
def test_exit_json_removes_values(self):
self.maxDiff = None
for args, return_val, expected in self.dataset:
params = dict(ANSIBLE_MODULE_ARGS=args)
params = json.dumps(params)
with swap_stdin_and_argv(stdin_data=params):
with swap_stdout():
basic._ANSIBLE_ARGS = None
module = basic.AnsibleModule(
argument_spec=dict(
username=dict(),
password=dict(no_log=True),
token=dict(no_log=True),
),
)
with self.assertRaises(SystemExit) as ctx:
self.assertEquals(module.exit_json(**return_val), expected)
self.assertEquals(json.loads(sys.stdout.getvalue()), expected)
def test_fail_json_removes_values(self):
self.maxDiff = None
for args, return_val, expected in self.dataset:
expected = copy.deepcopy(expected)
expected['failed'] = True
params = dict(ANSIBLE_MODULE_ARGS=args)
params = json.dumps(params)
with swap_stdin_and_argv(stdin_data=params):
with swap_stdout():
basic._ANSIBLE_ARGS = None
module = basic.AnsibleModule(
argument_spec=dict(
username=dict(),
password=dict(no_log=True),
token=dict(no_log=True),
),
)
with self.assertRaises(SystemExit) as ctx:
self.assertEquals(module.fail_json(**return_val), expected)
self.assertEquals(json.loads(sys.stdout.getvalue()), expected)
| gpl-3.0 |
jendap/tensorflow | tensorflow/python/util/dispatch.py | 22 | 6842 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Type-based dispatch for TensorFlow ops.
"Operation dispatchers" can be used to override the behavior for TensorFlow ops
when they are called with otherwise unsupported argument types. In particular,
when an operation is called with arguments that would cause it to raise a
TypeError, it falls back on its registered operation dispatchers. If any
registered dispatchers can handle the arguments, then its result is returned.
Otherwise, the original TypeError is raised.
Dispatch support is added by default to the generated op wrappers for any
visible ops. Ops that are implemented in Python can opt in to dispatch
support using the `add_dispatch_support` decorator.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
# Private function attribute used to store a list of dispatchers.
DISPATCH_ATTR = "_tf_dispatchers"
class OpDispatcher(object):
"""Abstract base class for TensorFlow operator dispatchers.
Each operation dispatcher acts as an override handler for a single
TensorFlow operation, and its results are used when the handler indicates
that it can handle the operation's arguments (by returning any value other
than `OpDispatcher.NOT_SUPPORTED`).
"""
# Sentinel value that can be returned to indicate that an operation
# dispatcher does not support a given set of arguments.
NOT_SUPPORTED = object()
def handle(self, args, kwargs): # pylint: disable=unused-argument
"""Handle this dispatcher's operation with the specified arguments.
If this operation dispatcher can handle the given arguments, then
return an appropriate value (or raise an appropriate exception).
Args:
args: The arguments to the operation.
kwargs: The keyword arguments to the operation.
Returns:
The result of the operation, or `OpDispatcher.NOT_SUPPORTED` if this
dispatcher can not handle the given arguments.
"""
return self.NOT_SUPPORTED
def register(self, op):
"""Register this dispatcher as a handler for `op`.
Args:
op: Python function: the TensorFlow operation that should be handled. Must
have a dispatch list (which is added automatically for generated ops,
and can be added to Python ops using the `add_dispatch_support`
decorator).
"""
if not hasattr(op, DISPATCH_ATTR):
raise AssertionError("Dispatching not enabled for %s" % op)
getattr(op, DISPATCH_ATTR).append(self)
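# Minimal usage sketch for the dispatcher protocol above (hypothetical
# MyType / handle_my_type / my_op, for illustration only; my_op must already
# have a dispatch list, or register() raises AssertionError):
#
#   class MyTypeDispatcher(OpDispatcher):
#     def handle(self, args, kwargs):
#       if args and isinstance(args[0], MyType):
#         return handle_my_type(*args, **kwargs)
#       return self.NOT_SUPPORTED
#
#   MyTypeDispatcher().register(my_op)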
def dispatch(op, *args, **kwargs):
"""Returns the result from the first successful dispatcher for a given op.
Calls the `handle` method of each `OpDispatcher` that has been registered
to handle `op`, and returns the value from the first successful handler.
Args:
op: Python function: the operation to dispatch for.
*args: The arguments to the operation.
**kwargs: The keyword arguments to the operation.
Returns:
The result of the operation, or `NOT_SUPPORTED` if no registered
dispatcher can handle the given arguments.
"""
for dispatcher in getattr(op, DISPATCH_ATTR):
result = dispatcher.handle(args, kwargs)
if result is not OpDispatcher.NOT_SUPPORTED:
return result
return OpDispatcher.NOT_SUPPORTED
class _TypeBasedDispatcher(OpDispatcher):
"""Dispatcher that handles op if any arguments have a specified type.
Checks the types of the arguments and keyword arguments (including elements
of lists or tuples), and if any argument values have the indicated type(s),
then delegates to an override function.
"""
def __init__(self, override_func, types):
self._types = types
self._override_func = override_func
def _handles(self, args, kwargs):
for arg in itertools.chain(args, kwargs.values()):
if (isinstance(arg, self._types) or
(isinstance(arg, (list, tuple)) and
any(isinstance(elt, self._types) for elt in arg))):
return True
return False
def handle(self, args, kwargs):
if self._handles(args, kwargs):
return self._override_func(*args, **kwargs)
else:
return self.NOT_SUPPORTED
# pylint: disable=g-doc-return-or-yield
def dispatch_for_types(op, *types):
"""Decorator to declare that a Python function overrides an op for a type.
The decorated function is used to override `op` if any of the arguments or
keyword arguments (including elements of lists or tuples) have one of the
specified types.
Example:
```python
@dispatch_for_types(math_ops.add, RaggedTensor, RaggedTensorValue)
def ragged_add(x, y, name=None): ...
```
Args:
op: Python function: the operation that should be overridden.
*types: The argument types for which this function should be used.
"""
def decorator(func):
if tf_inspect.getargspec(func) != tf_inspect.getargspec(op):
raise AssertionError("The decorated function's signature must exactly "
"match the signature of the overridden op.")
_TypeBasedDispatcher(func, types).register(op)
return func
return decorator
# pylint: enable=g-doc-return-or-yield
def add_dispatch_list(target):
"""Decorator that adds a dispatch_list attribute to an op."""
if hasattr(target, DISPATCH_ATTR):
raise AssertionError("%s already has a dispatch list" % target)
setattr(target, DISPATCH_ATTR, [])
return target
def add_dispatch_support(target):
"""Decorator that adds a dispatch handling wrapper to an op."""
def wrapper(*args, **kwargs):
"""Call target, and fall back on dispatchers if there is a TypeError."""
try:
return target(*args, **kwargs)
except (TypeError, ValueError):
# Note: convert_to_eager_tensor currently raises a ValueError, not a
# TypeError, when given unexpected types. So we need to catch both.
result = dispatch(wrapper, *args, **kwargs)
if result is not OpDispatcher.NOT_SUPPORTED:
return result
else:
raise
add_dispatch_list(wrapper)
return tf_decorator.make_decorator(target, wrapper)
| apache-2.0 |
YongseopKim/crosswalk-test-suite | webapi/tct-alarm-tizen-tests/inst.apk.py | 903 | 3180 | #!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARAMETERS = None
ADB_CMD = "adb"
def doCMD(cmd):
# No need to handle timeouts in this short script; let the tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code is not None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s uninstall org.xwalk.%s" % (
ADB_CMD, PARAMETERS.device, os.path.basename(os.path.splitext(file)[0]))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def instPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".apk"):
cmd = "%s -s %s install %s" % (ADB_CMD,
PARAMETERS.device, os.path.join(root, file))
(return_code, output) = doCMD(cmd)
for line in output:
if "Failure" in line:
action_status = False
break
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.device:
(return_code, output) = doCMD("%s devices" % ADB_CMD)
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
if not PARAMETERS.device:
print "No device found"
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
| bsd-3-clause |
ClydeSpace-GroundStation/GroundStation | Utilities/Supporting_Libraries/gr-bruninga-master/python/fsk_demod.py | 1 | 5330 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 <+YOU OR YOUR COMPANY+>.
#
# This is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio import blocks
from gnuradio import digital
from gnuradio import filter
from gnuradio.filter import firdes
from scipy import signal
import math
import bruninga
class fsk_demod(gr.hier_block2):
"""
Tidied-up version of the demodulator found in examples/aprs-rx.grc.
inc_samp_rate should be the incoming audio sample rate.
"""
def __init__(self, inc_samp_rate):
gr.hier_block2.__init__(self,
"fsk_demod",
gr.io_signature(1, 1, gr.sizeof_float), # Input signature
gr.io_signature(1, 1, gr.sizeof_char) # Output signature
)
self.inc_samp_rate = inc_samp_rate
self.sps = sps = 4
self.baud_rate = baud_rate = 1200
self.samp_rate = samp_rate = sps * baud_rate * 4
self.mark = mark = 2200
self.space = space = 1200
self.center = center = int((mark + space) / 2)
##################################################
# Blocks
##################################################
# Stage 1: Force resampling to 19.2ksps
self.rational_resampler_xxx_0 = filter.rational_resampler_fff(
interpolation=samp_rate,
decimation=self.inc_samp_rate,
taps=None,
fractional_bw=None,
)
# Stage 2: Bandpass Filter
self.bpf_width = bpf_width = 800
self.bpf_trans = bpf_trans = 200
self.band_pass_filter_0 = filter.fir_filter_fff(1, firdes.band_pass(
1, samp_rate, 1700-bpf_width, 1700+bpf_width, bpf_trans, firdes.WIN_RECTANGULAR, 6.76))
# Stage 3: Tone Detection
self.window_len = window_len = self.samp_rate/self.baud_rate*2
self.window = window = signal.windows.cosine(window_len)
self.freq_xlating_fir_filter_xxx_0_0 = filter.freq_xlating_fir_filter_fcf(4, (window), mark, samp_rate)
self.freq_xlating_fir_filter_xxx_0 = filter.freq_xlating_fir_filter_fcf(4, (window), space, samp_rate)
self.blocks_complex_to_mag_0_0 = blocks.complex_to_mag(1)
self.blocks_complex_to_mag_0 = blocks.complex_to_mag(1)
# Stage 4: AGC
self.decay = decay = 0.00022
self.attack = attack = 0.8
self.bruninga_direwolf_agc_0_0 = bruninga.direwolf_agc(attack, decay)
self.bruninga_direwolf_agc_0 = bruninga.direwolf_agc(attack, decay)
self.blocks_sub_xx_1 = blocks.sub_ff(1)
# Stage 5: Clock Recovery
self.gain_mu = gain_mu = 0.45
self.digital_clock_recovery_mm_xx_0 = digital.clock_recovery_mm_ff(self.sps*(1+0.0), 0.25*gain_mu*gain_mu, 0.5, gain_mu, 0.05)
# Stage 6: Differential Decoding
self.digital_diff_decoder_bb_0 = digital.diff_decoder_bb(2)
self.blocks_not_xx_0 = blocks.not_bb()
self.blocks_and_const_xx_0 = blocks.and_const_bb(1)
# Stage 7: Output
self.digital_binary_slicer_fb_0 = digital.binary_slicer_fb()
##################################################
# Connections
##################################################
self.connect((self, 0), (self.rational_resampler_xxx_0, 0))
self.connect((self.rational_resampler_xxx_0, 0), (self.band_pass_filter_0, 0))
self.connect((self.band_pass_filter_0, 0), (self.freq_xlating_fir_filter_xxx_0_0, 0))
self.connect((self.band_pass_filter_0, 0), (self.freq_xlating_fir_filter_xxx_0, 0))
self.connect((self.freq_xlating_fir_filter_xxx_0, 0), (self.blocks_complex_to_mag_0, 0))
self.connect((self.freq_xlating_fir_filter_xxx_0_0, 0), (self.blocks_complex_to_mag_0_0, 0))
self.connect((self.blocks_complex_to_mag_0, 0), (self.bruninga_direwolf_agc_0, 0))
self.connect((self.blocks_complex_to_mag_0_0, 0), (self.bruninga_direwolf_agc_0_0, 0))
self.connect((self.bruninga_direwolf_agc_0_0, 0), (self.blocks_sub_xx_1, 1))
self.connect((self.bruninga_direwolf_agc_0, 0), (self.blocks_sub_xx_1, 0))
self.connect((self.blocks_sub_xx_1, 0), (self.digital_clock_recovery_mm_xx_0, 0))
self.connect((self.digital_clock_recovery_mm_xx_0, 0), (self.digital_binary_slicer_fb_0, 0))
self.connect((self.digital_diff_decoder_bb_0, 0), (self.blocks_not_xx_0, 0))
self.connect((self.blocks_not_xx_0, 0), (self.blocks_and_const_xx_0, 0))
self.connect((self.blocks_and_const_xx_0, 0), (self, 0))
self.connect((self.digital_binary_slicer_fb_0, 0), (self.digital_diff_decoder_bb_0, 0))
| mit |
ariafan/S5501-3.10 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py | 12980 | 5411 | # SchedGui.py - Python extension for perf script, basic GUI code for
# traces drawing and overview.
#
# Copyright (C) 2010 by Frederic Weisbecker <[email protected]>
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
try:
import wx
except ImportError:
raise ImportError, "You need to install the wxpython lib for this script"
class RootFrame(wx.Frame):
Y_OFFSET = 100
RECT_HEIGHT = 100
RECT_SPACE = 50
EVENT_MARKING_WIDTH = 5
def __init__(self, sched_tracer, title, parent = None, id = -1):
wx.Frame.__init__(self, parent, id, title)
(self.screen_width, self.screen_height) = wx.GetDisplaySize()
self.screen_width -= 10
self.screen_height -= 10
self.zoom = 0.5
self.scroll_scale = 20
self.sched_tracer = sched_tracer
self.sched_tracer.set_root_win(self)
(self.ts_start, self.ts_end) = sched_tracer.interval()
self.update_width_virtual()
self.nr_rects = sched_tracer.nr_rectangles() + 1
self.height_virtual = RootFrame.Y_OFFSET + (self.nr_rects * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
# whole window panel
self.panel = wx.Panel(self, size=(self.screen_width, self.screen_height))
# scrollable container
self.scroll = wx.ScrolledWindow(self.panel)
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale)
self.scroll.EnableScrolling(True, True)
self.scroll.SetFocus()
# scrollable drawing area
self.scroll_panel = wx.Panel(self.scroll, size=(self.screen_width - 15, self.screen_height / 2))
self.scroll_panel.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll_panel.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll_panel.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Bind(wx.EVT_PAINT, self.on_paint)
self.scroll.Bind(wx.EVT_KEY_DOWN, self.on_key_press)
self.scroll.Bind(wx.EVT_LEFT_DOWN, self.on_mouse_down)
self.scroll.Fit()
self.Fit()
self.scroll_panel.SetDimensions(-1, -1, self.width_virtual, self.height_virtual, wx.SIZE_USE_EXISTING)
self.txt = None
self.Show(True)
def us_to_px(self, val):
return val / (10 ** 3) * self.zoom
def px_to_us(self, val):
return (val / self.zoom) * (10 ** 3)
def scroll_start(self):
(x, y) = self.scroll.GetViewStart()
return (x * self.scroll_scale, y * self.scroll_scale)
def scroll_start_us(self):
(x, y) = self.scroll_start()
return self.px_to_us(x)
def paint_rectangle_zone(self, nr, color, top_color, start, end):
offset_px = self.us_to_px(start - self.ts_start)
width_px = self.us_to_px(end - self.ts_start)
offset_py = RootFrame.Y_OFFSET + (nr * (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE))
width_py = RootFrame.RECT_HEIGHT
dc = self.dc
if top_color is not None:
(r, g, b) = top_color
top_color = wx.Colour(r, g, b)
brush = wx.Brush(top_color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, RootFrame.EVENT_MARKING_WIDTH)
width_py -= RootFrame.EVENT_MARKING_WIDTH
offset_py += RootFrame.EVENT_MARKING_WIDTH
(r ,g, b) = color
color = wx.Colour(r, g, b)
brush = wx.Brush(color, wx.SOLID)
dc.SetBrush(brush)
dc.DrawRectangle(offset_px, offset_py, width_px, width_py)
def update_rectangles(self, dc, start, end):
start += self.ts_start
end += self.ts_start
self.sched_tracer.fill_zone(start, end)
def on_paint(self, event):
dc = wx.PaintDC(self.scroll_panel)
self.dc = dc
width = min(self.width_virtual, self.screen_width)
(x, y) = self.scroll_start()
start = self.px_to_us(x)
end = self.px_to_us(x + width)
self.update_rectangles(dc, start, end)
def rect_from_ypixel(self, y):
y -= RootFrame.Y_OFFSET
rect = y / (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
height = y % (RootFrame.RECT_HEIGHT + RootFrame.RECT_SPACE)
if rect < 0 or rect > self.nr_rects - 1 or height > RootFrame.RECT_HEIGHT:
return -1
return rect
def update_summary(self, txt):
if self.txt:
self.txt.Destroy()
self.txt = wx.StaticText(self.panel, -1, txt, (0, (self.screen_height / 2) + 50))
def on_mouse_down(self, event):
(x, y) = event.GetPositionTuple()
rect = self.rect_from_ypixel(y)
if rect == -1:
return
t = self.px_to_us(x) + self.ts_start
self.sched_tracer.mouse_down(rect, t)
def update_width_virtual(self):
self.width_virtual = self.us_to_px(self.ts_end - self.ts_start)
def __zoom(self, x):
self.update_width_virtual()
(xpos, ypos) = self.scroll.GetViewStart()
xpos = self.us_to_px(x) / self.scroll_scale
self.scroll.SetScrollbars(self.scroll_scale, self.scroll_scale, self.width_virtual / self.scroll_scale, self.height_virtual / self.scroll_scale, xpos, ypos)
self.Refresh()
def zoom_in(self):
x = self.scroll_start_us()
self.zoom *= 2
self.__zoom(x)
def zoom_out(self):
x = self.scroll_start_us()
self.zoom /= 2
self.__zoom(x)
def on_key_press(self, event):
key = event.GetRawKeyCode()
if key == ord("+"):
self.zoom_in()
return
if key == ord("-"):
self.zoom_out()
return
key = event.GetKeyCode()
(x, y) = self.scroll.GetViewStart()
if key == wx.WXK_RIGHT:
self.scroll.Scroll(x + 1, y)
elif key == wx.WXK_LEFT:
self.scroll.Scroll(x - 1, y)
elif key == wx.WXK_DOWN:
self.scroll.Scroll(x, y + 1)
elif key == wx.WXK_UP:
self.scroll.Scroll(x, y - 1)
| gpl-2.0 |
ideaworld/BioDesigner | design/views.py | 1 | 13070 | """
@author: Bowen
"""
from django.shortcuts import render
from django.db.models import Q
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404, render_to_response
from django.template import RequestContext, loader
from django.core.mail import send_mail
from django.utils import timezone
from django.views.decorators.csrf import csrf_exempt
import hashlib
import json
import datetime
import random
import traceback
from search_part import ambiguousSearch, getPart
from accounts.models import User
from design.models import project, functions, tracks, user_project, chain, track_functions
from design.project import searchProject, getUserProject, getChain, getChainList
from design.recommend import getApriorRecommend, getMarkovRecommend, getBetweenMarkovRecommend
from design.file import getSequenceResultImage
from design.simulation import reaction_simulator
@csrf_exempt
def searchParts(request):
keyword = request.GET.get('keyword')
try:
funcs = request.GET.get('funcs')
except:
funcs = ''
results = ambiguousSearch(keyword, funcs)
return HttpResponse(json.dumps(results), content_type="text/json")
@csrf_exempt
def getParts(request):
partName = request.GET.get('partname')
results = getPart(partName)
return HttpResponse(json.dumps(results), content_type="text/json")
@csrf_exempt
def dashboardView(request):
try:
isLoggedIn = request.session['isLoggedIn']
if isLoggedIn:
chainId = int(request.GET.get('id'))
template = loader.get_template('home/dashboard.html')
context = RequestContext(request, {
'username':str(request.session['username']),
'id' : chainId
})
return HttpResponse(template.render(context))
else:
return HttpResponseRedirect('/')
except KeyError:
return HttpResponseRedirect('/')
@csrf_exempt
def projectView(request):
try:
isLoggedIn = request.session['isLoggedIn']
if isLoggedIn:
template = loader.get_template('home/project_new.html')
context = RequestContext(request, {
'username':request.session['username']
})
return HttpResponse(template.render(context))
else:
return HttpResponseRedirect('/')
except KeyError:
return HttpResponseRedirect('/')
@csrf_exempt
def oldprojectView(request):
try:
isLoggedIn = request.session['isLoggedIn']
if isLoggedIn:
template = loader.get_template('home/project.html')
context = RequestContext(request, {
'username':request.session['username']
})
return HttpResponse(template.render(context))
else:
return HttpResponseRedirect('/')
except KeyError:
return HttpResponseRedirect('/')
def isAccountActive(request):
try:
username = request.session['username']
return User.objects.get(username=username).is_confirmed
except KeyError:
return False
def isLoggedIn(request):
try:
isLoggedIn = request.session['isLoggedIn']
return isLoggedIn
except KeyError:
return False
def getCurrentUserObj(request):
try:
username = request.session['username']
userObj = User.objects.get(username=username)
return userObj
except:
return None
def newProject(name, user, track):
try:
projectObj = project(project_name=name, creator=user, track_id=track)
projectObj.save()
userPjctObj = user_project(user=user, project=projectObj)
userPjctObj.save()
return True, projectObj
except:
        return False, None
@csrf_exempt
def createProject(request):
result = {
'isSuccessful': False,
}
if not isLoggedIn(request):
return HttpResponse(json.dumps(result), content_type="application/json")
received_json_data = json.loads(request.body)
name = received_json_data['name']
userObj = getCurrentUserObj(request)
#function_id = int(request.POST.get('function', ''))
# track_id = int(request.POST.get('track', ''))
track_id = int(received_json_data['track'])
    createResult = newProject(name, userObj, track_id)
    result['isSuccessful'] = createResult[0]
    if createResult[0]:
        result['project_name'] = name
        result['id'] = createResult[1].id
        result['track'] = createResult[1].track.track
        result['creator'] = userObj.username
    return HttpResponse(json.dumps(result), content_type="application/json")
@csrf_exempt
def getProjectChain(request):
result = {
'isSuccessful' : False,
}
if not isLoggedIn(request):
return HttpResponse(json.dumps(result), content_type="application/json")
project_id = request.GET.get('id', '')
chainResult = getChain(project_id)
result['isSuccessful'] = chainResult[0]
result['chain'] = chainResult[1]
return HttpResponse(json.dumps(result), content_type="application/json")
@csrf_exempt
def getProject(request):
keyword = request.GET.get('keyword')
userObj = getCurrentUserObj(request)
if not userObj:
result = {'isSuccessful' : False}
return HttpResponse(json.dumps(result), content_type="application/json")
result = searchProject(keyword, userObj)
return HttpResponse(json.dumps(result), content_type="application/json")
@csrf_exempt
def getUserProjects(request):
userObj = getCurrentUserObj(request)
if not userObj:
result = {'isSuccessful' : False}
return HttpResponse(json.dumps(result), content_type="application/json")
result = getUserProject(userObj)
return HttpResponse(json.dumps(result), content_type="application/json")
@csrf_exempt
def getProjectChains(request):
projectId = request.GET.get('id','')
result = getChainList(int(projectId))
return HttpResponse(json.dumps(result), content_type="application/json")
@csrf_exempt
def createNewDevice(request):
result = {
'isSuccessful': False,
}
if not isLoggedIn(request):
return HttpResponse(json.dumps(result), content_type="application/json")
received_json_data = json.loads(request.body)
name = received_json_data['name']
projectId = received_json_data['id']
newChain = chain(name=name, project_id=int(projectId))
try:
newChain.save()
result['isSuccessful'] = True
result['name'] = name
result['id'] = projectId
except:
pass
return HttpResponse(json.dumps(result), content_type="application/json")
@csrf_exempt
def saveChain(request):
result = {'isSuccessful':True,}
chainContent = request.POST.get('chain','')
chainId = int(request.POST.get('id',''))
try:
chainObj = chain.objects.get(id=chainId)
chainObj.sequence = chainContent
chainObj.isModified = True
chainObj.save()
except:
result['isSuccessful'] = False
return HttpResponse(json.dumps(result),content_type="application/json")
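# A chain's `sequence` is stored as a '_'-separated list of part ids, possibly
# with a leading '_' that the handlers below strip before use (ids shown in
# comments are illustrative, e.g. 'p1_p2_p3').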
@csrf_exempt
def getARecommend(request):
chainStr = request.GET.get('seq', '')
funcStr = request.GET.get('funcs', '')
if chainStr.startswith('_'):
chainStr = chainStr[1:]
if chainStr.endswith('_'):
chainStr = chainStr[:-1]
return HttpResponse(json.dumps(getApriorRecommend(chainStr, funcStr)), content_type="application/json")
@csrf_exempt
def getMRecommend(request):
part_id = request.GET.get('part')
return HttpResponse(json.dumps(getMarkovRecommend(part_id)), content_type="application/json")
@csrf_exempt
def getTracks(request):
    trackList = tracks.objects.all().order_by('id')
result = {
'isSuccessful' : False,
}
trackInfos = list()
for t in trackList:
tmp = {
'id' : t.id,
'track' : t.track
}
trackInfos.append(tmp)
result['isSuccessful'] = True
result['tracks'] = trackInfos
return HttpResponse(json.dumps(result), content_type="application/json")
@csrf_exempt
def getChainLength(request):
chainId = request.GET.get('id')
result = {
'isSuccessful' : True,
}
try:
chainObj = chain.objects.get(id=chainId)
se = chainObj.sequence
if se.startswith('_'):
se = se[1:]
chainLength = len(se.split('_'))
result['length'] = chainLength
except:
result['isSuccessful'] = False
return HttpResponse(json.dumps(result), content_type="application/json")
@csrf_exempt
def changeProjectName(request):
projectId = request.POST.get('id','')
newName = request.POST.get('name','')
result = {
'isSuccessful': True,
}
try:
projectObj = project.objects.get(id=projectId)
projectObj.project_name = newName
projectObj.save()
except:
result['isSuccessful'] = False
return HttpResponse(json.dumps(result), content_type="application/json")
@csrf_exempt
def changeProjectTrack(request):
projectId = request.POST.get('id', '')
newTrackId = request.POST.get('track_id', '')
result = {
'isSuccessful': True,
}
try:
projectObj = project.objects.get(id=projectId)
trackObj = tracks.objects.get(id=newTrackId)
projectObj.track = trackObj
projectObj.save()
except:
result['isSuccessful'] = False
return HttpResponse(json.dumps(result), content_type="application/json")
@csrf_exempt
def get_between_recommend(request):
part_twin = request.GET.get('pairs', '').split('_')
first = part_twin[0]
second = part_twin[1]
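    # NOTE: only `first` reaches the recommender below; `second` is parsed
    # from the pair but currently unused.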
return HttpResponse(json.dumps(getBetweenMarkovRecommend(first)), content_type="application/json")
@csrf_exempt
def deleteProject(request):
request_json = json.loads(request.body)
# projectId = request.POST.get('id', '')
projectId = request_json['id']
result = {
'isSuccessful' : True,
}
try:
projectObj = project.objects.get(id=projectId)
projectObj.is_deleted = 1
projectObj.save()
except:
result['isSuccessful'] = False
return HttpResponse(json.dumps(result), content_type="application/json")
@csrf_exempt
def getTrackFunctions(request):
track_id = request.GET.get('track_id')
result = {
'isSuccessful' : True
}
try:
tf_list = track_functions.objects.filter(track_id=track_id)
function_list = list()
for tf_obj in tf_list:
function_info = {
'id': tf_obj.function.id,
'name': tf_obj.function.function,
}
function_list.append(function_info)
result['functions'] = function_list
except:
result['isSuccessful'] = False
traceback.print_exc()
return HttpResponse(json.dumps(result), content_type='application/json')
@csrf_exempt
def getResultImage(request):
result = {
'isSuccessful': True,
}
chainId = request.GET.get('id')
try:
chainObj = chain.objects.get(id=chainId)
if not chainObj.isModified:
result['filepath'] = chainObj.image_file_path
else:
chainStr = chainObj.sequence
if chainStr.startswith('_'):
chainStr = chainStr[1:]
if chainStr == "" or chainStr == None:
result['isSuccessful'] = False
else:
chainName = chainObj.name
width = 80 * len(chainStr.split('_'))
height = 100
if width > 800:
width = 800
                height = 100 * (len(chainStr.split('_')) / 10)
result['filepath'] = getSequenceResultImage(chainStr, width, height, chainName)
chainObj.isModified = False
chainObj.image_file_path = result['filepath']
chainObj.save()
except:
traceback.print_exc()
result['isSuccessful'] = False
return HttpResponse(json.dumps(result), content_type="application/json")
@csrf_exempt
def simulationView(request):
try:
isLoggedIn = request.session['isLoggedIn']
if isLoggedIn:
template = loader.get_template('home/simulation.html')
context = RequestContext(request, {
})
return HttpResponse(template.render(context))
else:
return HttpResponseRedirect('/')
except KeyError:
return HttpResponseRedirect('/')
@csrf_exempt
def simulate(request):
#received_json_data = request.POST
received_json_data = json.loads(request.body)
reactions = received_json_data['reactions']
martials = received_json_data['martials']
    reaction_time = received_json_data['reaction_time']
    try:
        rs = reaction_simulator(reactions, martials, reaction_time)
rs.doSimulation()
result = rs.getProcess()
except:
result = {}
return HttpResponse(json.dumps(result), content_type="application/json")
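# Request sketch for the simulate endpoint (illustrative; the URL route is
# defined elsewhere, field names are taken from the handler above):
#   POST {"reactions": [...], "martials": [...], "reaction_time": 100}
# responds with the simulator's process dump as JSON, or {} on failure.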
| apache-2.0 |
smesdaghi/geonode | geonode/geoserver/context_processors.py | 31 | 1889 | #########################################################################
#
# Copyright (C) 2012 OpenPlans
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from django.conf import settings
from django.core.urlresolvers import reverse
from geonode.geoserver.helpers import ogc_server_settings
def geoserver_urls(request):
"""Global values to pass to templates"""
defaults = dict(
GEOSERVER_BASE_URL=ogc_server_settings.public_url,
UPLOADER_URL=reverse('data_upload') if getattr(
settings,
'UPLOADER',
dict()).get(
'BACKEND',
'geonode.rest') == 'geonode.importer' else reverse('layer_upload'),
MAPFISH_PRINT_ENABLED=ogc_server_settings.MAPFISH_PRINT_ENABLED,
PRINT_NG_ENABLED=ogc_server_settings.PRINT_NG_ENABLED,
GEONODE_SECURITY_ENABLED=ogc_server_settings.GEONODE_SECURITY_ENABLED,
GEOGIG_ENABLED=ogc_server_settings.GEOGIG_ENABLED,
TIME_ENABLED=getattr(
settings,
'UPLOADER',
dict()).get(
'OPTIONS',
dict()).get(
'TIME_ENABLED',
False),
)
return defaults
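# Activation sketch (assumption, following this Django era's convention): list
# 'geonode.geoserver.context_processors.geoserver_urls' in the project's
# TEMPLATE_CONTEXT_PROCESSORS setting so these keys reach every template.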
| gpl-3.0 |
wallnerryan/quantum_migrate | quantum/openstack/common/context.py | 14 | 2619 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Simple class that stores security context information in the web request.
Projects should subclass this class if they wish to enhance the request
context or provide additional information in their specific WSGI pipeline.
"""
import itertools
import uuid
def generate_request_id():
return 'req-' + str(uuid.uuid4())
class RequestContext(object):
"""
Stores information about the security context under which the user
accesses the system, as well as additional request information.
"""
def __init__(self, auth_token=None, user=None, tenant=None, is_admin=False,
read_only=False, show_deleted=False, request_id=None):
self.auth_token = auth_token
self.user = user
self.tenant = tenant
self.is_admin = is_admin
self.read_only = read_only
self.show_deleted = show_deleted
if not request_id:
request_id = generate_request_id()
self.request_id = request_id
def to_dict(self):
return {'user': self.user,
'tenant': self.tenant,
'is_admin': self.is_admin,
'read_only': self.read_only,
'show_deleted': self.show_deleted,
'auth_token': self.auth_token,
'request_id': self.request_id}
def get_admin_context(show_deleted="no"):
context = RequestContext(None,
tenant=None,
is_admin=True,
show_deleted=show_deleted)
return context
def get_context_from_function_and_args(function, args, kwargs):
"""Find an arg of type RequestContext and return it.
This is useful in a couple of decorators where we don't
know much about the function we're wrapping.
"""
for arg in itertools.chain(kwargs.values(), args):
if isinstance(arg, RequestContext):
return arg
return None
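# Usage sketch (illustrative, not part of the original module):
if __name__ == '__main__':
    ctx = get_admin_context()
    # a RequestContext hidden among positional args is recovered by type
    assert get_context_from_function_and_args(None, (ctx,), {}) is ctx
    assert ctx.to_dict()['request_id'].startswith('req-')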
| apache-2.0 |
vrenaville/OCB | addons/association/__init__.py | 886 | 1054 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
peterb12/nand2tetris | src/JackCompiler/VMWriter.py | 1 | 1417 | from jc_types import Scope
class VMWriter:
arithmeticOperations = ["add", "sub", "and", "or", "not", "neg", "eq", "gt", "lt"]
scopeTable = { Scope.STATIC : "static",
Scope.FIELD : "dunno?",
Scope.ARG : "argument",
Scope.VAR : "local" }
def __init__(self, dirName, fname):
self.f = open(dirName + "/" + fname, "w")
# For convenience in debugging.
def _emit(self, string):
# print(string)
self.f.write(string + "\n")
def writePush(self, segment, index):
self._emit("push " + segment + " " + str(index))
def writePop(self, segment, index):
self._emit("pop " + segment + " " + str(index))
def writeArithmetic(self, command):
if (command in self.arithmeticOperations):
self._emit(command)
else:
assert False, "Internal compiler error."
def writeLabel(self, label):
self._emit("label " + label)
def writeGoto(self, label):
self._emit("goto " + label)
def writeIf(self, label):
self._emit("if-goto " + label)
def writeCall(self, name, nArgs):
self._emit("call " + name + " " + str(nArgs))
def writeFunction(self, name, nLocals):
self._emit("function " + name + " " + str(nLocals))
def writeReturn(self):
self._emit("return")
def close(self):
self.f.close()
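# Usage sketch (illustrative; directory and file names are hypothetical):
# VM code for `let x = 2 + 3` where x is local slot 0.
#   w = VMWriter(".", "Main.vm")
#   w.writePush("constant", 2)
#   w.writePush("constant", 3)
#   w.writeArithmetic("add")
#   w.writePop("local", 0)
#   w.close()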
| bsd-3-clause |
madgik/exareme | Exareme-Docker/src/exareme/exareme-tools/madis/src/lib/pyreadline/modes/notemacs.py | 1 | 24681 | # -*- coding: utf-8 -*-
# *****************************************************************************
# Copyright (C) 2003-2006 Gary Bishop.
# Copyright (C) 2006 Jorgen Stenarson. <[email protected]>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
# *****************************************************************************
# re, string, traceback and the clipboard helpers are used by methods below
import re
import string
import traceback
import pyreadline.clipboard as clipboard
import pyreadline.lineeditor.lineobj as lineobj
from pyreadline.logger import log
import basemode
class NotEmacsMode(basemode.BaseMode):
mode = "notemacs"
def __init__(self, rlobj):
super(NotEmacsMode, self).__init__(rlobj)
def __repr__(self):
return "<NotEmacsMode>"
def _readline_from_keyboard(self):
c = self.console
while 1:
self._update_line()
event = c.getkeypress()
if self.next_meta:
self.next_meta = False
control, meta, shift, code = event.keyinfo
event.keyinfo = (control, True, shift, code)
# Process exit keys. Only exit on empty line
if event.keyinfo in self.exit_dispatch:
if lineobj.EndOfLine(self.l_buffer) == 0:
raise EOFError
dispatch_func = self.key_dispatch.get(event.keyinfo, self.self_insert)
log("readline from keyboard:%s" % (event.keyinfo,))
r = None
if dispatch_func:
r = dispatch_func(event)
self.l_buffer.push_undo()
self.previous_func = dispatch_func
if r:
self._update_line()
break
def readline(self, prompt=''):
'''Try to act like GNU readline.'''
# handle startup_hook
if self.first_prompt:
self.first_prompt = False
if self.startup_hook:
try:
self.startup_hook()
except:
print 'startup hook failed'
traceback.print_exc()
c = self.console
self.l_buffer.reset_line()
self.prompt = prompt
self._print_prompt()
if self.pre_input_hook:
try:
self.pre_input_hook()
except:
print 'pre_input_hook failed'
traceback.print_exc()
self.pre_input_hook = None
log("in readline: %s" % self.paste_line_buffer)
if len(self.paste_line_buffer) > 0:
self.l_buffer = lineobj.ReadlineTextBuffer(self.paste_line_buffer[0])
self._update_line()
self.paste_line_buffer = self.paste_line_buffer[1:]
c.write('\r\n')
else:
self._readline_from_keyboard()
c.write('\r\n')
self.add_history(self.l_buffer.copy())
log('returning(%s)' % self.l_buffer.get_line_text())
return self.l_buffer.get_line_text() + '\n'
### Methods below here are bindable emacs functions
def beginning_of_line(self, e): # (C-a)
'''Move to the start of the current line. '''
self.l_buffer.beginning_of_line()
def end_of_line(self, e): # (C-e)
'''Move to the end of the line. '''
self.l_buffer.end_of_line()
def forward_char(self, e): # (C-f)
'''Move forward a character. '''
self.l_buffer.forward_char()
def backward_char(self, e): # (C-b)
'''Move back a character. '''
self.l_buffer.backward_char()
def forward_word(self, e): # (M-f)
'''Move forward to the end of the next word. Words are composed of
letters and digits.'''
self.l_buffer.forward_word()
def backward_word(self, e): # (M-b)
'''Move back to the start of the current or previous word. Words are
composed of letters and digits.'''
self.l_buffer.backward_word()
def clear_screen(self, e): # (C-l)
'''Clear the screen and redraw the current line, leaving the current
line at the top of the screen.'''
self.console.page()
def redraw_current_line(self, e): # ()
'''Refresh the current line. By default, this is unbound.'''
pass
def accept_line(self, e): # (Newline or Return)
'''Accept the line regardless of where the cursor is. If this line
is non-empty, it may be added to the history list for future recall
with add_history(). If this line is a modified history line, the
history line is restored to its original state.'''
return True
######### History commands
def previous_history(self, e): # (C-p)
'''Move back through the history list, fetching the previous command. '''
self._history.previous_history(self.l_buffer)
def next_history(self, e): # (C-n)
'''Move forward through the history list, fetching the next command. '''
self._history.next_history(self.l_buffer)
def beginning_of_history(self, e): # (M-<)
'''Move to the first line in the history.'''
self._history.beginning_of_history()
def end_of_history(self, e): # (M->)
'''Move to the end of the input history, i.e., the line currently
being entered.'''
self._history.end_of_history(self.l_buffer)
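    # Incremental-search driver shared by reverse-/forward-search-history:
    # keystrokes grow or shrink the query and re-run `searchfun` over the
    # history; Return accepts the hit, any other unmatched key rings the
    # bell and stops, leaving the found line in the buffer.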
def _i_search(self, searchfun, direction, init_event):
c = self.console
line = self.get_line_buffer()
query = ''
hc_start = self._history.history_cursor # + direction
while 1:
x, y = self.prompt_end_pos
c.pos(0, y)
if direction < 0:
prompt = 'reverse-i-search'
else:
prompt = 'forward-i-search'
scroll = c.write_scrolling("%s`%s': %s" % (prompt, query, line))
self._update_prompt_pos(scroll)
self._clear_after()
event = c.getkeypress()
if event.keysym == 'BackSpace':
if len(query) > 0:
query = query[:-1]
self._history.history_cursor = hc_start
else:
self._bell()
elif event.char in string.letters + string.digits + string.punctuation + ' ':
self._history.history_cursor = hc_start
query += event.char
elif event.keyinfo == init_event.keyinfo:
self._history.history_cursor += direction
line = searchfun(query)
pass
else:
if event.keysym != 'Return':
self._bell()
break
line = searchfun(query)
px, py = self.prompt_begin_pos
c.pos(0, py)
self.l_buffer.set_line(line)
self._print_prompt()
self._history.history_cursor = len(self._history.history)
def reverse_search_history(self, e): # (C-r)
'''Search backward starting at the current line and moving up
through the history as necessary. This is an incremental search.'''
# print "HEJ"
# self.console.bell()
self._i_search(self._history.reverse_search_history, -1, e)
def forward_search_history(self, e): # (C-s)
'''Search forward starting at the current line and moving down
through the the history as necessary. This is an incremental search.'''
# print "HEJ"
# self.console.bell()
self._i_search(self._history.forward_search_history, 1, e)
def non_incremental_reverse_search_history(self, e): # (M-p)
'''Search backward starting at the current line and moving up
through the history as necessary using a non-incremental search for
a string supplied by the user.'''
self._history.non_incremental_reverse_search_history(self.l_buffer)
def non_incremental_forward_search_history(self, e): # (M-n)
'''Search forward starting at the current line and moving down
through the the history as necessary using a non-incremental search
for a string supplied by the user.'''
self._history.non_incremental_reverse_search_history(self.l_buffer)
def history_search_forward(self, e): # ()
'''Search forward through the history for the string of characters
between the start of the current line and the point. This is a
non-incremental search. By default, this command is unbound.'''
self.l_buffer = self._history.history_search_forward(self.l_buffer)
def history_search_backward(self, e): # ()
'''Search backward through the history for the string of characters
between the start of the current line and the point. This is a
non-incremental search. By default, this command is unbound.'''
self.l_buffer = self._history.history_search_backward(self.l_buffer)
def yank_nth_arg(self, e): # (M-C-y)
'''Insert the first argument to the previous command (usually the
second word on the previous line) at point. With an argument n,
insert the nth word from the previous command (the words in the
previous command begin with word 0). A negative argument inserts the
nth word from the end of the previous command.'''
pass
def yank_last_arg(self, e): # (M-. or M-_)
'''Insert last argument to the previous command (the last word of
the previous history entry). With an argument, behave exactly like
yank-nth-arg. Successive calls to yank-last-arg move back through
the history list, inserting the last argument of each line in turn.'''
pass
def delete_char(self, e): # (C-d)
'''Delete the character at point. If point is at the beginning of
the line, there are no characters in the line, and the last
character typed was not bound to delete-char, then return EOF.'''
self.l_buffer.delete_char()
def backward_delete_char(self, e): # (Rubout)
'''Delete the character behind the cursor. A numeric argument means
to kill the characters instead of deleting them.'''
self.l_buffer.backward_delete_char()
def forward_backward_delete_char(self, e): # ()
'''Delete the character under the cursor, unless the cursor is at
the end of the line, in which case the character behind the cursor
is deleted. By default, this is not bound to a key.'''
pass
def quoted_insert(self, e): # (C-q or C-v)
'''Add the next character typed to the line verbatim. This is how to
insert key sequences like C-q, for example.'''
e = self.console.getkeypress()
self.insert_text(e.char)
def tab_insert(self, e): # (M-TAB)
'''Insert a tab character. '''
ws = ' ' * (self.tabstop - (self.line_cursor % self.tabstop))
self.insert_text(ws)
def self_insert(self, e): # (a, b, A, 1, !, ...)
'''Insert yourself. '''
if ord(e.char) != 0: # don't insert null character in buffer, can happen with dead keys.
self.insert_text(e.char)
def transpose_chars(self, e): # (C-t)
'''Drag the character before the cursor forward over the character
at the cursor, moving the cursor forward as well. If the insertion
point is at the end of the line, then this transposes the last two
characters of the line. Negative arguments have no effect.'''
self.l_buffer.transpose_chars()
def transpose_words(self, e): # (M-t)
'''Drag the word before point past the word after point, moving
point past that word as well. If the insertion point is at the end
of the line, this transposes the last two words on the line.'''
self.l_buffer.transpose_words()
def upcase_word(self, e): # (M-u)
'''Uppercase the current (or following) word. With a negative
argument, uppercase the previous word, but do not move the cursor.'''
self.l_buffer.upcase_word()
def downcase_word(self, e): # (M-l)
'''Lowercase the current (or following) word. With a negative
argument, lowercase the previous word, but do not move the cursor.'''
self.l_buffer.downcase_word()
def capitalize_word(self, e): # (M-c)
'''Capitalize the current (or following) word. With a negative
argument, capitalize the previous word, but do not move the cursor.'''
self.l_buffer.capitalize_word()
def overwrite_mode(self, e): # ()
'''Toggle overwrite mode. With an explicit positive numeric
argument, switches to overwrite mode. With an explicit non-positive
numeric argument, switches to insert mode. This command affects only
emacs mode; vi mode does overwrite differently. Each call to
readline() starts in insert mode. In overwrite mode, characters
bound to self-insert replace the text at point rather than pushing
the text to the right. Characters bound to backward-delete-char
replace the character before point with a space.'''
pass
def kill_line(self, e): # (C-k)
'''Kill the text from point to the end of the line. '''
self.l_buffer.kill_line()
def backward_kill_line(self, e): # (C-x Rubout)
'''Kill backward to the beginning of the line. '''
self.l_buffer.backward_kill_line()
def unix_line_discard(self, e): # (C-u)
'''Kill backward from the cursor to the beginning of the current line. '''
# how is this different from backward_kill_line?
self.l_buffer.unix_line_discard()
def kill_whole_line(self, e): # ()
'''Kill all characters on the current line, no matter where point
is. By default, this is unbound.'''
self.l_buffer.kill_whole_line()
def kill_word(self, e): # (M-d)
'''Kill from point to the end of the current word, or if between
words, to the end of the next word. Word boundaries are the same as
forward-word.'''
self.l_buffer.kill_word()
def backward_kill_word(self, e): # (M-DEL)
'''Kill the word behind point. Word boundaries are the same as
backward-word. '''
self.l_buffer.backward_kill_word()
def unix_word_rubout(self, e): # (C-w)
'''Kill the word behind point, using white space as a word
boundary. The killed text is saved on the kill-ring.'''
self.l_buffer.unix_word_rubout()
def delete_horizontal_space(self, e): # ()
'''Delete all spaces and tabs around point. By default, this is unbound. '''
pass
def kill_region(self, e): # ()
'''Kill the text in the current region. By default, this command is unbound. '''
pass
def copy_region_as_kill(self, e): # ()
'''Copy the text in the region to the kill buffer, so it can be
yanked right away. By default, this command is unbound.'''
pass
def copy_region_to_clipboard(self, e): # ()
'''Copy the text in the region to the windows clipboard.'''
if self.enable_win32_clipboard:
mark = min(self.l_buffer.mark, len(self.l_buffer.line_buffer))
cursor = min(self.l_buffer.point, len(self.l_buffer.line_buffer))
if self.l_buffer.mark == -1:
return
begin = min(cursor, mark)
end = max(cursor, mark)
toclipboard = "".join(self.l_buffer.line_buffer[begin:end])
clipboard.SetClipboardText(str(toclipboard))
def copy_backward_word(self, e): # ()
'''Copy the word before point to the kill buffer. The word
boundaries are the same as backward-word. By default, this command
is unbound.'''
pass
def copy_forward_word(self, e): # ()
'''Copy the word following point to the kill buffer. The word
boundaries are the same as forward-word. By default, this command is
unbound.'''
pass
def paste(self, e):
'''Paste windows clipboard'''
if self.enable_win32_clipboard:
txt = clipboard.get_clipboard_text_and_convert(False)
self.insert_text(txt)
def paste_mulitline_code(self, e):
'''Paste windows clipboard'''
reg = re.compile("\r?\n")
if self.enable_win32_clipboard:
txt = clipboard.get_clipboard_text_and_convert(False)
t = reg.split(txt)
t = [row for row in t if row.strip() != ""] # remove empty lines
if t != [""]:
self.insert_text(t[0])
self.add_history(self.l_buffer.copy())
self.paste_line_buffer = t[1:]
log("multi: %s" % self.paste_line_buffer)
return True
else:
return False
def ipython_paste(self, e):
'''Paste windows clipboard. If enable_ipython_paste_list_of_lists is
True then try to convert tabseparated data to repr of list of lists or
repr of array'''
if self.enable_win32_clipboard:
txt = clipboard.get_clipboard_text_and_convert(
self.enable_ipython_paste_list_of_lists)
if self.enable_ipython_paste_for_paths:
if len(txt) < 300 and ("\t" not in txt) and ("\n" not in txt):
txt = txt.replace("\\", "/").replace(" ", r"\ ")
self.insert_text(txt)
def yank(self, e): # (C-y)
'''Yank the top of the kill ring into the buffer at point. '''
pass
def yank_pop(self, e): # (M-y)
'''Rotate the kill-ring, and yank the new top. You can only do this
if the prior command is yank or yank-pop.'''
pass
def digit_argument(self, e): # (M-0, M-1, ... M--)
'''Add this digit to the argument already accumulating, or start a
new argument. M-- starts a negative argument.'''
pass
def universal_argument(self, e): # ()
'''This is another way to specify an argument. If this command is
followed by one or more digits, optionally with a leading minus
sign, those digits define the argument. If the command is followed
by digits, executing universal-argument again ends the numeric
argument, but is otherwise ignored. As a special case, if this
command is immediately followed by a character that is neither a
digit or minus sign, the argument count for the next command is
multiplied by four. The argument count is initially one, so
executing this function the first time makes the argument count
four, a second time makes the argument count sixteen, and so on. By
default, this is not bound to a key.'''
pass
def delete_char_or_list(self, e): # ()
'''Deletes the character under the cursor if not at the beginning or
end of the line (like delete-char). If at the end of the line,
behaves identically to possible-completions. This command is unbound
by default.'''
pass
def start_kbd_macro(self, e): # (C-x ()
'''Begin saving the characters typed into the current keyboard macro. '''
pass
def end_kbd_macro(self, e): # (C-x ))
'''Stop saving the characters typed into the current keyboard macro
and save the definition.'''
pass
def call_last_kbd_macro(self, e): # (C-x e)
'''Re-execute the last keyboard macro defined, by making the
characters in the macro appear as if typed at the keyboard.'''
pass
def re_read_init_file(self, e): # (C-x C-r)
'''Read in the contents of the inputrc file, and incorporate any
bindings or variable assignments found there.'''
pass
def abort(self, e): # (C-g)
'''Abort the current editing command and ring the terminals bell
(subject to the setting of bell-style).'''
self._bell()
def do_uppercase_version(self, e): # (M-a, M-b, M-x, ...)
'''If the metafied character x is lowercase, run the command that is
bound to the corresponding uppercase character.'''
pass
def prefix_meta(self, e): # (ESC)
'''Metafy the next character typed. This is for keyboards without a
meta key. Typing ESC f is equivalent to typing M-f. '''
self.next_meta = True
def undo(self, e): # (C-_ or C-x C-u)
'''Incremental undo, separately remembered for each line.'''
self.l_buffer.pop_undo()
def revert_line(self, e): # (M-r)
'''Undo all changes made to this line. This is like executing the
undo command enough times to get back to the beginning.'''
pass
def tilde_expand(self, e): # (M-~)
'''Perform tilde expansion on the current word.'''
pass
def set_mark(self, e): # (C-@)
'''Set the mark to the point. If a numeric argument is supplied, the
mark is set to that position.'''
self.l_buffer.set_mark()
def exchange_point_and_mark(self, e): # (C-x C-x)
'''Swap the point with the mark. The current cursor position is set
to the saved position, and the old cursor position is saved as the
mark.'''
pass
def character_search(self, e): # (C-])
'''A character is read and point is moved to the next occurrence of
that character. A negative count searches for previous occurrences.'''
pass
def character_search_backward(self, e): # (M-C-])
'''A character is read and point is moved to the previous occurrence
of that character. A negative count searches for subsequent
occurrences.'''
pass
def insert_comment(self, e): # (M-#)
'''Without a numeric argument, the value of the comment-begin
variable is inserted at the beginning of the current line. If a
numeric argument is supplied, this command acts as a toggle: if the
characters at the beginning of the line do not match the value of
comment-begin, the value is inserted, otherwise the characters in
comment-begin are deleted from the beginning of the line. In either
case, the line is accepted as if a newline had been typed.'''
pass
def dump_functions(self, e): # ()
'''Print all of the functions and their key bindings to the Readline
output stream. If a numeric argument is supplied, the output is
formatted in such a way that it can be made part of an inputrc
file. This command is unbound by default.'''
pass
def dump_variables(self, e): # ()
'''Print all of the settable variables and their values to the
Readline output stream. If a numeric argument is supplied, the
output is formatted in such a way that it can be made part of an
inputrc file. This command is unbound by default.'''
pass
def dump_macros(self, e): # ()
'''Print all of the Readline key sequences bound to macros and the
strings they output. If a numeric argument is supplied, the output
is formatted in such a way that it can be made part of an inputrc
file. This command is unbound by default.'''
pass
# Create key bindings:
def init_editing_mode(self, e): # (C-e)
'''When in vi command mode, this causes a switch to emacs editing
mode.'''
self._bind_exit_key('Control-d')
self._bind_exit_key('Control-z')
# I often accidentally hold the shift or control while typing space
self._bind_key('Shift-space', self.self_insert)
self._bind_key('Control-space', self.self_insert)
self._bind_key('Return', self.accept_line)
self._bind_key('Left', self.backward_char)
self._bind_key('Control-b', self.backward_char)
self._bind_key('Right', self.forward_char)
self._bind_key('Control-f', self.forward_char)
self._bind_key('BackSpace', self.backward_delete_char)
self._bind_key('Home', self.beginning_of_line)
self._bind_key('End', self.end_of_line)
self._bind_key('Delete', self.delete_char)
self._bind_key('Control-d', self.delete_char)
self._bind_key('Clear', self.clear_screen)
# make it case insensitive
def commonprefix(m):
"Given a list of pathnames, returns the longest common leading component"
if not m: return ''
prefix = m[0]
for item in m:
for i in range(len(prefix)):
if prefix[:i + 1].lower() != item[:i + 1].lower():
prefix = prefix[:i]
if i == 0: return ''
break
return prefix
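    # e.g. commonprefix(['FooBar', 'fooBaz']) -> 'FooBa': the longest
    # case-insensitively common prefix, spelled as in the first item.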
| mit |
tangyiyong/odoo | addons/document/std_index.py | 430 | 7457 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from content_index import indexer, cntIndex
from subprocess import Popen, PIPE
import StringIO
import odt2txt
import sys, zipfile, xml.dom.minidom
import logging
_logger = logging.getLogger(__name__)
def _to_unicode(s):
try:
return s.decode('utf-8')
except UnicodeError:
try:
return s.decode('latin')
except UnicodeError:
try:
return s.encode('ascii')
except UnicodeError:
return s
def textToString(element):
buffer = u""
for node in element.childNodes :
if node.nodeType == xml.dom.Node.TEXT_NODE :
buffer += node.nodeValue
elif node.nodeType == xml.dom.Node.ELEMENT_NODE :
buffer += textToString(node)
return buffer
class TxtIndex(indexer):
def _getMimeTypes(self):
return ['text/plain','text/html','text/diff','text/xml', 'text/*',
'application/xml']
def _getExtensions(self):
return ['.txt', '.py']
def _doIndexContent(self, content):
return content
cntIndex.register(TxtIndex())
class PptxIndex(indexer):
def _getMimeTypes(self):
return [ 'application/vnd.openxmlformats-officedocument.presentationml.presentation']
def _getExtensions(self):
return ['.pptx']
def _doIndexFile(self, fname):
def toString () :
""" Converts the document to a string. """
buffer = u""
for val in ["a:t"]:
for paragraph in content.getElementsByTagName(val) :
buffer += textToString(paragraph) + "\n"
return buffer
data = []
zip = zipfile.ZipFile(fname)
files = filter(lambda x: x.startswith('ppt/slides/slide'), zip.namelist())
for i in range(1, len(files) + 1):
content = xml.dom.minidom.parseString(zip.read('ppt/slides/slide%s.xml' % str(i)))
res = toString().encode('ascii','replace')
data.append(res)
return _to_unicode('\n'.join(data))
cntIndex.register(PptxIndex())
class DocIndex(indexer):
def _getMimeTypes(self):
return [ 'application/ms-word']
def _getExtensions(self):
return ['.doc']
def _doIndexFile(self, fname):
try:
pop = Popen(['antiword', fname], shell=False, stdout=PIPE)
(data, _) = pop.communicate()
return _to_unicode(data)
except OSError:
_logger.warning("Failed attempt to execute antiword (MS Word reader). Antiword is necessary to index the file %s of MIME type %s. Detailed error available at DEBUG level.", fname, self._getMimeTypes()[0])
_logger.debug("Trace of the failed file indexing attempt.", exc_info=True)
return u''
cntIndex.register(DocIndex())
class DocxIndex(indexer):
def _getMimeTypes(self):
return [ 'application/vnd.openxmlformats-officedocument.wordprocessingml.document']
def _getExtensions(self):
return ['.docx']
def _doIndexFile(self, fname):
zip = zipfile.ZipFile(fname)
content = xml.dom.minidom.parseString(zip.read("word/document.xml"))
def toString () :
""" Converts the document to a string. """
buffer = u""
for val in ["w:p", "w:h", "text:list"]:
for paragraph in content.getElementsByTagName(val) :
buffer += textToString(paragraph) + "\n"
return buffer
res = toString().encode('ascii','replace')
return _to_unicode(res)
cntIndex.register(DocxIndex())
class XlsxIndex(indexer):
def _getMimeTypes(self):
return [ 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet']
def _getExtensions(self):
return ['.xlsx']
def _doIndexFile(self, fname):
zip = zipfile.ZipFile(fname)
content = xml.dom.minidom.parseString(zip.read("xl/sharedStrings.xml"))
def toString () :
""" Converts the document to a string. """
buffer = u""
for val in ["t"]:
for paragraph in content.getElementsByTagName(val) :
buffer += textToString(paragraph) + "\n"
return buffer
res = toString().encode('ascii','replace')
return _to_unicode(res)
cntIndex.register(XlsxIndex())
class PdfIndex(indexer):
def _getMimeTypes(self):
return [ 'application/pdf']
def _getExtensions(self):
return ['.pdf']
def _doIndexFile(self, fname):
try:
pop = Popen(['pdftotext', '-enc', 'UTF-8', '-nopgbrk', fname, '-'], shell=False, stdout=PIPE)
(data, _) = pop.communicate()
return _to_unicode(data)
except OSError:
_logger.warning("Failed attempt to execute pdftotext. This program is necessary to index the file %s of MIME type %s. Detailed error available at DEBUG level.", fname, self._getMimeTypes()[0])
_logger.debug("Trace of the failed file indexing attempt.", exc_info=True)
return u''
cntIndex.register(PdfIndex())
class ImageNoIndex(indexer):
def _getMimeTypes(self):
return [ 'image/*']
def _getExtensions(self):
#better return no extension, and let 'file' do its magic
return []
#return ['.png','.jpg','.gif','.jpeg','.bmp','.tiff']
def _doIndexContent(self, content):
return 'image'
cntIndex.register(ImageNoIndex())
# other opendocument formats:
# chart-template chart database
# formula-template formula graphics-template graphics
# image
# presentation-template presentation spreadsheet-template spreadsheet
class OpenDoc(indexer):
""" Index OpenDocument files.
Q: is it really worth it to index spreadsheets, or do we only get a
meaningless list of numbers (cell contents) ?
"""
def _getMimeTypes(self):
otypes = [ 'text', 'text-web', 'text-template', 'text-master' ]
return map(lambda a: 'application/vnd.oasis.opendocument.'+a, otypes)
def _getExtensions(self):
return ['.odt', '.ott', ] # '.ods'
def _doIndexContent(self, content):
s = StringIO.StringIO(content)
o = odt2txt.OpenDocumentTextFile(s)
result = _to_unicode(o.toString())
s.close()
return result
cntIndex.register(OpenDoc())
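# Usage sketch (assumption: cntIndex exposes a doIndex() dispatcher in
# content_index that picks a registered indexer by MIME type or extension;
# the exact signature may differ):
#   text = cntIndex.doIndex(None, 'report.pdf', 'application/pdf',
#                           '/tmp/report.pdf')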
#eof
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
c0defreak/python-for-android | python3-alpha/extra_modules/gdata/Crypto/Util/RFC1751.py | 43 | 20130 | #!/usr/bin/python
# rfc1751.py : Converts between 128-bit strings and a human-readable
# sequence of words, as defined in RFC1751: "A Convention for
# Human-Readable 128-bit Keys", by Daniel L. McDonald.
__revision__ = "$Id: RFC1751.py,v 1.6 2003/04/04 15:15:10 akuchling Exp $"
import string, binascii
from functools import reduce
binary={0:'0000', 1:'0001', 2:'0010', 3:'0011', 4:'0100', 5:'0101',
6:'0110', 7:'0111', 8:'1000', 9:'1001', 10:'1010', 11:'1011',
12:'1100', 13:'1101', 14:'1110', 15:'1111'}
def _key2bin(s):
"Convert a key into a string of binary digits"
kl=[ord(x) for x in s]
    kl=[binary[x//16]+binary[x&15] for x in kl]  # // keeps dict keys integral on Python 3
return ''.join(kl)
def _extract(key, start, length):
"""Extract a bitstring from a string of binary digits, and return its
numeric value."""
k=key[start:start+length]
return reduce(lambda x,y: x*2+ord(y)-48, k, 0)
def key_to_english (key):
"""key_to_english(key:string) : string
Transform an arbitrary key into a string containing English words.
The key length must be a multiple of 8.
"""
english=''
for index in range(0, len(key), 8): # Loop over 8-byte subkeys
subkey=key[index:index+8]
# Compute the parity of the key
skbin=_key2bin(subkey) ; p=0
for i in range(0, 64, 2): p=p+_extract(skbin, i, 2)
# Append parity bits to the subkey
skbin=_key2bin(subkey+chr((p<<6) & 255))
for i in range(0, 64, 11):
english=english+wordlist[_extract(skbin, i, 11)]+' '
return english[:-1] # Remove the trailing space
def english_to_key (str):
"""english_to_key(string):string
Transform a string into a corresponding key.
The string must contain words separated by whitespace; the number
of words must be a multiple of 6.
"""
    L = str.upper().split(); key = ''
for index in range(0, len(L), 6):
sublist=L[index:index+6] ; char=9*[0] ; bits=0
for i in sublist:
index = wordlist.index(i)
shift = (8-(bits+11)%8) %8
y = index << shift
cl, cc, cr = (y>>16), (y>>8)&0xff, y & 0xff
if (shift>5):
                char[bits//8] = char[bits//8] | cl
                char[bits//8+1] = char[bits//8+1] | cc
                char[bits//8+2] = char[bits//8+2] | cr
            elif shift>-3:
                char[bits//8] = char[bits//8] | cc
                char[bits//8+1] = char[bits//8+1] | cr
            else: char[bits//8] = char[bits//8] | cr
bits=bits+11
subkey=reduce(lambda x,y:x+chr(y), char, '')
# Check the parity of the resulting key
skbin=_key2bin(subkey)
p=0
for i in range(0, 64, 2): p=p+_extract(skbin, i, 2)
if (p&3) != _extract(skbin, 64, 2):
raise ValueError("Parity error in resulting key")
key=key+subkey[0:8]
return key
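# Round-trip sketch (illustrative; assumes the integer-division fixes above,
# with keys handled as 1-byte-per-character text strings):
#   >>> key_to_english('\x00' * 8)
#   'A A A A A A'
#   >>> english_to_key('A A A A A A') == '\x00' * 8
#   True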
wordlist=[ "A", "ABE", "ACE", "ACT", "AD", "ADA", "ADD",
"AGO", "AID", "AIM", "AIR", "ALL", "ALP", "AM", "AMY", "AN", "ANA",
"AND", "ANN", "ANT", "ANY", "APE", "APS", "APT", "ARC", "ARE", "ARK",
"ARM", "ART", "AS", "ASH", "ASK", "AT", "ATE", "AUG", "AUK", "AVE",
"AWE", "AWK", "AWL", "AWN", "AX", "AYE", "BAD", "BAG", "BAH", "BAM",
"BAN", "BAR", "BAT", "BAY", "BE", "BED", "BEE", "BEG", "BEN", "BET",
"BEY", "BIB", "BID", "BIG", "BIN", "BIT", "BOB", "BOG", "BON", "BOO",
"BOP", "BOW", "BOY", "BUB", "BUD", "BUG", "BUM", "BUN", "BUS", "BUT",
"BUY", "BY", "BYE", "CAB", "CAL", "CAM", "CAN", "CAP", "CAR", "CAT",
"CAW", "COD", "COG", "COL", "CON", "COO", "COP", "COT", "COW", "COY",
"CRY", "CUB", "CUE", "CUP", "CUR", "CUT", "DAB", "DAD", "DAM", "DAN",
"DAR", "DAY", "DEE", "DEL", "DEN", "DES", "DEW", "DID", "DIE", "DIG",
"DIN", "DIP", "DO", "DOE", "DOG", "DON", "DOT", "DOW", "DRY", "DUB",
"DUD", "DUE", "DUG", "DUN", "EAR", "EAT", "ED", "EEL", "EGG", "EGO",
"ELI", "ELK", "ELM", "ELY", "EM", "END", "EST", "ETC", "EVA", "EVE",
"EWE", "EYE", "FAD", "FAN", "FAR", "FAT", "FAY", "FED", "FEE", "FEW",
"FIB", "FIG", "FIN", "FIR", "FIT", "FLO", "FLY", "FOE", "FOG", "FOR",
"FRY", "FUM", "FUN", "FUR", "GAB", "GAD", "GAG", "GAL", "GAM", "GAP",
"GAS", "GAY", "GEE", "GEL", "GEM", "GET", "GIG", "GIL", "GIN", "GO",
"GOT", "GUM", "GUN", "GUS", "GUT", "GUY", "GYM", "GYP", "HA", "HAD",
"HAL", "HAM", "HAN", "HAP", "HAS", "HAT", "HAW", "HAY", "HE", "HEM",
"HEN", "HER", "HEW", "HEY", "HI", "HID", "HIM", "HIP", "HIS", "HIT",
"HO", "HOB", "HOC", "HOE", "HOG", "HOP", "HOT", "HOW", "HUB", "HUE",
"HUG", "HUH", "HUM", "HUT", "I", "ICY", "IDA", "IF", "IKE", "ILL",
"INK", "INN", "IO", "ION", "IQ", "IRA", "IRE", "IRK", "IS", "IT",
"ITS", "IVY", "JAB", "JAG", "JAM", "JAN", "JAR", "JAW", "JAY", "JET",
"JIG", "JIM", "JO", "JOB", "JOE", "JOG", "JOT", "JOY", "JUG", "JUT",
"KAY", "KEG", "KEN", "KEY", "KID", "KIM", "KIN", "KIT", "LA", "LAB",
"LAC", "LAD", "LAG", "LAM", "LAP", "LAW", "LAY", "LEA", "LED", "LEE",
"LEG", "LEN", "LEO", "LET", "LEW", "LID", "LIE", "LIN", "LIP", "LIT",
"LO", "LOB", "LOG", "LOP", "LOS", "LOT", "LOU", "LOW", "LOY", "LUG",
"LYE", "MA", "MAC", "MAD", "MAE", "MAN", "MAO", "MAP", "MAT", "MAW",
"MAY", "ME", "MEG", "MEL", "MEN", "MET", "MEW", "MID", "MIN", "MIT",
"MOB", "MOD", "MOE", "MOO", "MOP", "MOS", "MOT", "MOW", "MUD", "MUG",
"MUM", "MY", "NAB", "NAG", "NAN", "NAP", "NAT", "NAY", "NE", "NED",
"NEE", "NET", "NEW", "NIB", "NIL", "NIP", "NIT", "NO", "NOB", "NOD",
"NON", "NOR", "NOT", "NOV", "NOW", "NU", "NUN", "NUT", "O", "OAF",
"OAK", "OAR", "OAT", "ODD", "ODE", "OF", "OFF", "OFT", "OH", "OIL",
"OK", "OLD", "ON", "ONE", "OR", "ORB", "ORE", "ORR", "OS", "OTT",
"OUR", "OUT", "OVA", "OW", "OWE", "OWL", "OWN", "OX", "PA", "PAD",
"PAL", "PAM", "PAN", "PAP", "PAR", "PAT", "PAW", "PAY", "PEA", "PEG",
"PEN", "PEP", "PER", "PET", "PEW", "PHI", "PI", "PIE", "PIN", "PIT",
"PLY", "PO", "POD", "POE", "POP", "POT", "POW", "PRO", "PRY", "PUB",
"PUG", "PUN", "PUP", "PUT", "QUO", "RAG", "RAM", "RAN", "RAP", "RAT",
"RAW", "RAY", "REB", "RED", "REP", "RET", "RIB", "RID", "RIG", "RIM",
"RIO", "RIP", "ROB", "ROD", "ROE", "RON", "ROT", "ROW", "ROY", "RUB",
"RUE", "RUG", "RUM", "RUN", "RYE", "SAC", "SAD", "SAG", "SAL", "SAM",
"SAN", "SAP", "SAT", "SAW", "SAY", "SEA", "SEC", "SEE", "SEN", "SET",
"SEW", "SHE", "SHY", "SIN", "SIP", "SIR", "SIS", "SIT", "SKI", "SKY",
"SLY", "SO", "SOB", "SOD", "SON", "SOP", "SOW", "SOY", "SPA", "SPY",
"SUB", "SUD", "SUE", "SUM", "SUN", "SUP", "TAB", "TAD", "TAG", "TAN",
"TAP", "TAR", "TEA", "TED", "TEE", "TEN", "THE", "THY", "TIC", "TIE",
"TIM", "TIN", "TIP", "TO", "TOE", "TOG", "TOM", "TON", "TOO", "TOP",
"TOW", "TOY", "TRY", "TUB", "TUG", "TUM", "TUN", "TWO", "UN", "UP",
"US", "USE", "VAN", "VAT", "VET", "VIE", "WAD", "WAG", "WAR", "WAS",
"WAY", "WE", "WEB", "WED", "WEE", "WET", "WHO", "WHY", "WIN", "WIT",
"WOK", "WON", "WOO", "WOW", "WRY", "WU", "YAM", "YAP", "YAW", "YE",
"YEA", "YES", "YET", "YOU", "ABED", "ABEL", "ABET", "ABLE", "ABUT",
"ACHE", "ACID", "ACME", "ACRE", "ACTA", "ACTS", "ADAM", "ADDS",
"ADEN", "AFAR", "AFRO", "AGEE", "AHEM", "AHOY", "AIDA", "AIDE",
"AIDS", "AIRY", "AJAR", "AKIN", "ALAN", "ALEC", "ALGA", "ALIA",
"ALLY", "ALMA", "ALOE", "ALSO", "ALTO", "ALUM", "ALVA", "AMEN",
"AMES", "AMID", "AMMO", "AMOK", "AMOS", "AMRA", "ANDY", "ANEW",
"ANNA", "ANNE", "ANTE", "ANTI", "AQUA", "ARAB", "ARCH", "AREA",
"ARGO", "ARID", "ARMY", "ARTS", "ARTY", "ASIA", "ASKS", "ATOM",
"AUNT", "AURA", "AUTO", "AVER", "AVID", "AVIS", "AVON", "AVOW",
"AWAY", "AWRY", "BABE", "BABY", "BACH", "BACK", "BADE", "BAIL",
"BAIT", "BAKE", "BALD", "BALE", "BALI", "BALK", "BALL", "BALM",
"BAND", "BANE", "BANG", "BANK", "BARB", "BARD", "BARE", "BARK",
"BARN", "BARR", "BASE", "BASH", "BASK", "BASS", "BATE", "BATH",
"BAWD", "BAWL", "BEAD", "BEAK", "BEAM", "BEAN", "BEAR", "BEAT",
"BEAU", "BECK", "BEEF", "BEEN", "BEER",
"BEET", "BELA", "BELL", "BELT", "BEND", "BENT", "BERG", "BERN",
"BERT", "BESS", "BEST", "BETA", "BETH", "BHOY", "BIAS", "BIDE",
"BIEN", "BILE", "BILK", "BILL", "BIND", "BING", "BIRD", "BITE",
"BITS", "BLAB", "BLAT", "BLED", "BLEW", "BLOB", "BLOC", "BLOT",
"BLOW", "BLUE", "BLUM", "BLUR", "BOAR", "BOAT", "BOCA", "BOCK",
"BODE", "BODY", "BOGY", "BOHR", "BOIL", "BOLD", "BOLO", "BOLT",
"BOMB", "BONA", "BOND", "BONE", "BONG", "BONN", "BONY", "BOOK",
"BOOM", "BOON", "BOOT", "BORE", "BORG", "BORN", "BOSE", "BOSS",
"BOTH", "BOUT", "BOWL", "BOYD", "BRAD", "BRAE", "BRAG", "BRAN",
"BRAY", "BRED", "BREW", "BRIG", "BRIM", "BROW", "BUCK", "BUDD",
"BUFF", "BULB", "BULK", "BULL", "BUNK", "BUNT", "BUOY", "BURG",
"BURL", "BURN", "BURR", "BURT", "BURY", "BUSH", "BUSS", "BUST",
"BUSY", "BYTE", "CADY", "CAFE", "CAGE", "CAIN", "CAKE", "CALF",
"CALL", "CALM", "CAME", "CANE", "CANT", "CARD", "CARE", "CARL",
"CARR", "CART", "CASE", "CASH", "CASK", "CAST", "CAVE", "CEIL",
"CELL", "CENT", "CERN", "CHAD", "CHAR", "CHAT", "CHAW", "CHEF",
"CHEN", "CHEW", "CHIC", "CHIN", "CHOU", "CHOW", "CHUB", "CHUG",
"CHUM", "CITE", "CITY", "CLAD", "CLAM", "CLAN", "CLAW", "CLAY",
"CLOD", "CLOG", "CLOT", "CLUB", "CLUE", "COAL", "COAT", "COCA",
"COCK", "COCO", "CODA", "CODE", "CODY", "COED", "COIL", "COIN",
"COKE", "COLA", "COLD", "COLT", "COMA", "COMB", "COME", "COOK",
"COOL", "COON", "COOT", "CORD", "CORE", "CORK", "CORN", "COST",
"COVE", "COWL", "CRAB", "CRAG", "CRAM", "CRAY", "CREW", "CRIB",
"CROW", "CRUD", "CUBA", "CUBE", "CUFF", "CULL", "CULT", "CUNY",
"CURB", "CURD", "CURE", "CURL", "CURT", "CUTS", "DADE", "DALE",
"DAME", "DANA", "DANE", "DANG", "DANK", "DARE", "DARK", "DARN",
"DART", "DASH", "DATA", "DATE", "DAVE", "DAVY", "DAWN", "DAYS",
"DEAD", "DEAF", "DEAL", "DEAN", "DEAR", "DEBT", "DECK", "DEED",
"DEEM", "DEER", "DEFT", "DEFY", "DELL", "DENT", "DENY", "DESK",
"DIAL", "DICE", "DIED", "DIET", "DIME", "DINE", "DING", "DINT",
"DIRE", "DIRT", "DISC", "DISH", "DISK", "DIVE", "DOCK", "DOES",
"DOLE", "DOLL", "DOLT", "DOME", "DONE", "DOOM", "DOOR", "DORA",
"DOSE", "DOTE", "DOUG", "DOUR", "DOVE", "DOWN", "DRAB", "DRAG",
"DRAM", "DRAW", "DREW", "DRUB", "DRUG", "DRUM", "DUAL", "DUCK",
"DUCT", "DUEL", "DUET", "DUKE", "DULL", "DUMB", "DUNE", "DUNK",
"DUSK", "DUST", "DUTY", "EACH", "EARL", "EARN", "EASE", "EAST",
"EASY", "EBEN", "ECHO", "EDDY", "EDEN", "EDGE", "EDGY", "EDIT",
"EDNA", "EGAN", "ELAN", "ELBA", "ELLA", "ELSE", "EMIL", "EMIT",
"EMMA", "ENDS", "ERIC", "EROS", "EVEN", "EVER", "EVIL", "EYED",
"FACE", "FACT", "FADE", "FAIL", "FAIN", "FAIR", "FAKE", "FALL",
"FAME", "FANG", "FARM", "FAST", "FATE", "FAWN", "FEAR", "FEAT",
"FEED", "FEEL", "FEET", "FELL", "FELT", "FEND", "FERN", "FEST",
"FEUD", "FIEF", "FIGS", "FILE", "FILL", "FILM", "FIND", "FINE",
"FINK", "FIRE", "FIRM", "FISH", "FISK", "FIST", "FITS", "FIVE",
"FLAG", "FLAK", "FLAM", "FLAT", "FLAW", "FLEA", "FLED", "FLEW",
"FLIT", "FLOC", "FLOG", "FLOW", "FLUB", "FLUE", "FOAL", "FOAM",
"FOGY", "FOIL", "FOLD", "FOLK", "FOND", "FONT", "FOOD", "FOOL",
"FOOT", "FORD", "FORE", "FORK", "FORM", "FORT", "FOSS", "FOUL",
"FOUR", "FOWL", "FRAU", "FRAY", "FRED", "FREE", "FRET", "FREY",
"FROG", "FROM", "FUEL", "FULL", "FUME", "FUND", "FUNK", "FURY",
"FUSE", "FUSS", "GAFF", "GAGE", "GAIL", "GAIN", "GAIT", "GALA",
"GALE", "GALL", "GALT", "GAME", "GANG", "GARB", "GARY", "GASH",
"GATE", "GAUL", "GAUR", "GAVE", "GAWK", "GEAR", "GELD", "GENE",
"GENT", "GERM", "GETS", "GIBE", "GIFT", "GILD", "GILL", "GILT",
"GINA", "GIRD", "GIRL", "GIST", "GIVE", "GLAD", "GLEE", "GLEN",
"GLIB", "GLOB", "GLOM", "GLOW", "GLUE", "GLUM", "GLUT", "GOAD",
"GOAL", "GOAT", "GOER", "GOES", "GOLD", "GOLF", "GONE", "GONG",
"GOOD", "GOOF", "GORE", "GORY", "GOSH", "GOUT", "GOWN", "GRAB",
"GRAD", "GRAY", "GREG", "GREW", "GREY", "GRID", "GRIM", "GRIN",
"GRIT", "GROW", "GRUB", "GULF", "GULL", "GUNK", "GURU", "GUSH",
"GUST", "GWEN", "GWYN", "HAAG", "HAAS", "HACK", "HAIL", "HAIR",
"HALE", "HALF", "HALL", "HALO", "HALT", "HAND", "HANG", "HANK",
"HANS", "HARD", "HARK", "HARM", "HART", "HASH", "HAST", "HATE",
"HATH", "HAUL", "HAVE", "HAWK", "HAYS", "HEAD", "HEAL", "HEAR",
"HEAT", "HEBE", "HECK", "HEED", "HEEL", "HEFT", "HELD", "HELL",
"HELM", "HERB", "HERD", "HERE", "HERO", "HERS", "HESS", "HEWN",
"HICK", "HIDE", "HIGH", "HIKE", "HILL", "HILT", "HIND", "HINT",
"HIRE", "HISS", "HIVE", "HOBO", "HOCK", "HOFF", "HOLD", "HOLE",
"HOLM", "HOLT", "HOME", "HONE", "HONK", "HOOD", "HOOF", "HOOK",
"HOOT", "HORN", "HOSE", "HOST", "HOUR", "HOVE", "HOWE", "HOWL",
"HOYT", "HUCK", "HUED", "HUFF", "HUGE", "HUGH", "HUGO", "HULK",
"HULL", "HUNK", "HUNT", "HURD", "HURL", "HURT", "HUSH", "HYDE",
"HYMN", "IBIS", "ICON", "IDEA", "IDLE", "IFFY", "INCA", "INCH",
"INTO", "IONS", "IOTA", "IOWA", "IRIS", "IRMA", "IRON", "ISLE",
"ITCH", "ITEM", "IVAN", "JACK", "JADE", "JAIL", "JAKE", "JANE",
"JAVA", "JEAN", "JEFF", "JERK", "JESS", "JEST", "JIBE", "JILL",
"JILT", "JIVE", "JOAN", "JOBS", "JOCK", "JOEL", "JOEY", "JOHN",
"JOIN", "JOKE", "JOLT", "JOVE", "JUDD", "JUDE", "JUDO", "JUDY",
"JUJU", "JUKE", "JULY", "JUNE", "JUNK", "JUNO", "JURY", "JUST",
"JUTE", "KAHN", "KALE", "KANE", "KANT", "KARL", "KATE", "KEEL",
"KEEN", "KENO", "KENT", "KERN", "KERR", "KEYS", "KICK", "KILL",
"KIND", "KING", "KIRK", "KISS", "KITE", "KLAN", "KNEE", "KNEW",
"KNIT", "KNOB", "KNOT", "KNOW", "KOCH", "KONG", "KUDO", "KURD",
"KURT", "KYLE", "LACE", "LACK", "LACY", "LADY", "LAID", "LAIN",
"LAIR", "LAKE", "LAMB", "LAME", "LAND", "LANE", "LANG", "LARD",
"LARK", "LASS", "LAST", "LATE", "LAUD", "LAVA", "LAWN", "LAWS",
"LAYS", "LEAD", "LEAF", "LEAK", "LEAN", "LEAR", "LEEK", "LEER",
"LEFT", "LEND", "LENS", "LENT", "LEON", "LESK", "LESS", "LEST",
"LETS", "LIAR", "LICE", "LICK", "LIED", "LIEN", "LIES", "LIEU",
"LIFE", "LIFT", "LIKE", "LILA", "LILT", "LILY", "LIMA", "LIMB",
"LIME", "LIND", "LINE", "LINK", "LINT", "LION", "LISA", "LIST",
"LIVE", "LOAD", "LOAF", "LOAM", "LOAN", "LOCK", "LOFT", "LOGE",
"LOIS", "LOLA", "LONE", "LONG", "LOOK", "LOON", "LOOT", "LORD",
"LORE", "LOSE", "LOSS", "LOST", "LOUD", "LOVE", "LOWE", "LUCK",
"LUCY", "LUGE", "LUKE", "LULU", "LUND", "LUNG", "LURA", "LURE",
"LURK", "LUSH", "LUST", "LYLE", "LYNN", "LYON", "LYRA", "MACE",
"MADE", "MAGI", "MAID", "MAIL", "MAIN", "MAKE", "MALE", "MALI",
"MALL", "MALT", "MANA", "MANN", "MANY", "MARC", "MARE", "MARK",
"MARS", "MART", "MARY", "MASH", "MASK", "MASS", "MAST", "MATE",
"MATH", "MAUL", "MAYO", "MEAD", "MEAL", "MEAN", "MEAT", "MEEK",
"MEET", "MELD", "MELT", "MEMO", "MEND", "MENU", "MERT", "MESH",
"MESS", "MICE", "MIKE", "MILD", "MILE", "MILK", "MILL", "MILT",
"MIMI", "MIND", "MINE", "MINI", "MINK", "MINT", "MIRE", "MISS",
"MIST", "MITE", "MITT", "MOAN", "MOAT", "MOCK", "MODE", "MOLD",
"MOLE", "MOLL", "MOLT", "MONA", "MONK", "MONT", "MOOD", "MOON",
"MOOR", "MOOT", "MORE", "MORN", "MORT", "MOSS", "MOST", "MOTH",
"MOVE", "MUCH", "MUCK", "MUDD", "MUFF", "MULE", "MULL", "MURK",
"MUSH", "MUST", "MUTE", "MUTT", "MYRA", "MYTH", "NAGY", "NAIL",
"NAIR", "NAME", "NARY", "NASH", "NAVE", "NAVY", "NEAL", "NEAR",
"NEAT", "NECK", "NEED", "NEIL", "NELL", "NEON", "NERO", "NESS",
"NEST", "NEWS", "NEWT", "NIBS", "NICE", "NICK", "NILE", "NINA",
"NINE", "NOAH", "NODE", "NOEL", "NOLL", "NONE", "NOOK", "NOON",
"NORM", "NOSE", "NOTE", "NOUN", "NOVA", "NUDE", "NULL", "NUMB",
"OATH", "OBEY", "OBOE", "ODIN", "OHIO", "OILY", "OINT", "OKAY",
"OLAF", "OLDY", "OLGA", "OLIN", "OMAN", "OMEN", "OMIT", "ONCE",
"ONES", "ONLY", "ONTO", "ONUS", "ORAL", "ORGY", "OSLO", "OTIS",
"OTTO", "OUCH", "OUST", "OUTS", "OVAL", "OVEN", "OVER", "OWLY",
"OWNS", "QUAD", "QUIT", "QUOD", "RACE", "RACK", "RACY", "RAFT",
"RAGE", "RAID", "RAIL", "RAIN", "RAKE", "RANK", "RANT", "RARE",
"RASH", "RATE", "RAVE", "RAYS", "READ", "REAL", "REAM", "REAR",
"RECK", "REED", "REEF", "REEK", "REEL", "REID", "REIN", "RENA",
"REND", "RENT", "REST", "RICE", "RICH", "RICK", "RIDE", "RIFT",
"RILL", "RIME", "RING", "RINK", "RISE", "RISK", "RITE", "ROAD",
"ROAM", "ROAR", "ROBE", "ROCK", "RODE", "ROIL", "ROLL", "ROME",
"ROOD", "ROOF", "ROOK", "ROOM", "ROOT", "ROSA", "ROSE", "ROSS",
"ROSY", "ROTH", "ROUT", "ROVE", "ROWE", "ROWS", "RUBE", "RUBY",
"RUDE", "RUDY", "RUIN", "RULE", "RUNG", "RUNS", "RUNT", "RUSE",
"RUSH", "RUSK", "RUSS", "RUST", "RUTH", "SACK", "SAFE", "SAGE",
"SAID", "SAIL", "SALE", "SALK", "SALT", "SAME", "SAND", "SANE",
"SANG", "SANK", "SARA", "SAUL", "SAVE", "SAYS", "SCAN", "SCAR",
"SCAT", "SCOT", "SEAL", "SEAM", "SEAR", "SEAT", "SEED", "SEEK",
"SEEM", "SEEN", "SEES", "SELF", "SELL", "SEND", "SENT", "SETS",
"SEWN", "SHAG", "SHAM", "SHAW", "SHAY", "SHED", "SHIM", "SHIN",
"SHOD", "SHOE", "SHOT", "SHOW", "SHUN", "SHUT", "SICK", "SIDE",
"SIFT", "SIGH", "SIGN", "SILK", "SILL", "SILO", "SILT", "SINE",
"SING", "SINK", "SIRE", "SITE", "SITS", "SITU", "SKAT", "SKEW",
"SKID", "SKIM", "SKIN", "SKIT", "SLAB", "SLAM", "SLAT", "SLAY",
"SLED", "SLEW", "SLID", "SLIM", "SLIT", "SLOB", "SLOG", "SLOT",
"SLOW", "SLUG", "SLUM", "SLUR", "SMOG", "SMUG", "SNAG", "SNOB",
"SNOW", "SNUB", "SNUG", "SOAK", "SOAR", "SOCK", "SODA", "SOFA",
"SOFT", "SOIL", "SOLD", "SOME", "SONG", "SOON", "SOOT", "SORE",
"SORT", "SOUL", "SOUR", "SOWN", "STAB", "STAG", "STAN", "STAR",
"STAY", "STEM", "STEW", "STIR", "STOW", "STUB", "STUN", "SUCH",
"SUDS", "SUIT", "SULK", "SUMS", "SUNG", "SUNK", "SURE", "SURF",
"SWAB", "SWAG", "SWAM", "SWAN", "SWAT", "SWAY", "SWIM", "SWUM",
"TACK", "TACT", "TAIL", "TAKE", "TALE", "TALK", "TALL", "TANK",
"TASK", "TATE", "TAUT", "TEAL", "TEAM", "TEAR", "TECH", "TEEM",
"TEEN", "TEET", "TELL", "TEND", "TENT", "TERM", "TERN", "TESS",
"TEST", "THAN", "THAT", "THEE", "THEM", "THEN", "THEY", "THIN",
"THIS", "THUD", "THUG", "TICK", "TIDE", "TIDY", "TIED", "TIER",
"TILE", "TILL", "TILT", "TIME", "TINA", "TINE", "TINT", "TINY",
"TIRE", "TOAD", "TOGO", "TOIL", "TOLD", "TOLL", "TONE", "TONG",
"TONY", "TOOK", "TOOL", "TOOT", "TORE", "TORN", "TOTE", "TOUR",
"TOUT", "TOWN", "TRAG", "TRAM", "TRAY", "TREE", "TREK", "TRIG",
"TRIM", "TRIO", "TROD", "TROT", "TROY", "TRUE", "TUBA", "TUBE",
"TUCK", "TUFT", "TUNA", "TUNE", "TUNG", "TURF", "TURN", "TUSK",
"TWIG", "TWIN", "TWIT", "ULAN", "UNIT", "URGE", "USED", "USER",
"USES", "UTAH", "VAIL", "VAIN", "VALE", "VARY", "VASE", "VAST",
"VEAL", "VEDA", "VEIL", "VEIN", "VEND", "VENT", "VERB", "VERY",
"VETO", "VICE", "VIEW", "VINE", "VISE", "VOID", "VOLT", "VOTE",
"WACK", "WADE", "WAGE", "WAIL", "WAIT", "WAKE", "WALE", "WALK",
"WALL", "WALT", "WAND", "WANE", "WANG", "WANT", "WARD", "WARM",
"WARN", "WART", "WASH", "WAST", "WATS", "WATT", "WAVE", "WAVY",
"WAYS", "WEAK", "WEAL", "WEAN", "WEAR", "WEED", "WEEK", "WEIR",
"WELD", "WELL", "WELT", "WENT", "WERE", "WERT", "WEST", "WHAM",
"WHAT", "WHEE", "WHEN", "WHET", "WHOA", "WHOM", "WICK", "WIFE",
"WILD", "WILL", "WIND", "WINE", "WING", "WINK", "WINO", "WIRE",
"WISE", "WISH", "WITH", "WOLF", "WONT", "WOOD", "WOOL", "WORD",
"WORE", "WORK", "WORM", "WORN", "WOVE", "WRIT", "WYNN", "YALE",
"YANG", "YANK", "YARD", "YARN", "YAWL", "YAWN", "YEAH", "YEAR",
"YELL", "YOGA", "YOKE" ]
if __name__ == '__main__':
data = [('EB33F77EE73D4053', 'TIDE ITCH SLOW REIN RULE MOT'),
('CCAC2AED591056BE4F90FD441C534766',
'RASH BUSH MILK LOOK BAD BRIM AVID GAFF BAIT ROT POD LOVE'),
('EFF81F9BFBC65350920CDD7416DE8009',
'TROD MUTE TAIL WARM CHAR KONG HAAG CITY BORE O TEAL AWL')
]
for key, words in data:
print('Trying key', key)
key=binascii.a2b_hex(key)
w2=key_to_english(key)
if w2!=words:
print('key_to_english fails on key', repr(key), ', producing', str(w2))
k2=english_to_key(words)
if k2!=key:
print('english_to_key fails on key', repr(key), ', producing', repr(k2))
| apache-2.0 |
tp-openstack-m2dl/TP-MapReduce | python/mapreduce.py | 1 | 1232 | from multiprocessing import Pool
"""
A generator function for chopping up a given list into chunks of
length n.
"""
def chunks(l, n):
for i in xrange(0, len(l), n):
yield l[i:i+n]
def MapReduce(Map, Reduce, text):
    # Use at most 8 worker processes; fewer when the input is shorter.
    l = min(len(text), 8)
    # Fragment the input into l roughly equal chunks (integer division --
    # this module is Python 2 code; note the xrange in chunks() above).
    partitioned_text = list(chunks(text, len(text) / l))
# Build a pool of l processes
pool = Pool(processes=l,)
# Generate count tuples for title-cased tokens
single_count_tuples = pool.map(Map, partitioned_text)
# Organize the count tuples; lists of tuples by token key
token_to_tuples = Partition(single_count_tuples)
# Collapse the lists of tuples into total term frequencies
term_frequencies = pool.map(Reduce, token_to_tuples.items())
return term_frequencies
"""
Group the sublists of (token, 1) pairs into a term-frequency-list
map, so that the Reduce operation later can work on sorted
term counts. The returned result is a dictionary with the structure
{token : [(token, 1), ...] .. }
"""
def Partition(L):
tf = {}
for sublist in L:
for p in sublist:
# Append the tuple to the list in the map
try:
tf[p[0]].append (p)
except KeyError:
tf[p[0]] = [p]
return tf
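# --- Hedged usage sketch: the word-count Map/Reduce pair below is our own
# example; the module itself only ships the driver (MapReduce) and the
# shuffle step (Partition). ---
def word_count_map(chunk):
    # Emit one (token, 1) pair per token in the chunk.
    return [(token, 1) for token in chunk]
def word_count_reduce(item):
    # item is (token, [(token, 1), ...]); collapse it to (token, total).
    token, pairs = item
    return (token, sum(count for _token, count in pairs))
if __name__ == '__main__':
    tokens = "the quick brown fox jumps over the lazy dog the end".split()
    # Expect ('the', 3) among the resulting term frequencies.
    print(MapReduce(word_count_map, word_count_reduce, tokens))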
| apache-2.0 |
pitch-sands/i-MPI | flask/Lib/site-packages/openid/message.py | 146 | 21562 | """Extension argument processing code
"""
__all__ = ['Message', 'NamespaceMap', 'no_default', 'registerNamespaceAlias',
'OPENID_NS', 'BARE_NS', 'OPENID1_NS', 'OPENID2_NS', 'SREG_URI',
'IDENTIFIER_SELECT']
import copy
import warnings
import urllib
from openid import oidutil
from openid import kvform
try:
ElementTree = oidutil.importElementTree()
except ImportError:
# No elementtree found, so give up, but don't fail to import,
# since we have fallbacks.
ElementTree = None
# This doesn't REALLY belong here, but where is better?
IDENTIFIER_SELECT = 'http://specs.openid.net/auth/2.0/identifier_select'
# URI for Simple Registration extension, the only commonly deployed
# OpenID 1.x extension, and so a special case
SREG_URI = 'http://openid.net/sreg/1.0'
# The OpenID 1.X namespace URI
OPENID1_NS = 'http://openid.net/signon/1.0'
THE_OTHER_OPENID1_NS = 'http://openid.net/signon/1.1'
OPENID1_NAMESPACES = OPENID1_NS, THE_OTHER_OPENID1_NS
# The OpenID 2.0 namespace URI
OPENID2_NS = 'http://specs.openid.net/auth/2.0'
# The namespace consisting of pairs with keys that are prefixed with
# "openid." but not in another namespace.
NULL_NAMESPACE = oidutil.Symbol('Null namespace')
# The null namespace, when it is an allowed OpenID namespace
OPENID_NS = oidutil.Symbol('OpenID namespace')
# The top-level namespace, excluding all pairs with keys that start
# with "openid."
BARE_NS = oidutil.Symbol('Bare namespace')
# Limit, in bytes, of identity provider and return_to URLs, including
# response payload. See OpenID 1.1 specification, Appendix D.
OPENID1_URL_LIMIT = 2047
# All OpenID protocol fields. Used to check namespace aliases.
OPENID_PROTOCOL_FIELDS = [
'ns', 'mode', 'error', 'return_to', 'contact', 'reference',
'signed', 'assoc_type', 'session_type', 'dh_modulus', 'dh_gen',
'dh_consumer_public', 'claimed_id', 'identity', 'realm',
'invalidate_handle', 'op_endpoint', 'response_nonce', 'sig',
'assoc_handle', 'trust_root', 'openid',
]
class UndefinedOpenIDNamespace(ValueError):
"""Raised if the generic OpenID namespace is accessed when there
is no OpenID namespace set for this message."""
class InvalidOpenIDNamespace(ValueError):
"""Raised if openid.ns is not a recognized value.
For recognized values, see L{Message.allowed_openid_namespaces}
"""
def __str__(self):
s = "Invalid OpenID Namespace"
if self.args:
s += " %r" % (self.args[0],)
return s
# Sentinel used for Message implementation to indicate that getArg
# should raise an exception instead of returning a default.
no_default = object()
# Global namespace / alias registration map. See
# registerNamespaceAlias.
registered_aliases = {}
class NamespaceAliasRegistrationError(Exception):
"""
Raised when an alias or namespace URI has already been registered.
"""
pass
def registerNamespaceAlias(namespace_uri, alias):
"""
Registers a (namespace URI, alias) mapping in a global namespace
alias map. Raises NamespaceAliasRegistrationError if either the
namespace URI or alias has already been registered with a
different value. This function is required if you want to use a
namespace with an OpenID 1 message.
"""
global registered_aliases
if registered_aliases.get(alias) == namespace_uri:
return
if namespace_uri in registered_aliases.values():
raise NamespaceAliasRegistrationError, \
'Namespace uri %r already registered' % (namespace_uri,)
if alias in registered_aliases:
raise NamespaceAliasRegistrationError, \
'Alias %r already registered' % (alias,)
registered_aliases[alias] = namespace_uri
class Message(object):
"""
In the implementation of this object, None represents the global
namespace as well as a namespace with no key.
@cvar namespaces: A dictionary specifying specific
namespace-URI to alias mappings that should be used when
generating namespace aliases.
@ivar ns_args: two-level dictionary of the values in this message,
grouped by namespace URI. The first level is the namespace
URI.
"""
allowed_openid_namespaces = [OPENID1_NS, THE_OTHER_OPENID1_NS, OPENID2_NS]
def __init__(self, openid_namespace=None):
"""Create an empty Message.
@raises InvalidOpenIDNamespace: if openid_namespace is not in
L{Message.allowed_openid_namespaces}
"""
self.args = {}
self.namespaces = NamespaceMap()
if openid_namespace is None:
self._openid_ns_uri = None
else:
implicit = openid_namespace in OPENID1_NAMESPACES
self.setOpenIDNamespace(openid_namespace, implicit)
def fromPostArgs(cls, args):
"""Construct a Message containing a set of POST arguments.
"""
self = cls()
# Partition into "openid." args and bare args
openid_args = {}
for key, value in args.items():
if isinstance(value, list):
raise TypeError("query dict must have one value for each key, "
"not lists of values. Query is %r" % (args,))
try:
prefix, rest = key.split('.', 1)
except ValueError:
prefix = None
if prefix != 'openid':
self.args[(BARE_NS, key)] = value
else:
openid_args[rest] = value
self._fromOpenIDArgs(openid_args)
return self
fromPostArgs = classmethod(fromPostArgs)
def fromOpenIDArgs(cls, openid_args):
"""Construct a Message from a parsed KVForm message.
@raises InvalidOpenIDNamespace: if openid.ns is not in
L{Message.allowed_openid_namespaces}
"""
self = cls()
self._fromOpenIDArgs(openid_args)
return self
fromOpenIDArgs = classmethod(fromOpenIDArgs)
def _fromOpenIDArgs(self, openid_args):
ns_args = []
# Resolve namespaces
for rest, value in openid_args.iteritems():
try:
ns_alias, ns_key = rest.split('.', 1)
except ValueError:
ns_alias = NULL_NAMESPACE
ns_key = rest
if ns_alias == 'ns':
self.namespaces.addAlias(value, ns_key)
elif ns_alias == NULL_NAMESPACE and ns_key == 'ns':
# null namespace
self.setOpenIDNamespace(value, False)
else:
ns_args.append((ns_alias, ns_key, value))
# Implicitly set an OpenID namespace definition (OpenID 1)
if not self.getOpenIDNamespace():
self.setOpenIDNamespace(OPENID1_NS, True)
# Actually put the pairs into the appropriate namespaces
for (ns_alias, ns_key, value) in ns_args:
ns_uri = self.namespaces.getNamespaceURI(ns_alias)
if ns_uri is None:
# we found a namespaced arg without a namespace URI defined
ns_uri = self._getDefaultNamespace(ns_alias)
if ns_uri is None:
ns_uri = self.getOpenIDNamespace()
ns_key = '%s.%s' % (ns_alias, ns_key)
else:
self.namespaces.addAlias(ns_uri, ns_alias, implicit=True)
self.setArg(ns_uri, ns_key, value)
def _getDefaultNamespace(self, mystery_alias):
"""OpenID 1 compatibility: look for a default namespace URI to
use for this alias."""
global registered_aliases
# Only try to map an alias to a default if it's an
# OpenID 1.x message.
if self.isOpenID1():
return registered_aliases.get(mystery_alias)
else:
return None
def setOpenIDNamespace(self, openid_ns_uri, implicit):
"""Set the OpenID namespace URI used in this message.
@raises InvalidOpenIDNamespace: if the namespace is not in
L{Message.allowed_openid_namespaces}
"""
if openid_ns_uri not in self.allowed_openid_namespaces:
raise InvalidOpenIDNamespace(openid_ns_uri)
self.namespaces.addAlias(openid_ns_uri, NULL_NAMESPACE, implicit)
self._openid_ns_uri = openid_ns_uri
def getOpenIDNamespace(self):
return self._openid_ns_uri
def isOpenID1(self):
return self.getOpenIDNamespace() in OPENID1_NAMESPACES
def isOpenID2(self):
return self.getOpenIDNamespace() == OPENID2_NS
def fromKVForm(cls, kvform_string):
"""Create a Message from a KVForm string"""
return cls.fromOpenIDArgs(kvform.kvToDict(kvform_string))
fromKVForm = classmethod(fromKVForm)
def copy(self):
return copy.deepcopy(self)
def toPostArgs(self):
"""Return all arguments with openid. in front of namespaced arguments.
"""
args = {}
# Add namespace definitions to the output
for ns_uri, alias in self.namespaces.iteritems():
if self.namespaces.isImplicit(ns_uri):
continue
if alias == NULL_NAMESPACE:
ns_key = 'openid.ns'
else:
ns_key = 'openid.ns.' + alias
args[ns_key] = ns_uri
for (ns_uri, ns_key), value in self.args.iteritems():
key = self.getKey(ns_uri, ns_key)
args[key] = value.encode('UTF-8')
return args
def toArgs(self):
"""Return all namespaced arguments, failing if any
non-namespaced arguments exist."""
# FIXME - undocumented exception
post_args = self.toPostArgs()
kvargs = {}
for k, v in post_args.iteritems():
if not k.startswith('openid.'):
raise ValueError(
'This message can only be encoded as a POST, because it '
'contains arguments that are not prefixed with "openid."')
else:
kvargs[k[7:]] = v
return kvargs
def toFormMarkup(self, action_url, form_tag_attrs=None,
submit_text="Continue"):
"""Generate HTML form markup that contains the values in this
message, to be HTTP POSTed as x-www-form-urlencoded UTF-8.
@param action_url: The URL to which the form will be POSTed
@type action_url: str
@param form_tag_attrs: Dictionary of attributes to be added to
the form tag. 'accept-charset' and 'enctype' have defaults
that can be overridden. If a value is supplied for
'action' or 'method', it will be replaced.
@type form_tag_attrs: {unicode: unicode}
@param submit_text: The text that will appear on the submit
button for this form.
@type submit_text: unicode
@returns: A string containing (X)HTML markup for a form that
encodes the values in this Message object.
@rtype: str or unicode
"""
if ElementTree is None:
raise RuntimeError('This function requires ElementTree.')
assert action_url is not None
form = ElementTree.Element('form')
if form_tag_attrs:
for name, attr in form_tag_attrs.iteritems():
form.attrib[name] = attr
form.attrib['action'] = action_url
form.attrib['method'] = 'post'
form.attrib['accept-charset'] = 'UTF-8'
form.attrib['enctype'] = 'application/x-www-form-urlencoded'
for name, value in self.toPostArgs().iteritems():
attrs = {'type': 'hidden',
'name': name,
'value': value}
form.append(ElementTree.Element('input', attrs))
submit = ElementTree.Element(
'input', {'type':'submit', 'value':submit_text})
form.append(submit)
return ElementTree.tostring(form)
def toURL(self, base_url):
"""Generate a GET URL with the parameters in this message
attached as query parameters."""
return oidutil.appendArgs(base_url, self.toPostArgs())
def toKVForm(self):
"""Generate a KVForm string that contains the parameters in
this message. This will fail if the message contains arguments
outside of the 'openid.' prefix.
"""
return kvform.dictToKV(self.toArgs())
def toURLEncoded(self):
"""Generate an x-www-urlencoded string"""
args = self.toPostArgs().items()
args.sort()
return urllib.urlencode(args)
def _fixNS(self, namespace):
"""Convert an input value into the internally used values of
this object
@param namespace: The string or constant to convert
@type namespace: str or unicode or BARE_NS or OPENID_NS
"""
if namespace == OPENID_NS:
if self._openid_ns_uri is None:
raise UndefinedOpenIDNamespace('OpenID namespace not set')
else:
namespace = self._openid_ns_uri
if namespace != BARE_NS and type(namespace) not in [str, unicode]:
raise TypeError(
"Namespace must be BARE_NS, OPENID_NS or a string. got %r"
% (namespace,))
if namespace != BARE_NS and ':' not in namespace:
fmt = 'OpenID 2.0 namespace identifiers SHOULD be URIs. Got %r'
warnings.warn(fmt % (namespace,), DeprecationWarning)
if namespace == 'sreg':
fmt = 'Using %r instead of "sreg" as namespace'
warnings.warn(fmt % (SREG_URI,), DeprecationWarning,)
return SREG_URI
return namespace
def hasKey(self, namespace, ns_key):
namespace = self._fixNS(namespace)
return (namespace, ns_key) in self.args
def getKey(self, namespace, ns_key):
"""Get the key for a particular namespaced argument"""
namespace = self._fixNS(namespace)
if namespace == BARE_NS:
return ns_key
ns_alias = self.namespaces.getAlias(namespace)
# No alias is defined, so no key can exist
if ns_alias is None:
return None
if ns_alias == NULL_NAMESPACE:
tail = ns_key
else:
tail = '%s.%s' % (ns_alias, ns_key)
return 'openid.' + tail
def getArg(self, namespace, key, default=None):
"""Get a value for a namespaced key.
@param namespace: The namespace in the message for this key
@type namespace: str
@param key: The key to get within this namespace
@type key: str
@param default: The value to use if this key is absent from
this message. Using the special value
openid.message.no_default will result in this method
raising a KeyError instead of returning the default.
@rtype: str or the type of default
@raises KeyError: if default is no_default
@raises UndefinedOpenIDNamespace: if the message has not yet
had an OpenID namespace set
"""
namespace = self._fixNS(namespace)
args_key = (namespace, key)
try:
return self.args[args_key]
except KeyError:
if default is no_default:
raise KeyError((namespace, key))
else:
return default
def getArgs(self, namespace):
"""Get the arguments that are defined for this namespace URI
@returns: mapping from namespaced keys to values
@returntype: dict
"""
namespace = self._fixNS(namespace)
return dict([
(ns_key, value)
for ((pair_ns, ns_key), value)
in self.args.iteritems()
if pair_ns == namespace
])
def updateArgs(self, namespace, updates):
"""Set multiple key/value pairs in one call
@param updates: The values to set
@type updates: {unicode:unicode}
"""
namespace = self._fixNS(namespace)
for k, v in updates.iteritems():
self.setArg(namespace, k, v)
def setArg(self, namespace, key, value):
"""Set a single argument in this namespace"""
assert key is not None
assert value is not None
namespace = self._fixNS(namespace)
self.args[(namespace, key)] = value
if not (namespace is BARE_NS):
self.namespaces.add(namespace)
def delArg(self, namespace, key):
namespace = self._fixNS(namespace)
del self.args[(namespace, key)]
def __repr__(self):
return "<%s.%s %r>" % (self.__class__.__module__,
self.__class__.__name__,
self.args)
def __eq__(self, other):
return self.args == other.args
def __ne__(self, other):
return not (self == other)
def getAliasedArg(self, aliased_key, default=None):
if aliased_key == 'ns':
return self.getOpenIDNamespace()
if aliased_key.startswith('ns.'):
uri = self.namespaces.getNamespaceURI(aliased_key[3:])
if uri is None:
if default == no_default:
raise KeyError
else:
return default
else:
return uri
try:
alias, key = aliased_key.split('.', 1)
except ValueError:
# need more than x values to unpack
ns = None
else:
ns = self.namespaces.getNamespaceURI(alias)
if ns is None:
key = aliased_key
ns = self.getOpenIDNamespace()
return self.getArg(ns, key, default)
class NamespaceMap(object):
"""Maintains a bijective map between namespace uris and aliases.
"""
def __init__(self):
self.alias_to_namespace = {}
self.namespace_to_alias = {}
self.implicit_namespaces = []
def getAlias(self, namespace_uri):
return self.namespace_to_alias.get(namespace_uri)
def getNamespaceURI(self, alias):
return self.alias_to_namespace.get(alias)
def iterNamespaceURIs(self):
"""Return an iterator over the namespace URIs"""
return iter(self.namespace_to_alias)
def iterAliases(self):
"""Return an iterator over the aliases"""
return iter(self.alias_to_namespace)
def iteritems(self):
"""Iterate over the mapping
@returns: iterator of (namespace_uri, alias)
"""
return self.namespace_to_alias.iteritems()
def addAlias(self, namespace_uri, desired_alias, implicit=False):
"""Add an alias from this namespace URI to the desired alias
"""
# Check that desired_alias is not an openid protocol field as
# per the spec.
assert desired_alias not in OPENID_PROTOCOL_FIELDS, \
"%r is not an allowed namespace alias" % (desired_alias,)
# Check that desired_alias does not contain a period as per
# the spec.
if type(desired_alias) in [str, unicode]:
assert '.' not in desired_alias, \
"%r must not contain a dot" % (desired_alias,)
# Check that there is not a namespace already defined for
# the desired alias
current_namespace_uri = self.alias_to_namespace.get(desired_alias)
if (current_namespace_uri is not None
and current_namespace_uri != namespace_uri):
fmt = ('Cannot map %r to alias %r. '
'%r is already mapped to alias %r')
msg = fmt % (
namespace_uri,
desired_alias,
current_namespace_uri,
desired_alias)
raise KeyError(msg)
# Check that there is not already a (different) alias for
# this namespace URI
alias = self.namespace_to_alias.get(namespace_uri)
if alias is not None and alias != desired_alias:
fmt = ('Cannot map %r to alias %r. '
'It is already mapped to alias %r')
raise KeyError(fmt % (namespace_uri, desired_alias, alias))
assert (desired_alias == NULL_NAMESPACE or
type(desired_alias) in [str, unicode]), repr(desired_alias)
assert namespace_uri not in self.implicit_namespaces
self.alias_to_namespace[desired_alias] = namespace_uri
self.namespace_to_alias[namespace_uri] = desired_alias
if implicit:
self.implicit_namespaces.append(namespace_uri)
return desired_alias
def add(self, namespace_uri):
"""Add this namespace URI to the mapping, without caring what
alias it ends up with"""
# See if this namespace is already mapped to an alias
alias = self.namespace_to_alias.get(namespace_uri)
if alias is not None:
return alias
# Fall back to generating a numerical alias
i = 0
while True:
alias = 'ext' + str(i)
try:
self.addAlias(namespace_uri, alias)
except KeyError:
i += 1
else:
return alias
assert False, "Not reached"
def isDefined(self, namespace_uri):
return namespace_uri in self.namespace_to_alias
def __contains__(self, namespace_uri):
return self.isDefined(namespace_uri)
def isImplicit(self, namespace_uri):
return namespace_uri in self.implicit_namespaces
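# --- Hedged usage sketch (our own example, not part of the library): a
# round trip through the API documented above. The sample address uses the
# reserved example.com domain. ---
if __name__ == '__main__':
    post_args = {
        'openid.ns': OPENID2_NS,
        'openid.mode': 'checkid_setup',
        'openid.ns.sreg': SREG_URI,
        'openid.sreg.email': 'user@example.com',
    }
    msg = Message.fromPostArgs(post_args)
    assert msg.isOpenID2()
    assert msg.getArg(SREG_URI, 'email') == 'user@example.com'
    # toPostArgs() restores the aliased wire form.
    assert msg.toPostArgs()['openid.sreg.email'] == 'user@example.com'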
| bsd-3-clause |
edisonlz/fruit | web_project/app/content/views/city.py | 1 | 3136 | # coding=utf-8
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
import json
from app.content.models import City,ShoppingAddress
from django.db import transaction
from app.content.models import Status
@login_required
def cms_city(request):
if request.method == 'GET':
citys = City.all()
return render(request, 'city/city.html', {
'citys': citys,
})
@login_required
def cms_city_create(request):
if request.method == 'POST':
name = request.POST.get("name")
city_code = request.POST.get("city_code")
phone = request.POST.get("phone")
manager = request.POST.get("manager")
city = City()
city.name = name
city.phone = phone
city.city_code = city_code
city.manager = manager
city.status = Status.StatusOpen
city.save()
response = {'status': 'success'}
return HttpResponse(json.dumps(response), content_type="application/json")
else:
response = {'status': 'fail'}
return HttpResponse(json.dumps(response), content_type="application/json")
@login_required
def cms_city_update(request):
if request.method == 'POST':
pk = request.POST.get("pk")
name = request.POST.get("name")
city_code = request.POST.get("city_code")
phone = request.POST.get("phone")
manager = request.POST.get("manager")
city = City.objects.get(id=pk)
city.name = name
city.phone = phone
city.city_code = city_code
city.manager = manager
city.save()
response = {'status': 'success'}
return HttpResponse(json.dumps(response), content_type="application/json")
else:
pk = request.GET.get("pk")
city = City.objects.get(id=pk)
return render(request, 'city/edit_city.html', {
'city': city,
})
@login_required
def delete(request):
if request.method == 'POST':
pk = request.POST.get("id")
city = City.objects.get(id=pk)
        count = ShoppingAddress.objects.filter(city=city).count()
if count > 0:
            response = {'status': 'fail', "message": "City has related records and cannot be deleted!"}
return HttpResponse(json.dumps(response), content_type="application/json")
city.is_delete = True
city.save()
response = {'status': 'success'}
return HttpResponse(json.dumps(response), content_type="application/json")
else:
response = {'status': 'fail'}
return HttpResponse(json.dumps(response), content_type="application/json")
@login_required
def update_status(request):
pk = request.POST.get("pk")
value = int(request.POST.get("value")[0])
city = City.objects.get(id=pk)
city.state = value
city.save()
response = {'status': 'success'}
return HttpResponse(json.dumps(response), content_type="application/json")
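# --- Hedged wiring sketch (the route patterns below are our own assumptions;
# the app's real urls.py is not shown). Django 1.x style, matching the
# django.core.urlresolvers import above:
#
#   from django.conf.urls import url
#   from app.content.views import city
#
#   urlpatterns = [
#       url(r'^cms/city/$', city.cms_city),
#       url(r'^cms/city/create/$', city.cms_city_create),
#       url(r'^cms/city/update/$', city.cms_city_update),
#       url(r'^cms/city/delete/$', city.delete),
#       url(r'^cms/city/status/$', city.update_status),
#   ]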
| apache-2.0 |
cbanta/pjproject | tests/pjsua/scripts-recvfrom/208_reg_good_retry_nonce_ok.py | 42 | 1029 | # $Id$
import inc_sip as sip
import inc_sdp as sdp
pjsua = "--null-audio --id=sip:CLIENT --registrar sip:127.0.0.1:$PORT " + \
"--realm=python --user=username --password=password " + \
"--auto-update-nat=0"
req1 = sip.RecvfromTransaction("Initial request", 401,
include=["REGISTER sip"],
exclude=["Authorization"],
resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"1\""]
)
req2 = sip.RecvfromTransaction("REGISTER first retry", 401,
include=["REGISTER sip", "Authorization", "nonce=\"1\""],
exclude=["Authorization:[\\s\\S]+Authorization:"],
resp_hdr=["WWW-Authenticate: Digest realm=\"python\", nonce=\"2\", stale=true"]
)
req3 = sip.RecvfromTransaction("REGISTER retry with new nonce", 200,
include=["REGISTER sip", "Authorization", "nonce=\"2\""],
exclude=["Authorization:[\\s\\S]+Authorization:"],
expect="registration success"
)
recvfrom_cfg = sip.RecvfromCfg("Authentication okay after retry with new nonce",
pjsua, [req1, req2, req3])
| gpl-2.0 |
acshan/odoo | addons/sale_layout/models/sale_layout.py | 180 | 5037 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.osv import osv, fields
from itertools import groupby
def grouplines(self, ordered_lines, sortkey):
"""Return lines from a specified invoice or sale order grouped by category"""
grouped_lines = []
for key, valuesiter in groupby(ordered_lines, sortkey):
group = {}
group['category'] = key
group['lines'] = list(v for v in valuesiter)
if 'subtotal' in key and key.subtotal is True:
group['subtotal'] = sum(line.price_subtotal for line in group['lines'])
grouped_lines.append(group)
return grouped_lines
class SaleLayoutCategory(osv.Model):
_name = 'sale_layout.category'
_order = 'sequence, id'
_columns = {
'name': fields.char('Name', required=True),
'sequence': fields.integer('Sequence', required=True),
'subtotal': fields.boolean('Add subtotal'),
'separator': fields.boolean('Add separator'),
'pagebreak': fields.boolean('Add pagebreak')
}
_defaults = {
'subtotal': True,
'separator': True,
'pagebreak': False,
'sequence': 10
}
class AccountInvoice(osv.Model):
_inherit = 'account.invoice'
def sale_layout_lines(self, cr, uid, ids, invoice_id=None, context=None):
"""
Returns invoice lines from a specified invoice ordered by
sale_layout_category sequence. Used in sale_layout module.
:Parameters:
-'invoice_id' (int): specify the concerned invoice.
"""
ordered_lines = self.browse(cr, uid, invoice_id, context=context).invoice_line
# We chose to group first by category model and, if not present, by invoice name
sortkey = lambda x: x.sale_layout_cat_id if x.sale_layout_cat_id else ''
return grouplines(self, ordered_lines, sortkey)
class AccountInvoiceLine(osv.Model):
_inherit = 'account.invoice.line'
_order = 'invoice_id, categ_sequence, sequence, id'
sale_layout_cat_id = openerp.fields.Many2one('sale_layout.category', string='Section')
categ_sequence = openerp.fields.Integer(related='sale_layout_cat_id.sequence',
string='Layout Sequence', store=True)
_defaults = {
'categ_sequence': 0
}
class SaleOrder(osv.Model):
_inherit = 'sale.order'
def sale_layout_lines(self, cr, uid, ids, order_id=None, context=None):
"""
Returns order lines from a specified sale ordered by
sale_layout_category sequence. Used in sale_layout module.
:Parameters:
-'order_id' (int): specify the concerned sale order.
"""
ordered_lines = self.browse(cr, uid, order_id, context=context).order_line
sortkey = lambda x: x.sale_layout_cat_id if x.sale_layout_cat_id else ''
return grouplines(self, ordered_lines, sortkey)
class SaleOrderLine(osv.Model):
_inherit = 'sale.order.line'
_columns = {
'sale_layout_cat_id': fields.many2one('sale_layout.category',
string='Section'),
'categ_sequence': fields.related('sale_layout_cat_id',
'sequence', type='integer',
string='Layout Sequence', store=True)
# Store is intentionally set in order to keep the "historic" order.
}
_defaults = {
'categ_sequence': 0
}
_order = 'order_id, categ_sequence, sale_layout_cat_id, sequence, id'
def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
"""Save the layout when converting to an invoice line."""
invoice_vals = super(SaleOrderLine, self)._prepare_order_line_invoice_line(cr, uid, line, account_id=account_id, context=context)
if line.sale_layout_cat_id:
invoice_vals['sale_layout_cat_id'] = line.sale_layout_cat_id.id
if line.categ_sequence:
invoice_vals['categ_sequence'] = line.categ_sequence
return invoice_vals
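# --- Hedged demonstration: the stand-in objects below are our own; the real
# inputs to grouplines() are odoo browse records. The function relies only on
# duck typing: the sort key must answer "'subtotal' in key" and expose
# .subtotal, and each line must expose .price_subtotal. ---
class _FakeCategory(object):
    def __init__(self, name, subtotal):
        self.name = name
        self.subtotal = subtotal
    def __contains__(self, field_name):
        # Mimic the browse-record field test used in grouplines().
        return hasattr(self, field_name)
class _FakeLine(object):
    def __init__(self, category, price_subtotal):
        self.sale_layout_cat_id = category
        self.price_subtotal = price_subtotal
if __name__ == '__main__':
    hardware = _FakeCategory('Hardware', subtotal=True)
    demo_lines = [_FakeLine(hardware, 100.0), _FakeLine(hardware, 50.0)]
    demo_groups = grouplines(None, demo_lines, lambda l: l.sale_layout_cat_id)
    assert demo_groups[0]['subtotal'] == 150.0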
| agpl-3.0 |
blossomica/airmozilla | airmozilla/main/migrations/0002_auto_20150903_1343.py | 9 | 9951 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('auth', '0001_initial'),
('uploads', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='suggestedevent',
name='upload',
field=models.ForeignKey(related_name='upload', to='uploads.Upload', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='suggestedevent',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='recruitmentmessage',
name='modified_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='picture',
name='event',
field=models.ForeignKey(related_name='picture_event', to='main.Event', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='picture',
name='modified_user',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='locationdefaultenvironment',
name='location',
field=models.ForeignKey(to='main.Location'),
preserve_default=True,
),
migrations.AddField(
model_name='locationdefaultenvironment',
name='template',
field=models.ForeignKey(to='main.Template'),
preserve_default=True,
),
migrations.AlterUniqueTogether(
name='locationdefaultenvironment',
unique_together=set([('location', 'privacy', 'template')]),
),
migrations.AddField(
model_name='location',
name='regions',
field=models.ManyToManyField(to='main.Region', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventtweet',
name='creator',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventtweet',
name='event',
field=models.ForeignKey(to='main.Event'),
preserve_default=True,
),
migrations.AddField(
model_name='eventrevision',
name='channels',
field=models.ManyToManyField(to='main.Channel'),
preserve_default=True,
),
migrations.AddField(
model_name='eventrevision',
name='event',
field=models.ForeignKey(to='main.Event'),
preserve_default=True,
),
migrations.AddField(
model_name='eventrevision',
name='picture',
field=models.ForeignKey(blank=True, to='main.Picture', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventrevision',
name='recruitmentmessage',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to='main.RecruitmentMessage', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventrevision',
name='tags',
field=models.ManyToManyField(to='main.Tag', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventrevision',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventoldslug',
name='event',
field=models.ForeignKey(to='main.Event'),
preserve_default=True,
),
migrations.AddField(
model_name='eventlivehits',
name='event',
field=models.ForeignKey(to='main.Event', unique=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventhitstats',
name='event',
field=models.ForeignKey(to='main.Event', unique=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventemail',
name='event',
field=models.ForeignKey(to='main.Event'),
preserve_default=True,
),
migrations.AddField(
model_name='eventemail',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='eventassignment',
name='event',
field=models.ForeignKey(to='main.Event', unique=True),
preserve_default=True,
),
migrations.AddField(
model_name='eventassignment',
name='locations',
field=models.ManyToManyField(to='main.Location'),
preserve_default=True,
),
migrations.AddField(
model_name='eventassignment',
name='users',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='channels',
field=models.ManyToManyField(to='main.Channel'),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='creator',
field=models.ForeignKey(related_name='creator', on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='location',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='main.Location', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='modified_user',
field=models.ForeignKey(related_name='modified_user', on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='picture',
field=models.ForeignKey(related_name='event_picture', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='main.Picture', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='recruitmentmessage',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, to='main.RecruitmentMessage', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='tags',
field=models.ManyToManyField(to='main.Tag', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='template',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='main.Template', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='topics',
field=models.ManyToManyField(to='main.Topic'),
preserve_default=True,
),
migrations.AddField(
model_name='event',
name='upload',
field=models.ForeignKey(related_name='event_upload', on_delete=django.db.models.deletion.SET_NULL, to='uploads.Upload', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='curatedgroup',
name='event',
field=models.ForeignKey(to='main.Event'),
preserve_default=True,
),
migrations.AddField(
model_name='chapter',
name='event',
field=models.ForeignKey(to='main.Event'),
preserve_default=True,
),
migrations.AddField(
model_name='chapter',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
preserve_default=True,
),
migrations.AddField(
model_name='channel',
name='parent',
field=models.ForeignKey(to='main.Channel', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='approval',
name='event',
field=models.ForeignKey(to='main.Event'),
preserve_default=True,
),
migrations.AddField(
model_name='approval',
name='group',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='auth.Group', null=True),
preserve_default=True,
),
migrations.AddField(
model_name='approval',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
]
| bsd-3-clause |
veltzer/demos-linux | config/project.py | 1 | 4910 | import datetime
import config.general
project_github_username='veltzer'
project_name='demos-linux'
project_website='https://{project_github_username}.github.io/{project_name}'.format(**locals())
project_website_source='https://github.com/{project_github_username}/{project_name}'.format(**locals())
project_website_git='git://github.com/{project_github_username}/{project_name}.git'.format(**locals())
project_long_description='{project_name} is a project to demo and explore the Linux user space C/C++ API'.format(**locals())
project_year_started='2011'
project_description='''This project is a source code repository for instructors or expert programmers
who want to explore the Linux C/C++ API.
It has about 1000 examples (as of 1/2020) I found are useful in explaining the Linux API.
The idea is to provide a good coverage of all major features and to resolve disputes
about exactly how a certain API works.
You can find the project at {project_website}
Topics covered by the examples
------------------------------
* Multi-threading
* Multi-processing
* Locking
* Calling system calls without C
* Performance
* Coding in assembly
* Various compiler directives
* Many more...
Platforms supported
-------------------
Only ia64 is supported. I used to support i386 but I don't have a 32 bit
machine anymore.
Other platforms will be supported if someone is willing to do the work and submit
the patches.
Contributing
------------
This project needs help. fork, commit and request me to pull.
Just open a github account, modify and add examples, commit and ask me to pull...
A lot of the code is documented but some of it is not. More documentation would be welcome.
I would give attribution to whomever contributes.
License
-------
Code is licensed GPL3 and I hold the copyright unless explicity stolen as attributed in the source code.
I encourage people to use this source code as aid for instructing courses.
Please give me some credit if you do wind up using this package and consider dropping
me a note about the fact that you did use the package so I could feel good...
Similar projects
----------------
Michael Kerrisk, the maintainer and author of many of the Linux manual pages has a similar project he calls
TLPI (The Linux Programming Interface) of programs he used in his book of the same title. You can find
it here http://man7.org/tlpi/code/.
Using it
--------
* you need python on your machine:
try:
`python --version`
if python is missing then:
for deb based distributions (debian, ubuntu, ...) do:
`sudo apt-get install python`
for rpm based distributions (fedora, redhat, centos,...) do:
`sudo yum install python`
* clone the examples: `git clone [email protected]:veltzer/demos-linux.git`
* cd into it: `cd demos-linux`
* install the missing packages and headers needed to compile and run this project `./scripts/ubuntu_install.py`
Mind you this only works for 15.10 and will install a ton of stuff.
If you don't want this ton of installations and only want to checkout specific examples
compile the individual examples as described below.
if you are on a different Linux distribution try to get as many of these for your platform
as you can. If you are really into contributing I would love a `redhat_install.py` or some such...
then use `make`
* compiling a specific example
`make src/examples/performance/cache_misser.elf`
the name of the elf binary is the same as the example source code with .elf instead of
.[c|cc].
You must be at the root of the project to issue the `make` command.
* the most important part: tweak the examples, try to prove me (my comments) wrong, have fun!
'''.format(**locals())
project_keywords=[
'linux',
'API',
'C',
'C++',
'kernel',
'userspace',
'examples',
'samples',
'demos',
]
# deb section
deb_package=False
if str(config.general.general_current_year) == project_year_started:
    project_copyright_years = config.general.general_current_year
else:
    project_copyright_years = '{0}-{1}'.format(project_year_started, config.general.general_current_year)
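# Worked example (illustrative values): with project_year_started = '2011'
# and a current year of 2016, the else branch yields '2011-2016'; a project
# started in the current year keeps just that single year.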
# project_data_files.append(templar.utils.hlp_files_under('/usr/bin', 'src/*'))
project_google_analytics_tracking_id='UA-80940105-1'
project_google_analytics_snipplet = '''<script type="text/javascript">
(function(i,s,o,g,r,a,m){{i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){{
(i[r].q=i[r].q||[]).push(arguments)}},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
}})(window,document,'script','https://www.google-analytics.com/analytics.js','ga');
ga('create', '{0}', 'auto');
ga('send', 'pageview');
</script>'''.format(project_google_analytics_tracking_id)
| gpl-3.0 |
mrquim/repository.mrquim | repo/script.module.covenant/lib/resources/lib/sources/en/mzmovies.py | 7 | 6851 | # -*- coding: utf-8 -*-
'''
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import urllib, urlparse, re
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import source_utils
from resources.lib.modules import dom_parser
from resources.lib.modules import directstream
from resources.lib.modules import cfscrape
class source:
def __init__(self):
self.priority = 0
self.language = ['en']
self.domains = ['mehlizmovies.com']
self.base_link = 'https://www.mehlizmovies.com/'
self.search_link = '?s=%s'
self.search_link2 = '/search/%s/feed/rss2/'
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = self.__search([localtitle] + source_utils.aliases_to_array(aliases), year)
if not url and title != localtitle: url = self.__search([title] + source_utils.aliases_to_array(
aliases),year)
return url
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = self.__search([localtvshowtitle] + source_utils.aliases_to_array(aliases), year)
if not url and tvshowtitle != localtvshowtitle: url = self.__search(
[tvshowtitle] + source_utils.aliases_to_array(aliases), year)
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url:
return
url = urlparse.urljoin(self.base_link, url)
scraper = cfscrape.create_scraper()
data = scraper.get(url).content
data = client.parseDOM(data, 'ul', attrs={'class': 'episodios'})
links = client.parseDOM(data, 'div', attrs={'class': 'episodiotitle'})
sp = zip(client.parseDOM(data, 'div', attrs={'class': 'numerando'}), client.parseDOM(links, 'a', ret='href'))
Sea_Epi = '%dx%d'% (int(season), int(episode))
for i in sp:
sep = i[0]
if sep == Sea_Epi:
url = source_utils.strip_domain(i[1])
return url
except:
return
def __search(self, titles, year):
try:
query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0])))
query = urlparse.urljoin(self.base_link, query)
t = cleantitle.get(titles[0])
scraper = cfscrape.create_scraper()
data = scraper.get(query).content
#data = client.request(query, referer=self.base_link)
data = client.parseDOM(data, 'div', attrs={'class': 'result-item'})
r = dom_parser.parse_dom(data, 'div', attrs={'class': 'title'})
r = zip(dom_parser.parse_dom(r, 'a'), dom_parser.parse_dom(data, 'span', attrs={'class': 'year'}))
url = []
for i in range(len(r)):
title = cleantitle.get(r[i][0][1])
title = re.sub('(\d+p|4k|3d|hd|season\d+)','',title)
y = r[i][1][1]
link = r[i][0][0]['href']
if 'season' in title: continue
if t == title and y == year:
if 'season' in link:
url.append(source_utils.strip_domain(link))
print url[0]
return url[0]
else: url.append(source_utils.strip_domain(link))
return url
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
links = self.links_found(url)
hostdict = hostDict + hostprDict
for url in links:
try:
valid, host = source_utils.is_host_valid(url, hostdict)
if 'mehliz' in url:
host = 'MZ'; direct = True; urls = (self.mz_server(url))
elif 'ok.ru' in url:
host = 'vk'; direct = True; urls = (directstream.odnoklassniki(url))
else:
direct = False; urls = [{'quality': 'SD', 'url': url}]
for x in urls:
sources.append({'source': host, 'quality': x['quality'], 'language': 'en',
'url': x['url'], 'direct': direct, 'debridonly': False})
except:
pass
return sources
except:
return sources
def links_found(self,urls):
try:
scraper = cfscrape.create_scraper()
links = []
if type(urls) is list:
for item in urls:
query = urlparse.urljoin(self.base_link, item)
r = scraper.get(query).content
data = client.parseDOM(r, 'div', attrs={'id': 'playex'})
data = client.parseDOM(data, 'div', attrs={'id': 'option-\d+'})
links += client.parseDOM(data, 'iframe', ret='src')
else:
query = urlparse.urljoin(self.base_link, urls)
r = scraper.get(query).content
data = client.parseDOM(r, 'div', attrs={'id': 'playex'})
data = client.parseDOM(data, 'div', attrs={'id': 'option-\d+'})
links += client.parseDOM(data, 'iframe', ret='src')
return links
except:
return urls
def mz_server(self,url):
try:
scraper = cfscrape.create_scraper()
urls = []
data = scraper.get(url).content
data = re.findall('''file:\s*["']([^"']+)",label:\s*"(\d{3,}p)"''', data, re.DOTALL)
for url, label in data:
label = source_utils.label_to_quality(label)
if label == 'SD': continue
urls.append({'url': url, 'quality': label})
return urls
except:
return url
def resolve(self, url):
return url
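# --- Hedged note (the sample markup below is our own illustration) ---
# mz_server() expects embedded player markup shaped like
#     file: "http://host/video.mp4",label: "720p"
# from which its regex captures ('http://host/video.mp4', '720p');
# source_utils.label_to_quality() then maps the label to a quality tag,
# and plain 'SD' results are skipped.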
| gpl-2.0 |
rven/odoo | addons/account_edi_extended/models/account_move.py | 2 | 4061 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, models, fields, _
class AccountMove(models.Model):
_inherit = 'account.move'
edi_show_abandon_cancel_button = fields.Boolean(
compute='_compute_edi_show_abandon_cancel_button')
edi_error_message = fields.Html(compute='_compute_edi_error_message')
edi_blocking_level = fields.Selection(selection=[('info', 'Info'), ('warning', 'Warning'), ('error', 'Error')], compute='_compute_edi_error_message')
@api.depends(
'edi_document_ids',
'edi_document_ids.state',
'edi_document_ids.blocking_level',
'edi_document_ids.edi_format_id',
'edi_document_ids.edi_format_id.name')
def _compute_edi_web_services_to_process(self):
# OVERRIDE to take blocking_level into account
for move in self:
to_process = move.edi_document_ids.filtered(lambda d: d.state in ['to_send', 'to_cancel'] and d.blocking_level != 'error')
format_web_services = to_process.edi_format_id.filtered(lambda f: f._needs_web_services())
move.edi_web_services_to_process = ', '.join(f.name for f in format_web_services)
@api.depends(
'state',
'edi_document_ids.state',
'edi_document_ids.attachment_id')
def _compute_edi_show_abandon_cancel_button(self):
for move in self:
move.edi_show_abandon_cancel_button = any(doc.edi_format_id._needs_web_services()
and doc.state == 'to_cancel'
and move.is_invoice(include_receipts=True)
and doc.edi_format_id._is_required_for_invoice(move)
for doc in move.edi_document_ids)
@api.depends('edi_error_count', 'edi_document_ids.error', 'edi_document_ids.blocking_level')
def _compute_edi_error_message(self):
for move in self:
if move.edi_error_count == 0:
move.edi_error_message = None
move.edi_blocking_level = None
elif move.edi_error_count == 1:
error_doc = move.edi_document_ids.filtered(lambda d: d.error)
move.edi_error_message = error_doc.error
move.edi_blocking_level = error_doc.blocking_level
else:
error_levels = set([doc.blocking_level for doc in move.edi_document_ids])
if 'error' in error_levels:
move.edi_error_message = str(move.edi_error_count) + _(" Electronic invoicing error(s)")
move.edi_blocking_level = 'error'
elif 'warning' in error_levels:
move.edi_error_message = str(move.edi_error_count) + _(" Electronic invoicing warning(s)")
move.edi_blocking_level = 'warning'
else:
move.edi_error_message = str(move.edi_error_count) + _(" Electronic invoicing info(s)")
move.edi_blocking_level = 'info'
def action_retry_edi_documents_error(self):
self.edi_document_ids.write({'error': False, 'blocking_level': False})
self.action_process_edi_web_services()
def button_abandon_cancel_posted_posted_moves(self):
'''Cancel the request for cancellation of the EDI.
'''
documents = self.env['account.edi.document']
for move in self:
is_move_marked = False
for doc in move.edi_document_ids:
if doc.state == 'to_cancel' \
and move.is_invoice(include_receipts=True) \
and doc.edi_format_id._is_required_for_invoice(move):
documents |= doc
is_move_marked = True
if is_move_marked:
move.message_post(body=_("A request for cancellation of the EDI has been called off."))
documents.write({'state': 'sent'})
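# --- Hedged illustration (plain Python, not odoo recordsets): the same
# "most severe level wins" rule that _compute_edi_error_message() applies
# when several EDI documents carry errors. ---
def _most_severe_blocking_level(levels):
    # 'error' outranks 'warning', which outranks 'info'.
    for level in ('error', 'warning', 'info'):
        if level in levels:
            return level
    return None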
| agpl-3.0 |
chemelnucfin/tensorflow | tensorflow/python/autograph/core/converter.py | 5 | 13637 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter construction support.
This module contains a base class for all converters, as well as supporting
structures. These structures are referred to as contexts.
The class hierarchy is as follows:
<your converter>
[extends] converter.Base
[extends] transformer.Base
        [extends] gast.NodeTransformer
          [uses] transformer.SourceInfo
    [uses] converter.EntityContext
      [uses] converter.ProgramContext
        [uses] transformer.SourceInfo
converter.Base is a specialization of transformer.Base for AutoGraph. It's a
very lightweight subclass that adds a `ctx` attribute holding the corresponding
EntityContext object (see below). Note that converters are not reusable, and
`visit` will raise an error if called more than once.
converter.EntityContext contains mutable state associated with an entity that
the converter processes.
converter.ProgramContext contains mutable state across related entities. For
example, when converting several functions that call one another, the
ProgramContext should be shared across these entities.
Below is the overall flow at conversion:
program_ctx = ProgramContext(<entities to convert>, <global settings>, ...)
while <program_ctx has more entities to convert>:
entity, source_info = <get next entity from program_ctx>
entity_ctx = EntityContext(program_ctx, source_info)
for <each ConverterClass>:
converter = ConverterClass(entity_ctx)
# May update entity_ctx and program_ctx
entity = converter.visit(entity)
<add entity's dependencies to program_ctx>
Note that pyct contains a small number of transformers used for static analysis.
These implement transformer.Base, rather than converter.Base, to avoid a
dependency on AutoGraph.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import enum
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import compiler
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct import transformer
from tensorflow.python.autograph.pyct.static_analysis import activity
from tensorflow.python.autograph.pyct.static_analysis import liveness
from tensorflow.python.autograph.pyct.static_analysis import reaching_definitions
from tensorflow.python.util.tf_export import tf_export
# TODO(mdan): These contexts can be refactored into first class objects.
# For example, we could define Program and Entity abstractions that hold on
# to the actual entity and have conversion methods.
# TODO(mdan): Add a test specific to this converter.
@tf_export('autograph.experimental.Feature')
class Feature(enum.Enum):
"""This enumeration represents optional conversion options.
These conversion options are experimental. They are subject to change without
notice and offer no guarantees.
_Example Usage_
```python
  optionals = tf.autograph.experimental.Feature.EQUALITY_OPERATORS
@tf.function(experimental_autograph_options=optionals)
def f(i):
if i == 0: # EQUALITY_OPERATORS allows the use of == here.
tf.print('i is zero')
```
Attributes:
ALL: Enable all features.
AUTO_CONTROL_DEPS: Insert of control dependencies in the generated code.
ASSERT_STATEMENTS: Convert Tensor-dependent assert statements to tf.Assert.
BUILTIN_FUNCTIONS: Convert builtin functions applied to Tensors to
their TF counterparts.
EQUALITY_OPERATORS: Whether to convert the comparison operators, like
equality. This is soon to be deprecated as support is being added to the
Tensor class.
LISTS: Convert list idioms, like initializers, slices, append, etc.
NAME_SCOPES: Insert name scopes that name ops according to context, like the
function they were defined in.
"""
ALL = 'ALL'
AUTO_CONTROL_DEPS = 'AUTO_CONTROL_DEPS'
ASSERT_STATEMENTS = 'ASSERT_STATEMENTS'
BUILTIN_FUNCTIONS = 'BUILTIN_FUNCTIONS'
EQUALITY_OPERATORS = 'EQUALITY_OPERATORS'
LISTS = 'LISTS'
NAME_SCOPES = 'NAME_SCOPES'
@classmethod
def all(cls):
"""Returns a tuple that enables all options."""
return tuple(cls.__members__.values())
@classmethod
def all_but(cls, exclude):
"""Returns a tuple that enables all but the excluded options."""
if not isinstance(exclude, (list, tuple, set)):
exclude = (exclude,)
return tuple(set(cls.all()) - set(exclude) - {cls.ALL})
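# Illustrative usage of the helpers above (a sketch, not part of the original
# module; the import path is indicative and may vary across TF versions):
#
#   from tensorflow.python.autograph.core.converter import Feature
#   opts = Feature.all_but(Feature.LISTS)
#   assert Feature.LISTS not in opts
#   assert Feature.EQUALITY_OPERATORS in opts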
STANDARD_OPTIONS = None # Forward definition.
class ConversionOptions(object):
"""Immutable container for global conversion flags.
Attributes:
recursive: bool, whether to recursively convert any user functions or
classes that the converted function may use.
user_requested: bool, whether the conversion was explicitly requested by
the user, as opposed to being performed as a result of other logic. This
value always auto-resets to False in child conversions.
optional_features: Union[Feature, Set[Feature]], controls the use of
optional features in the conversion process. See Feature for available
options.
"""
def __init__(self,
recursive=False,
user_requested=False,
internal_convert_user_code=True,
optional_features=Feature.ALL):
self.recursive = recursive
self.user_requested = user_requested
# TODO(mdan): Rename to conversion_recursion_depth?
self.internal_convert_user_code = internal_convert_user_code
if optional_features is None:
optional_features = ()
elif isinstance(optional_features, Feature):
optional_features = (optional_features,)
optional_features = frozenset(optional_features)
self.optional_features = optional_features
def as_tuple(self):
return (self.recursive, self.user_requested,
self.internal_convert_user_code, self.optional_features)
def __hash__(self):
return hash(self.as_tuple())
def __eq__(self, other):
assert isinstance(other, ConversionOptions)
return self.as_tuple() == other.as_tuple()
def __str__(self):
return 'ConversionOptions[{}]'.format(self.as_tuple())
def uses(self, feature):
return (Feature.ALL in self.optional_features or
feature in self.optional_features)
def call_options(self):
"""Returns the corresponding options to be used for recursive conversion."""
return ConversionOptions(
recursive=self.recursive,
user_requested=False,
internal_convert_user_code=self.recursive,
optional_features=self.optional_features)
def to_ast(self):
"""Returns a representation of this object as an AST node.
The AST node encodes a constructor that would create an object with the
same contents.
Returns:
ast.Node
"""
if self == STANDARD_OPTIONS:
return parser.parse_expression('ag__.STD')
template = """
ag__.ConversionOptions(
recursive=recursive_val,
user_requested=user_requested_val,
optional_features=optional_features_val,
internal_convert_user_code=internal_convert_user_code_val)
"""
def list_of_features(values):
return parser.parse_expression('({})'.format(', '.join(
'ag__.{}'.format(str(v)) for v in values)))
expr_ast = templates.replace(
template,
recursive_val=parser.parse_expression(str(self.recursive)),
user_requested_val=parser.parse_expression(str(self.user_requested)),
internal_convert_user_code_val=parser.parse_expression(
str(self.internal_convert_user_code)),
optional_features_val=list_of_features(self.optional_features))
return expr_ast[0].value
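# A minimal sketch of how ConversionOptions is typically exercised (values
# here are illustrative):
#
#   opts = ConversionOptions(
#       recursive=True, optional_features=Feature.EQUALITY_OPERATORS)
#   opts.uses(Feature.EQUALITY_OPERATORS)  # -> True
#   opts.uses(Feature.LISTS)               # -> False
#   child_opts = opts.call_options()       # user_requested resets to False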
STANDARD_OPTIONS = ConversionOptions(
recursive=True,
user_requested=False,
internal_convert_user_code=True,
optional_features=None)
class ProgramContext(
collections.namedtuple('ProgramContext', ('options', 'autograph_module'))):
"""ProgramContext keeps track of converting function hierarchies.
This object is mutable, and is updated during conversion. Not thread safe.
Attributes:
options: ConversionOptions
autograph_module: Module, a reference to the autograph module. This needs to
be specified by the caller to avoid circular dependencies.
"""
pass
class EntityContext(transformer.Context):
"""Tracks the conversion of a single entity.
This object is mutable, and is updated during conversion. Not thread safe.
Attributes:
namer: Namer
info: transformer.EntityInfo
program: ProgramContext,
target_name: Text
"""
def __init__(self, namer, entity_info, program_ctx, target_name=None):
super(EntityContext, self).__init__(entity_info)
self.namer = namer
self.program = program_ctx
self.target_name = target_name
class Base(transformer.Base):
"""All converters should inherit from this class.
Attributes:
ctx: EntityContext
"""
def __init__(self, ctx):
super(Base, self).__init__(ctx)
self._used = False
self._ast_depth = 0
def get_definition_directive(self, node, directive, arg, default):
"""Returns the unique directive argument for a symbol.
See lang/directives.py for details on directives.
Example:
# Given a directive in the code:
ag.foo_directive(bar, baz=1)
# One can write for an AST node Name(id='bar'):
get_definition_directive(node, ag.foo_directive, 'baz')
Args:
node: ast.AST, the node representing the symbol for which the directive
argument is needed.
directive: Callable[..., Any], the directive to search.
arg: str, the directive argument to return.
default: Any
Raises:
ValueError: if conflicting annotations have been found
"""
defs = anno.getanno(node, anno.Static.ORIG_DEFINITIONS, ())
if not defs:
return default
arg_values_found = []
for def_ in defs:
if (directive in def_.directives and arg in def_.directives[directive]):
arg_values_found.append(def_.directives[directive][arg])
if not arg_values_found:
return default
if len(arg_values_found) == 1:
return arg_values_found[0]
# If multiple annotations reach the symbol, they must all match. If they do,
# return any of them.
first_value = arg_values_found[0]
for other_value in arg_values_found[1:]:
if not ast_util.matches(first_value, other_value):
qn = anno.getanno(node, anno.Basic.QN)
raise ValueError('%s has ambiguous annotations for %s(%s): %s, %s' %
(qn, directive.__name__, arg,
compiler.ast_to_source(other_value).strip(),
compiler.ast_to_source(first_value).strip()))
return first_value
def visit(self, node):
if not self._ast_depth:
if self._used:
raise ValueError('converter objects cannot be reused')
self._used = True
self._ast_depth += 1
try:
return super(Base, self).visit(node)
finally:
self._ast_depth -= 1
class AnnotatedDef(reaching_definitions.Definition):
def __init__(self):
super(AnnotatedDef, self).__init__()
self.directives = {}
class AgAnno(enum.Enum):
"""Annotation labels specific to AutoGraph. See anno.py."""
DIRECTIVES = 'User directives associated with the annotated statement.'
def __repr__(self):
return self.name
def standard_analysis(node, context, is_initial=False):
"""Performs a complete static analysis of the given code.
Args:
node: ast.AST
context: converter.EntityContext
is_initial: bool, whether this is the initial analysis done on the input
source code
Returns:
ast.AST, same as node, with the static analysis annotations added
"""
# TODO(mdan): Clear static analysis here.
# TODO(mdan): Consider not running all analyses every time.
# TODO(mdan): Don't return a node because it's modified by reference.
graphs = cfg.build(node)
node = qual_names.resolve(node)
node = activity.resolve(node, context, None)
node = reaching_definitions.resolve(node, context, graphs, AnnotatedDef)
node = liveness.resolve(node, context, graphs)
if is_initial:
anno.dup(
node,
{
anno.Static.DEFINITIONS: anno.Static.ORIG_DEFINITIONS,
},
)
return node
def apply_(node, context, converter_module):
"""Applies a converter to an AST.
Args:
node: ast.AST
context: converter.EntityContext
converter_module: converter.Base
Returns:
ast.AST, the result of applying converter to node
"""
node = standard_analysis(node, context)
node = converter_module.transform(node, context)
return node
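# Hedged end-to-end sketch tying the pieces together; `ag`, `namer`,
# `entity_info` and `my_converter_module` are hypothetical placeholders:
#
#   program_ctx = ProgramContext(options=STANDARD_OPTIONS, autograph_module=ag)
#   entity_ctx = EntityContext(namer, entity_info, program_ctx)
#   node = standard_analysis(node, entity_ctx, is_initial=True)
#   node = apply_(node, entity_ctx, my_converter_module)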
| apache-2.0 |
Bjay1435/capstone | rootfs/usr/lib/python2.7/token.py | 73 | 2922 | """Token constants (from "token.h")."""
# This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run:
#
# ./python Lib/token.py
#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
BACKQUOTE = 25
LBRACE = 26
RBRACE = 27
EQEQUAL = 28
NOTEQUAL = 29
LESSEQUAL = 30
GREATEREQUAL = 31
TILDE = 32
CIRCUMFLEX = 33
LEFTSHIFT = 34
RIGHTSHIFT = 35
DOUBLESTAR = 36
PLUSEQUAL = 37
MINEQUAL = 38
STAREQUAL = 39
SLASHEQUAL = 40
PERCENTEQUAL = 41
AMPEREQUAL = 42
VBAREQUAL = 43
CIRCUMFLEXEQUAL = 44
LEFTSHIFTEQUAL = 45
RIGHTSHIFTEQUAL = 46
DOUBLESTAREQUAL = 47
DOUBLESLASH = 48
DOUBLESLASHEQUAL = 49
AT = 50
OP = 51
ERRORTOKEN = 52
N_TOKENS = 53
NT_OFFSET = 256
#--end constants--
tok_name = {}
for _name, _value in globals().items():
if type(_value) is type(0):
tok_name[_value] = _name
del _name, _value
def ISTERMINAL(x):
return x < NT_OFFSET
def ISNONTERMINAL(x):
return x >= NT_OFFSET
def ISEOF(x):
return x == ENDMARKER
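# Quick usage sketch (illustrative; mirrors the behavior of this generated
# module):
#
#   >>> import token
#   >>> token.tok_name[token.NAME]
#   'NAME'
#   >>> token.ISTERMINAL(token.NAME), token.ISNONTERMINAL(token.NT_OFFSET)
#   (True, True)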
def main():
import re
import sys
args = sys.argv[1:]
inFileName = args and args[0] or "Include/token.h"
outFileName = "Lib/token.py"
if len(args) > 1:
outFileName = args[1]
try:
fp = open(inFileName)
except IOError, err:
sys.stdout.write("I/O error: %s\n" % str(err))
sys.exit(1)
lines = fp.read().split("\n")
fp.close()
prog = re.compile(
"#define[ \t][ \t]*([A-Z0-9][A-Z0-9_]*)[ \t][ \t]*([0-9][0-9]*)",
re.IGNORECASE)
tokens = {}
for line in lines:
match = prog.match(line)
if match:
name, val = match.group(1, 2)
val = int(val)
tokens[val] = name # reverse so we can sort them...
keys = tokens.keys()
keys.sort()
# load the output skeleton from the target:
try:
fp = open(outFileName)
except IOError, err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(2)
format = fp.read().split("\n")
fp.close()
try:
start = format.index("#--start constants--") + 1
end = format.index("#--end constants--")
except ValueError:
sys.stderr.write("target does not contain format markers")
sys.exit(3)
lines = []
for val in keys:
lines.append("%s = %d" % (tokens[val], val))
format[start:end] = lines
try:
fp = open(outFileName, 'w')
except IOError, err:
sys.stderr.write("I/O error: %s\n" % str(err))
sys.exit(4)
fp.write("\n".join(format))
fp.close()
if __name__ == "__main__":
main()
| mit |
thomasem/nova | nova/api/openstack/compute/plugins/v3/admin_actions.py | 33 | 3773 | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack import common
from nova.api.openstack.compute.schemas.v3 import reset_server_state
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova import compute
from nova.compute import vm_states
from nova import exception
ALIAS = "os-admin-actions"
# States usable in resetState action
# NOTE: It is necessary to update the schema of nova/api/openstack/compute/
# schemas/v3/reset_server_state.py, when updating this state_map.
state_map = dict(active=vm_states.ACTIVE, error=vm_states.ERROR)
authorize = extensions.os_compute_authorizer(ALIAS)
class AdminActionsController(wsgi.Controller):
def __init__(self, *args, **kwargs):
super(AdminActionsController, self).__init__(*args, **kwargs)
self.compute_api = compute.API(skip_policy_check=True)
@wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('resetNetwork')
def _reset_network(self, req, id, body):
"""Permit admins to reset networking on a server."""
context = req.environ['nova.context']
authorize(context, action='reset_network')
try:
instance = common.get_instance(self.compute_api, context, id)
self.compute_api.reset_network(context, instance)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
@wsgi.response(202)
@extensions.expected_errors((404, 409))
@wsgi.action('injectNetworkInfo')
def _inject_network_info(self, req, id, body):
"""Permit admins to inject network info into a server."""
context = req.environ['nova.context']
authorize(context, action='inject_network_info')
try:
instance = common.get_instance(self.compute_api, context, id)
self.compute_api.inject_network_info(context, instance)
except exception.InstanceIsLocked as e:
raise exc.HTTPConflict(explanation=e.format_message())
@wsgi.response(202)
@extensions.expected_errors((400, 404))
@wsgi.action('os-resetState')
@validation.schema(reset_server_state.reset_state)
def _reset_state(self, req, id, body):
"""Permit admins to reset the state of a server."""
context = req.environ["nova.context"]
authorize(context, action='reset_state')
# Identify the desired state from the body
state = state_map[body["os-resetState"]["state"]]
instance = common.get_instance(self.compute_api, context, id)
instance.vm_state = state
instance.task_state = None
instance.save(admin_state_reset=True)
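# Illustrative request for the action above (typical OpenStack action URL;
# the body shape follows state_map and the schema referenced earlier):
#
#   POST /servers/{server_id}/action
#   {"os-resetState": {"state": "error"}}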
class AdminActions(extensions.V3APIExtensionBase):
"""Enable admin-only server actions
Actions include: resetNetwork, injectNetworkInfo, os-resetState
"""
name = "AdminActions"
alias = ALIAS
version = 1
def get_controller_extensions(self):
controller = AdminActionsController()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
def get_resources(self):
return []
| apache-2.0 |
Korkki/django | tests/inspectdb/models.py | 208 | 2737 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class People(models.Model):
name = models.CharField(max_length=255)
parent = models.ForeignKey('self', models.CASCADE)
class Message(models.Model):
from_field = models.ForeignKey(People, models.CASCADE, db_column='from_id')
class PeopleData(models.Model):
people_pk = models.ForeignKey(People, models.CASCADE, primary_key=True)
ssn = models.CharField(max_length=11)
class PeopleMoreData(models.Model):
people_unique = models.ForeignKey(People, models.CASCADE, unique=True)
license = models.CharField(max_length=255)
class DigitsInColumnName(models.Model):
all_digits = models.CharField(max_length=11, db_column='123')
leading_digit = models.CharField(max_length=11, db_column='4extra')
leading_digits = models.CharField(max_length=11, db_column='45extra')
class SpecialName(models.Model):
field = models.IntegerField(db_column='field')
# Underscores
field_field_0 = models.IntegerField(db_column='Field_')
field_field_1 = models.IntegerField(db_column='Field__')
field_field_2 = models.IntegerField(db_column='__field')
# Other chars
prc_x = models.IntegerField(db_column='prc(%) x')
non_ascii = models.IntegerField(db_column='tamaño')
class Meta:
db_table = "inspectdb_special.table name"
class ColumnTypes(models.Model):
id = models.AutoField(primary_key=True)
big_int_field = models.BigIntegerField()
bool_field = models.BooleanField(default=False)
null_bool_field = models.NullBooleanField()
char_field = models.CharField(max_length=10)
null_char_field = models.CharField(max_length=10, blank=True, null=True)
comma_separated_int_field = models.CommaSeparatedIntegerField(max_length=99)
date_field = models.DateField()
date_time_field = models.DateTimeField()
decimal_field = models.DecimalField(max_digits=6, decimal_places=1)
email_field = models.EmailField()
file_field = models.FileField(upload_to="unused")
file_path_field = models.FilePathField()
float_field = models.FloatField()
int_field = models.IntegerField()
gen_ip_adress_field = models.GenericIPAddressField(protocol="ipv4")
pos_int_field = models.PositiveIntegerField()
pos_small_int_field = models.PositiveSmallIntegerField()
slug_field = models.SlugField()
small_int_field = models.SmallIntegerField()
text_field = models.TextField()
time_field = models.TimeField()
url_field = models.URLField()
class UniqueTogether(models.Model):
field1 = models.IntegerField()
field2 = models.CharField(max_length=10)
class Meta:
unique_together = ('field1', 'field2')
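# These models exist to be round-tripped by introspection. A hedged usage
# sketch (table name follows Django's default app_label_model convention):
#
#   $ django-admin inspectdb inspectdb_columntypes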
| bsd-3-clause |
shreks7/crazyflie-android-no-otg | CrazyFliePythonServer/crazyflie/main.py | 1 | 18432 | # The MIT License (MIT)
#
# Copyright (c) 2013 Shrey Malhotra
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys
from PyQt4 import QtGui, QtCore, uic
from PyQt4.QtCore import pyqtSignal, Qt, pyqtSlot
import ast
import urllib2
import httplib
import logging,threading,time
from cflib.crazyflie import Crazyflie
import cflib.crtp
from cfclient.utils.logconfigreader import LogConfig
from cfclient.utils.logconfigreader import LogVariable
from connectiondialogue import ConnectDialogue
html =""
ipaddress = "http://192.168.43.1"
stream_url = ':8080/stream/json'
setup_url = ':8080/cgi/setup'
ex = False
threads = None
_signalThread = None
MAX_THRUST = 65365.0
MIN_THRUST = 10000.0
_CrazyFlieThread = None
values = None
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
class UIState:
DISCONNECTED = 0
CONNECTING = 1
CONNECTED = 2
class App(QtGui.QMainWindow):
global _CrazyFlieThread,threads
connectionLostSignal = pyqtSignal(str, str)
connectionInitiatedSignal = pyqtSignal(str)
batteryUpdatedSignal = pyqtSignal(object)
connectionDoneSignal = pyqtSignal(str)
connectionFailedSignal = pyqtSignal(str, str)
disconnectedSignal = pyqtSignal(str)
linkQualitySignal = pyqtSignal(int)
_motor_data_signal = pyqtSignal(object)
_imu_data_signal = pyqtSignal(object)
def __init__(self):
global stream_url,setup_url, ipaddress
super(App, self).__init__()
stream_url = ipaddress+stream_url
setup_url = ipaddress+setup_url
uic.loadUi('mainUI.ui', self)
self.initUI()
cflib.crtp.init_drivers(enable_debug_driver=False)
self.cf = Crazyflie()
self.CrazyFlieSignal = QtCore.pyqtSignal(object)
#oldValues
self.oldThrust = 0
self.maxAngleV = self.maxAngle.value()
self.maxYawRateV = self.maxYawRate.value()
self.maxThrustV = self.maxThrust.value()
self.minThrustV = self.minThrust.value()
self.slewEnableLimitV = self.slewEnableLimit.value()
self.thrustLoweringSlewRateLimitV = self.thrustLoweringSlewRateLimit.value()
#Connection Dialogue
self.connectDialogue = ConnectDialogue()
#Status Bar Update
self._statusbar_label = QtGui.QLabel("Loading device and configuration.")
self.statusBar().addWidget(self._statusbar_label)
#Connect to the URI
self.connectDialogue.requestConnectionSignal.connect(self.cf.open_link)
# Set UI state to disconnected by default
self.setUIState(UIState.DISCONNECTED)
# Connection callbacks and signal wrappers for UI protection
self.connectionDoneSignal.connect(self.connectionDone)
self.connectionFailedSignal.connect(self.connectionFailed)
self.batteryUpdatedSignal.connect(self.updateBatteryVoltage)
self.connectionLostSignal.connect(self.connectionLost)
self.disconnectedSignal.connect(
lambda linkURI: self.setUIState(UIState.DISCONNECTED,
linkURI))
self.connectionInitiatedSignal.connect(
lambda linkURI: self.setUIState(UIState.CONNECTING,
linkURI))
self.cf.connectionFailed.add_callback(self.connectionFailedSignal.emit)
self.cf.connectSetupFinished.add_callback(self.connectionDoneSignal.emit)
self.cf.disconnected.add_callback(self.disconnectedSignal.emit)
self.cf.connectionLost.add_callback(self.connectionLostSignal.emit)
self.cf.connectionInitiated.add_callback(self.connectionInitiatedSignal.emit)
# Connect link quality feedback
self.cf.linkQuality.add_callback(self.linkQualitySignal.emit)
self.linkQualitySignal.connect(
lambda percentage: self.linkQualityBar.setValue(percentage))
QtCore.QObject.connect(self.connectFlie, QtCore.SIGNAL('clicked()'), self.onConnectButtonClicked)
# Flight Data Signal Connection
self._imu_data_signal.connect(self._imu_data_received)
self._motor_data_signal.connect(self._motor_data_received)
# menu items
self.actionSet_Server_Ip.triggered.connect(self.setIpAddress)
# --------- Initialize UI & Define Close Event -------------
def setIpAddress(self):
global ipaddress,stream_url,setup_url
text, ok = QtGui.QInputDialog.getText(self, 'Input IpAddress (X.X.X.X)',
'Enter Ip Address:')
if ok:
ipaddress = "http://"+text
stream_url = ':8080/stream/json'
setup_url = ':8080/cgi/setup'
stream_url = str(ipaddress+stream_url)
setup_url = str(ipaddress+setup_url)
def initUI(self):
self.setWindowTitle('CrazyFlie')
self.show()
self.statusBar()
QtCore.QObject.connect(self.serverConnect, QtCore.SIGNAL('clicked()'), self.onServerButtonClicked)
QtCore.QObject.connect(self.updateServer, QtCore.SIGNAL('clicked()'), self.onSendServerAttr)
def closeEvent(self, event):
reply = QtGui.QMessageBox.question(self, 'Message',
"Are you sure to quit?", QtGui.QMessageBox.Yes |
QtGui.QMessageBox.No, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
global ex  # write the module-level stop flag, not a new local
ex = True
self.cf.close_link()
event.accept()
else:
event.ignore()
# ---- Button State Handlers -----------------
def onSendServerAttr(self):
self.maxAngleV = self.maxAngle.value()
self.maxYawRateV = self.maxYawRate.value()
self.maxThrustV = self.maxThrust.value()
self.minThrustV = self.minThrust.value()
self.slewEnableLimitV = self.slewEnableLimit.value()
self.thrustLoweringSlewRateLimitV = self.thrustLoweringSlewRateLimit.value()
self.console.append("Sending Server: %d, %d ,%f,%f,%f" %(self.maxAngleV,self.maxYawRateV,self.maxThrustV,
self.minThrustV,self.slewEnableLimitV))
threading.Thread(target=self.updateSer).start()
def updateSer(self):
update_url = setup_url+"?maxRollPitchAngle=%d&maxYawAngle=%d&maxThrust=%f&minThrust=%f&xmode=False" %(self.maxAngleV,self.maxYawRateV,self.maxThrustV,
self.minThrustV)
try:
response = urllib2.urlopen(update_url)
except urllib2.HTTPError, e:
self.console.append(str(e))
return
except urllib2.URLError, e:
self.console.append(str(e))
return
except httplib.HTTPException, e:
self.console.append(str(e))
return
if(response.read=="OK"):
self.console.append("Server Update Status: OK")
def onServerButtonClicked(self):
global stream_url, ex
ex = False
self.serverConnect.setEnabled(False)
downloader = DownloadThread(stream_url, self.console)
self.threads = downloader
downloader.data_downloaded.connect(self.on_data_ready,QtCore.Qt.QueuedConnection)
downloader.start()
self.updateServer.setEnabled(False)
def onConnectButtonClicked(self):
if (self.uiState == UIState.CONNECTED):
self.cf.close_link()
elif (self.uiState == UIState.CONNECTING):
self.cf.close_link()
self.setUIState(UIState.DISCONNECTED)
else:
self.connectDialogue.show()
# ------- Connection Callback Handlers -------------------------
def connectionFailed(self, linkURI, error):
msg = "Failed to connect on %s: %s" % (linkURI, error)
warningCaption = "Communication failure"
QtGui.QMessageBox.critical(self, warningCaption, msg)
self.setUIState(UIState.DISCONNECTED, linkURI)
self.disconnectedFlightData(linkURI)
def connectionLost(self, linkURI, msg):
warningCaption = "Communication failure"
error = "Connection lost to %s: %s" % (linkURI, msg)
QtGui.QMessageBox.critical(self, warningCaption, error)
self.setUIState(UIState.DISCONNECTED, linkURI)
self.disconnectedFlightData(linkURI)
def connectionDone(self, linkURI):
global _signalThread
self.setUIState(UIState.CONNECTED, linkURI)
dataThread = threading.Thread(target=self.connectedFlightData, args=(linkURI,))
dataThread.start()
threading.Thread(target=self.pulse_command).start()
def on_data_ready(self,value):
global values
if(value=="error"):
self.console.setText("Error in connection")
self.serverConnect.setEnabled(True)
self.updateServer.setEnabled(True)
values = None
else:
self.targetRoll.setText("%f" % (value[0]))
self.targetPitch.setText("%f" % (value[1]))
self.targetThrust.setText("%f" % (value[2]))
self.targetYaw.setText("%f" % (value[3]))
self.readInput(value)
def readInput(self,value):
global values,MAX_THRUST,MIN_THRUST,_signalThread
roll,pitch,thrust,yaw = value
if (self.slewEnableLimitV > thrust):
if self.oldThrust > self.slewEnableLimitV:
self.oldThrust = self.slewEnableLimitV
if thrust < (self.oldThrust - (self.thrustLoweringSlewRateLimitV / 100)):
thrust = self.oldThrust - self.thrustLoweringSlewRateLimitV / 100
if thrust < self.minThrustV:
thrust = 0
self.oldThrust = thrust
pitch = -pitch
values = roll,pitch,yaw,thrust
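# The branch above is a downward slew-rate limiter: once thrust drops below
# slewEnableLimit it may fall by at most thrustLoweringSlewRateLimit/100 per
# tick, and cuts to zero below minThrust. A self-contained sketch of the same
# idea (names here are illustrative):
#
#   def limit_thrust(new, old, enable_limit, rate, min_thrust):
#       if enable_limit > new:
#           old = min(old, enable_limit)
#           new = max(new, old - rate / 100.0)
#           if new < min_thrust:
#               new = 0
#       return new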
def pulse_command(self):
global values
while(True):
if(values!=None):
roll,pitch,yaw,thrust = values
#print "%f %f %f %f" %(roll, pitch, yaw, thrust)
self.cf.commander.send_setpoint(roll, pitch, yaw, thrust)
time.sleep(0.1)
else:
break
# ------- UI State Handling -------------------
def setUIState(self, newState, linkURI=""):
self.uiState = newState
if (newState == UIState.DISCONNECTED):
self.setWindowTitle("Not connected")
self.connectFlie.setText("Connect")
self.batteryBar.setValue(3000)
self.disconnectedFlightData(linkURI)
self.linkQualityBar.setValue(0)
if (newState == UIState.CONNECTED):
s = "Connected on %s" % linkURI
self.menuItemConnect.setText("Disconnect")
self.connectFlie.setText("Disconnect")
if (newState == UIState.CONNECTING):
s = "Connecting to %s ..." % linkURI
self.setWindowTitle(s)
self.connectFlie.setText("Connecting")
# ------------------ Flight Data Receiver-----------------------
def connectedFlightData(self, linkURI):
lg = LogConfig("Battery", 1000)
lg.addVariable(LogVariable("pm.vbat", "float"))
self.log = self.cf.log.create_log_packet(lg)
if (self.log != None):
self.log.data_received.add_callback(self.batteryUpdatedSignal.emit)
self.log.error.add_callback(self.loggingError)
self.log.start()
else:
print("Could not setup loggingblock!")
lg = LogConfig("Stabalizer", 100)
lg.addVariable(LogVariable("stabilizer.roll", "float"))
lg.addVariable(LogVariable("stabilizer.pitch", "float"))
lg.addVariable(LogVariable("stabilizer.yaw", "float"))
lg.addVariable(LogVariable("stabilizer.thrust", "uint16_t"))
self.log = self.cf.log.create_log_packet(lg)
if (self.log is not None):
self.log.data_received.add_callback(self._imu_data_signal.emit)
self.log.error.add_callback(self.loggingError)
self.log.start()
else:
print("Could not setup logconfiguration after "
"connection!")
lg = LogConfig("Motors", 100)
lg.addVariable(LogVariable("motor.m1", "uint32_t"))
lg.addVariable(LogVariable("motor.m2", "uint32_t"))
lg.addVariable(LogVariable("motor.m3", "uint32_t"))
lg.addVariable(LogVariable("motor.m4", "uint32_t"))
self.log = self.cf.log.create_log_packet(lg)
if (self.log is not None):
self.log.data_received.add_callback(self._motor_data_signal.emit)
self.log.error.add_callback(self.loggingError)
self.log.start()
else:
print("Could not setup logconfiguration after "
"connection!")
def loggingError(self, error):
logger.warn("logging error %s", error)
def disconnectedFlightData(self, linkURI):
self.actualM1.setValue(0)
self.actualM2.setValue(0)
self.actualM3.setValue(0)
self.actualM4.setValue(0)
self.actualRoll.setText("")
self.actualPitch.setText("")
self.actualYaw.setText("")
self.actualThrust.setText("")
def _motor_data_received(self, data):
self.actualM1.setValue(data["motor.m1"])
self.actualM2.setValue(data["motor.m2"])
self.actualM3.setValue(data["motor.m3"])
self.actualM4.setValue(data["motor.m4"])
def _imu_data_received(self, data):
self.actualRoll.setText(("%.2f" % data["stabilizer.roll"]))
self.actualPitch.setText(("%.2f" % data["stabilizer.pitch"]))
self.actualYaw.setText(("%.2f" % data["stabilizer.yaw"]))
self.actualThrust.setText("%.2f%%" %
self.thrustToPercentage(
data["stabilizer.thrust"]))
def updateBatteryVoltage(self, data):
self.batteryBar.setValue(int(data["pm.vbat"] * 1000))
def thrustToPercentage(self, thrust):
return ((thrust / MAX_THRUST) * 100.0)
def percentageToThrust(self, percentage):
return int(MAX_THRUST * (percentage / 100.0))
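# Example conversions for the two helpers above (MAX_THRUST = 65365.0, as
# defined at module scope; `app` is an App instance):
#   app.thrustToPercentage(32682.5)  # -> 50.0
#   app.percentageToThrust(50.0)     # -> 32682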
# ------------------- Android Server Thread -----------------------------------
class DownloadThread(QtCore.QThread):
data_downloaded = QtCore.pyqtSignal(object)
def __init__(self, url, console):
QtCore.QThread.__init__(self)
self.url = url
self.console = console
def run(self):
global ex
try:
response = urllib2.urlopen(self.url)
except urllib2.HTTPError, e:
self.data_downloaded.emit("error")
ex=True
except urllib2.URLError, e:
self.data_downloaded.emit("error")
ex=True
except httplib.HTTPException, e:
self.data_downloaded.emit("error")
ex=True
while(ex!=True):
try:
html = response.read(27);
if(len(html)>0):
index = html.find("]")
html = html[:index+1]
values = ast.literal_eval(html)
consoleText= "Roll: %f | Pitch: %f | Thrust: %f | Yaw: %f" % (values[0],values[1],values[2],values[3])
# print consoleText
self.data_downloaded.emit(values)
else:
self.data_downloaded.emit("error")
break
except:
continue
pass
# ----------------------- CrazyFlie Thread ---------------------
class CrazyFlieThread(QtCore.QThread):
global values
def __init__(self,cf):
QtCore.QThread.__init__(self)
self.values = values
self.cf = cf
def run(self):
if(self.values!=None or self.values!="error"):
roll,pitch,yaw,thrust = self.values
self.pulse_command(roll,pitch,yaw,thrust)
def pulse_command(self,roll,pitch,yaw,thrust):
print "%f %f %f %f" %(roll, pitch, yaw, thrust)
self.cf.commander.send_setpoint(roll, pitch, yaw, thrust)
time.sleep(0.1)
self.pulse_command(roll, pitch, yaw,thrust)
# ----------------------------- Main ------------------------------
def main():
app = QtGui.QApplication(sys.argv)
ex = App()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| mit |
thopiekar/Cura | plugins/UM3NetworkPrinting/src/Models/Http/CloudClusterStatus.py | 1 | 1234 | # Copyright (c) 2019 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from datetime import datetime
from typing import List, Dict, Union, Any
from ..BaseModel import BaseModel
from .ClusterPrinterStatus import ClusterPrinterStatus
from .ClusterPrintJobStatus import ClusterPrintJobStatus
# Model that represents the status of the cluster for the cloud
class CloudClusterStatus(BaseModel):
## Creates a new cluster status model object.
# \param printers: The latest status of each printer in the cluster.
# \param print_jobs: The latest status of each print job in the cluster.
# \param generated_time: The datetime when the object was generated on the server-side.
def __init__(self,
printers: List[Union[ClusterPrinterStatus, Dict[str, Any]]],
print_jobs: List[Union[ClusterPrintJobStatus, Dict[str, Any]]],
generated_time: Union[str, datetime],
**kwargs) -> None:
self.generated_time = self.parseDate(generated_time)
self.printers = self.parseModels(ClusterPrinterStatus, printers)
self.print_jobs = self.parseModels(ClusterPrintJobStatus, print_jobs)
super().__init__(**kwargs)
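# Construction sketch (hypothetical payload; assumes BaseModel.parseDate
# accepts an ISO-8601 timestamp, and real printer/job dicts would carry the
# fields expected by ClusterPrinterStatus / ClusterPrintJobStatus):
#
#   status = CloudClusterStatus(printers=[], print_jobs=[],
#                               generated_time="2019-01-01T12:00:00Z")
#   status.generated_time  # -> datetime instance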
| lgpl-3.0 |
yoghadj/or-tools | examples/python/steel.py | 32 | 6171 | # Copyright 2010 Pierre Schaus [email protected], [email protected]
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.apputils import app
import gflags
from ortools.constraint_solver import pywrapcp
FLAGS = gflags.FLAGS
gflags.DEFINE_string('data', 'data/steel_mill/steel_mill_slab.txt',
'path to data file')
gflags.DEFINE_integer('time_limit', 20000, 'global time limit')
#----------------helper for binpacking posting----------------
def BinPacking(solver, binvars, weights, loadvars):
'''post the load constraint on bins.
constraints forall j: loadvars[j] == sum_i (binvars[i] == j) * weights[i])
'''
pack = solver.Pack(binvars, len(binvars))
pack.AddWeightedSumEqualVarDimension(weights, loadvars)
solver.Add(pack)
solver.Add(solver.SumEquality(loadvars, sum(weights)))
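# Usage sketch for the helper above (illustrative sizes; as in this problem,
# the number of bins equals the number of items, since Pack is created with
# len(binvars) bins):
#
#   solver = pywrapcp.Solver('demo')
#   x = [solver.IntVar(0, 2, 'x%d' % i) for i in range(3)]
#   loads = [solver.IntVar(0, 9, 'load%d' % j) for j in range(3)]
#   BinPacking(solver, x, [2, 3, 4], loads)
#   # now each loads[j] equals the total weight of items i with x[i] == j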
#------------------------------data reading-------------------
def ReadData(filename):
"""Read data from <filename>."""
f = open(filename)
capacity = [int(nb) for nb in f.readline().split()]
capacity.pop(0)
capacity = [0] + capacity
max_capacity = max(capacity)
nb_colors = int(f.readline())
nb_slabs = int(f.readline())
wc = [[int(j) for j in f.readline().split()] for i in range(nb_slabs)]
weights = [x[0] for x in wc]
colors = [x[1] for x in wc]
loss = [min(filter(lambda x: x >= c, capacity)) - c
for c in range(max_capacity + 1)]
color_orders = [filter(lambda o: colors[o] == c, range(nb_slabs))
for c in range(1, nb_colors + 1)]
print 'Solving steel mill with', nb_slabs, 'slabs'
return (nb_slabs, capacity, max_capacity, weights, colors, loss, color_orders)
#------------------dedicated search for this problem-----------
class SteelDecisionBuilder(pywrapcp.PyDecisionBuilder):
'''Dedicated Decision Builder for steel mill slab.
Search for the steel mill slab problem with Dynamic Symmetry
Breaking during search is an adaptation (for binary tree) from the
paper of Pascal Van Hentenryck and Laurent Michel CPAIOR-2008.
The value heuristic comes from the paper
Solving Steel Mill Slab Problems with Constraint-Based Techniques:
CP, LNS, and CBLS,
Schaus et. al. to appear in Constraints 2010
'''
def __init__(self, x, nb_slabs, weights, losstab, loads):
self.__x = x
self.__nb_slabs = nb_slabs
self.__weights = weights
self.__losstab = losstab
self.__loads = loads
self.__maxcapa = len(losstab) - 1
def Next(self, solver):
var, weight = self.NextVar()
if var:
v = self.MaxBound()
if v + 1 == var.Min():
# Symmetry breaking. If you need to assign to a new bin,
# select the first one.
solver.Add(var == v + 1)
return self.Next(solver)
else:
# value heuristic (important for difficult problem):
# try first to place the order in the slab that will induce
# the least increase of the loss
loads = self.getLoads()
l, v = min((self.__losstab[loads[i] + weight], i)
for i in range(var.Min(), var.Max() + 1)
if var.Contains(i) and loads[i] + weight <= self.__maxcapa)
decision = solver.AssignVariableValue(var, v)
return decision
else:
return None
def getLoads(self):
load = [0] * len(self.__loads)
for w, x in zip(self.__weights, self.__x):
if x.Bound():
load[x.Min()] += w
return load
def MaxBound(self):
""" returns the max value bound to a variable, -1 if no variables bound"""
return max([-1] + [self.__x[o].Min()
for o in range(self.__nb_slabs)
if self.__x[o].Bound()])
def NextVar(self):
""" mindom size heuristic with tie break on the weights of orders """
res = [(self.__x[o].Size(), -self.__weights[o], self.__x[o])
for o in range(self.__nb_slabs)
if self.__x[o].Size() > 1]
if res:
res.sort()
return res[0][2], -res[0][1] # returns the order var and its weight
else:
return None, None
def DebugString(self):
return 'SteelMillDecisionBuilder(' + str(self.__x) + ')'
def main(unused_argv):
#------------------solver and variable declaration-------------
(nb_slabs, capacity, max_capacity, weights, colors, loss, color_orders) =\
ReadData(FLAGS.data)
nb_colors = len(color_orders)
solver = pywrapcp.Solver('Steel Mill Slab')
x = [solver.IntVar(0, nb_slabs - 1, 'x' + str(i))
for i in range(nb_slabs)]
load_vars = [solver.IntVar(0, max_capacity - 1, 'load_vars' + str(i))
for i in range(nb_slabs)]
#-------------------post of the constraints--------------
# Bin Packing.
BinPacking(solver, x, weights, load_vars)
# At most two colors per slab.
for s in range(nb_slabs):
solver.Add(solver.SumLessOrEqual(
[solver.Max([solver.IsEqualCstVar(x[c], s) for c in o])
for o in color_orders], 2))
#----------------Objective-------------------------------
objective_var = \
solver.Sum([load_vars[s].IndexOf(loss) for s in range(nb_slabs)]).Var()
objective = solver.Minimize(objective_var, 1)
#------------start the search and optimization-----------
db = SteelDecisionBuilder(x, nb_slabs, weights, loss, load_vars)
search_log = solver.SearchLog(100000, objective_var)
global_limit = solver.TimeLimit(FLAGS.time_limit)
solver.NewSearch(db, [objective, search_log, global_limit])
while solver.NextSolution():
print 'Objective:', objective_var.Value(),\
'check:', sum(loss[load_vars[s].Min()] for s in range(nb_slabs))
solver.EndSearch()
if __name__ == '__main__':
app.run()
| apache-2.0 |
Lujeni/ansible | lib/ansible/modules/network/fortios/fortios_system_resource_limits.py | 13 | 12995 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_system_resource_limits
short_description: Configure resource limits in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify system feature and resource_limits category.
Examples include all parameters, and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.9"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
system_resource_limits:
description:
- Configure resource limits.
default: null
type: dict
suboptions:
custom_service:
description:
- Maximum number of firewall custom services.
type: int
dialup_tunnel:
description:
- Maximum number of dial-up tunnels.
type: int
firewall_address:
description:
- Maximum number of firewall addresses (IPv4, IPv6, multicast).
type: int
firewall_addrgrp:
description:
- Maximum number of firewall address groups (IPv4, IPv6).
type: int
firewall_policy:
description:
- Maximum number of firewall policies (IPv4, IPv6, policy46, policy64, DoS-policy4, DoS-policy6, multicast).
type: int
ipsec_phase1:
description:
- Maximum number of VPN IPsec phase1 tunnels.
type: int
ipsec_phase1_interface:
description:
- Maximum number of VPN IPsec phase1 interface tunnels.
type: int
ipsec_phase2:
description:
- Maximum number of VPN IPsec phase2 tunnels.
type: int
ipsec_phase2_interface:
description:
- Maximum number of VPN IPsec phase2 interface tunnels.
type: int
log_disk_quota:
description:
- Log disk quota in MB.
type: int
onetime_schedule:
description:
- Maximum number of firewall one-time schedules.
type: int
proxy:
description:
- Maximum number of concurrent proxy users.
type: int
recurring_schedule:
description:
- Maximum number of firewall recurring schedules.
type: int
service_group:
description:
- Maximum number of firewall service groups.
type: int
session:
description:
- Maximum number of sessions.
type: int
sslvpn:
description:
- Maximum number of SSL-VPN.
type: int
user:
description:
- Maximum number of local users.
type: int
user_group:
description:
- Maximum number of user groups.
type: int
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure resource limits.
fortios_system_resource_limits:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
system_resource_limits:
custom_service: "3"
dialup_tunnel: "4"
firewall_address: "5"
firewall_addrgrp: "6"
firewall_policy: "7"
ipsec_phase1: "8"
ipsec_phase1_interface: "9"
ipsec_phase2: "10"
ipsec_phase2_interface: "11"
log_disk_quota: "12"
onetime_schedule: "13"
proxy: "14"
recurring_schedule: "15"
service_group: "16"
session: "17"
sslvpn: "18"
user: "19"
user_group: "20"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
host = data['host']
username = data['username']
password = data['password']
ssl_verify = data['ssl_verify']
fos.debug('on')
if 'https' in data and not data['https']:
fos.https('off')
else:
fos.https('on')
fos.login(host, username, password, verify=ssl_verify)
def filter_system_resource_limits_data(json):
option_list = ['custom_service', 'dialup_tunnel', 'firewall_address',
'firewall_addrgrp', 'firewall_policy', 'ipsec_phase1',
'ipsec_phase1_interface', 'ipsec_phase2', 'ipsec_phase2_interface',
'log_disk_quota', 'onetime_schedule', 'proxy',
'recurring_schedule', 'service_group', 'session',
'sslvpn', 'user', 'user_group']
dictionary = {}
for attribute in option_list:
if attribute in json and json[attribute] is not None:
dictionary[attribute] = json[attribute]
return dictionary
def underscore_to_hyphen(data):
if isinstance(data, list):
for elem in data:
elem = underscore_to_hyphen(elem)
elif isinstance(data, dict):
new_data = {}
for k, v in data.items():
new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
data = new_data
return data
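# For example:
#   underscore_to_hyphen({'log_disk_quota': 12, 'user_group': 20})
#   # -> {'log-disk-quota': 12, 'user-group': 20}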
def system_resource_limits(data, fos):
vdom = data['vdom']
system_resource_limits_data = data['system_resource_limits']
filtered_data = underscore_to_hyphen(filter_system_resource_limits_data(system_resource_limits_data))
return fos.set('system',
'resource-limits',
data=filtered_data,
vdom=vdom)
def is_successful_status(status):
return status['status'] == "success" or \
status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_system(data, fos):
if data['system_resource_limits']:
resp = system_resource_limits(data, fos)
return not is_successful_status(resp), \
resp['status'] == "success", \
resp
def main():
fields = {
"host": {"required": False, "type": "str"},
"username": {"required": False, "type": "str"},
"password": {"required": False, "type": "str", "default": "", "no_log": True},
"vdom": {"required": False, "type": "str", "default": "root"},
"https": {"required": False, "type": "bool", "default": True},
"ssl_verify": {"required": False, "type": "bool", "default": True},
"system_resource_limits": {
"required": False, "type": "dict", "default": None,
"options": {
"custom_service": {"required": False, "type": "int"},
"dialup_tunnel": {"required": False, "type": "int"},
"firewall_address": {"required": False, "type": "int"},
"firewall_addrgrp": {"required": False, "type": "int"},
"firewall_policy": {"required": False, "type": "int"},
"ipsec_phase1": {"required": False, "type": "int"},
"ipsec_phase1_interface": {"required": False, "type": "int"},
"ipsec_phase2": {"required": False, "type": "int"},
"ipsec_phase2_interface": {"required": False, "type": "int"},
"log_disk_quota": {"required": False, "type": "int"},
"onetime_schedule": {"required": False, "type": "int"},
"proxy": {"required": False, "type": "int"},
"recurring_schedule": {"required": False, "type": "int"},
"service_group": {"required": False, "type": "int"},
"session": {"required": False, "type": "int"},
"sslvpn": {"required": False, "type": "int"},
"user": {"required": False, "type": "int"},
"user_group": {"required": False, "type": "int"}
}
}
}
module = AnsibleModule(argument_spec=fields,
supports_check_mode=False)
# legacy_mode refers to using fortiosapi instead of HTTPAPI
legacy_mode = 'host' in module.params and module.params['host'] is not None and \
'username' in module.params and module.params['username'] is not None and \
'password' in module.params and module.params['password'] is not None
if not legacy_mode:
if module._socket_path:
connection = Connection(module._socket_path)
fos = FortiOSHandler(connection)
is_error, has_changed, result = fortios_system(module.params, fos)
else:
module.fail_json(**FAIL_SOCKET_MSG)
else:
try:
from fortiosapi import FortiOSAPI
except ImportError:
module.fail_json(msg="fortiosapi module is required")
fos = FortiOSAPI()
login(module.params, fos)
is_error, has_changed, result = fortios_system(module.params, fos)
fos.logout()
if not is_error:
module.exit_json(changed=has_changed, meta=result)
else:
module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
main()
| gpl-3.0 |
stvstnfrd/edx-platform | lms/djangoapps/program_enrollments/management/commands/reset_enrollment_data.py | 5 | 2105 | """
Management command to remove enrollments and any related models created as
a side effect of enrolling students.
Intended for use in integration sandbox environments.
"""
import logging
from textwrap import dedent
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from common.djangoapps.student.models import CourseEnrollment
from lms.djangoapps.program_enrollments.models import ProgramEnrollment
log = logging.getLogger(__name__)
class Command(BaseCommand):
"""
Deletes all enrollments and related data
Example usage:
$ ./manage.py lms reset_enrollment_data ca73b4af-676a-4bb3-a9a5-f6b5a3dedd,1c5f61b9-0be5-4a90-9ea5-582d5e066c
"""
help = dedent(__doc__).strip()
confirmation_prompt = "Type 'confirm' to continue with deletion\n"
def add_arguments(self, parser):
parser.add_argument(
'programs',
help='Comma separated list of programs to delete enrollments for'
)
parser.add_argument(
'--force',
action='store_true',
help='Skip manual confirmation step before deleting objects',
)
@transaction.atomic
def handle(self, *args, **options):
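# Note: the deletes below run before the confirmation prompt; because
# handle() is wrapped in transaction.atomic, raising CommandError on a
# failed confirmation rolls them back.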
programs = options['programs'].split(',')
q1_count, deleted_course_enrollment_models = CourseEnrollment.objects.filter(
programcourseenrollment__program_enrollment__program_uuid__in=programs
).delete()
q2_count, deleted_program_enrollment_models = ProgramEnrollment.objects.filter(
program_uuid__in=programs
).delete()
log.info(
'The following records will be deleted:\n%s\n%s\n',
deleted_course_enrollment_models,
deleted_program_enrollment_models,
)
if not options['force']:
confirmation = input(self.confirmation_prompt)
if confirmation != 'confirm':
raise CommandError('User confirmation required. No records have been modified')
log.info('Deleting %s records...', q1_count + q2_count)
| agpl-3.0 |
shams169/pythonProject | ContactsDir/env/lib/python3.6/site-packages/setuptools/sandbox.py | 21 | 14543 | import os
import sys
import tempfile
import operator
import functools
import itertools
import re
import contextlib
import pickle
import textwrap
from setuptools.extern import six
from setuptools.extern.six.moves import builtins, map
import pkg_resources
if sys.platform.startswith('java'):
import org.python.modules.posix.PosixModule as _os
else:
_os = sys.modules[os.name]
try:
_file = file
except NameError:
_file = None
_open = open
from distutils.errors import DistutilsError
from pkg_resources import working_set
__all__ = [
"AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup",
]
def _execfile(filename, globals, locals=None):
"""
Python 3 implementation of execfile.
"""
mode = 'rb'
with open(filename, mode) as stream:
script = stream.read()
# compile() function in Python 2.6 and 3.1 requires LF line endings.
if sys.version_info[:2] < (2, 7) or sys.version_info[:2] >= (3, 0) and sys.version_info[:2] < (3, 2):
script = script.replace(b'\r\n', b'\n')
script = script.replace(b'\r', b'\n')
if locals is None:
locals = globals
code = compile(script, filename, 'exec')
exec(code, globals, locals)
@contextlib.contextmanager
def save_argv(repl=None):
saved = sys.argv[:]
if repl is not None:
sys.argv[:] = repl
try:
yield saved
finally:
sys.argv[:] = saved
@contextlib.contextmanager
def save_path():
saved = sys.path[:]
try:
yield saved
finally:
sys.path[:] = saved
@contextlib.contextmanager
def override_temp(replacement):
"""
Monkey-patch tempfile.tempdir with replacement, ensuring it exists
"""
if not os.path.isdir(replacement):
os.makedirs(replacement)
saved = tempfile.tempdir
tempfile.tempdir = replacement
try:
yield
finally:
tempfile.tempdir = saved
@contextlib.contextmanager
def pushd(target):
saved = os.getcwd()
os.chdir(target)
try:
yield saved
finally:
os.chdir(saved)
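# Usage sketch for the context manager above (illustrative):
#
#   with pushd('/tmp'):
#       pass  # os.getcwd() == '/tmp' here
#   # previous working directory is restored on exit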
class UnpickleableException(Exception):
"""
An exception representing another Exception that could not be pickled.
"""
@staticmethod
def dump(type, exc):
"""
Always return a dumped (pickled) type and exc. If exc can't be pickled,
wrap it in UnpickleableException first.
"""
try:
return pickle.dumps(type), pickle.dumps(exc)
except Exception:
# get UnpickleableException inside the sandbox
from setuptools.sandbox import UnpickleableException as cls
return cls.dump(cls, cls(repr(exc)))
class ExceptionSaver:
"""
A Context Manager that will save an exception, serialized, and restore it
later.
"""
def __enter__(self):
return self
def __exit__(self, type, exc, tb):
if not exc:
return
# dump the exception
self._saved = UnpickleableException.dump(type, exc)
self._tb = tb
# suppress the exception
return True
def resume(self):
"restore and re-raise any exception"
if '_saved' not in vars(self):
return
type, exc = map(pickle.loads, self._saved)
six.reraise(type, exc, self._tb)
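# Sketch: ExceptionSaver carries an exception across a state-restoring
# boundary (as save_modules does below):
#
#   with ExceptionSaver() as saved:
#       raise ValueError('boom')  # suppressed and pickled here
#   # ... restore global state ...
#   saved.resume()  # re-raises the ValueError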
@contextlib.contextmanager
def save_modules():
"""
Context in which imported modules are saved.
Translates exceptions internal to the context into the equivalent exception
outside the context.
"""
saved = sys.modules.copy()
with ExceptionSaver() as saved_exc:
yield saved
sys.modules.update(saved)
# remove any modules imported since
del_modules = (
mod_name for mod_name in sys.modules
if mod_name not in saved
# exclude any encodings modules. See #285
and not mod_name.startswith('encodings.')
)
_clear_modules(del_modules)
saved_exc.resume()
def _clear_modules(module_names):
for mod_name in list(module_names):
del sys.modules[mod_name]
@contextlib.contextmanager
def save_pkg_resources_state():
saved = pkg_resources.__getstate__()
try:
yield saved
finally:
pkg_resources.__setstate__(saved)
@contextlib.contextmanager
def setup_context(setup_dir):
temp_dir = os.path.join(setup_dir, 'temp')
with save_pkg_resources_state():
with save_modules():
hide_setuptools()
with save_path():
with save_argv():
with override_temp(temp_dir):
with pushd(setup_dir):
# ensure setuptools commands are available
__import__('setuptools')
yield
def _needs_hiding(mod_name):
"""
>>> _needs_hiding('setuptools')
True
>>> _needs_hiding('pkg_resources')
True
>>> _needs_hiding('setuptools_plugin')
False
>>> _needs_hiding('setuptools.__init__')
True
>>> _needs_hiding('distutils')
True
>>> _needs_hiding('os')
False
>>> _needs_hiding('Cython')
True
"""
pattern = re.compile(r'(setuptools|pkg_resources|distutils|Cython)(\.|$)')
return bool(pattern.match(mod_name))
def hide_setuptools():
"""
Remove references to setuptools' modules from sys.modules to allow the
invocation to import the most appropriate setuptools. This technique is
necessary to avoid issues such as #315 where setuptools upgrading itself
would fail to find a function declared in the metadata.
"""
modules = filter(_needs_hiding, sys.modules)
_clear_modules(modules)
def run_setup(setup_script, args):
"""Run a distutils setup script, sandboxed in its directory"""
setup_dir = os.path.abspath(os.path.dirname(setup_script))
with setup_context(setup_dir):
try:
sys.argv[:] = [setup_script] + list(args)
sys.path.insert(0, setup_dir)
# reset to include setup dir, w/clean callback list
working_set.__init__()
working_set.callbacks.append(lambda dist: dist.activate())
# __file__ should be a byte string on Python 2 (#712)
dunder_file = (
setup_script
if isinstance(setup_script, str) else
setup_script.encode(sys.getfilesystemencoding())
)
with DirectorySandbox(setup_dir):
ns = dict(__file__=dunder_file, __name__='__main__')
_execfile(setup_script, ns)
except SystemExit as v:
if v.args and v.args[0]:
raise
# Normal exit, just return
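# Typical invocation (sketch): run another project's setup script sandboxed
# to its own directory; writes outside it raise SandboxViolation.
#
#   run_setup('/path/to/project/setup.py', ['bdist_wheel'])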
class AbstractSandbox:
"""Wrap 'os' module and 'open()' builtin for virtualizing setup scripts"""
_active = False
def __init__(self):
self._attrs = [
name for name in dir(_os)
if not name.startswith('_') and hasattr(self, name)
]
def _copy(self, source):
for name in self._attrs:
setattr(os, name, getattr(source, name))
def __enter__(self):
self._copy(self)
if _file:
builtins.file = self._file
builtins.open = self._open
self._active = True
def __exit__(self, exc_type, exc_value, traceback):
self._active = False
if _file:
builtins.file = _file
builtins.open = _open
self._copy(_os)
def run(self, func):
"""Run 'func' under os sandboxing"""
with self:
return func()
def _mk_dual_path_wrapper(name):
original = getattr(_os, name)
def wrap(self, src, dst, *args, **kw):
if self._active:
src, dst = self._remap_pair(name, src, dst, *args, **kw)
return original(src, dst, *args, **kw)
return wrap
for name in ["rename", "link", "symlink"]:
if hasattr(_os, name):
locals()[name] = _mk_dual_path_wrapper(name)
def _mk_single_path_wrapper(name, original=None):
original = original or getattr(_os, name)
def wrap(self, path, *args, **kw):
if self._active:
path = self._remap_input(name, path, *args, **kw)
return original(path, *args, **kw)
return wrap
if _file:
_file = _mk_single_path_wrapper('file', _file)
_open = _mk_single_path_wrapper('open', _open)
for name in [
"stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir",
"remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat",
"startfile", "mkfifo", "mknod", "pathconf", "access"
]:
if hasattr(_os, name):
locals()[name] = _mk_single_path_wrapper(name)
def _mk_single_with_return(name):
original = getattr(_os, name)
def wrap(self, path, *args, **kw):
if self._active:
path = self._remap_input(name, path, *args, **kw)
return self._remap_output(name, original(path, *args, **kw))
return original(path, *args, **kw)
return wrap
for name in ['readlink', 'tempnam']:
if hasattr(_os, name):
locals()[name] = _mk_single_with_return(name)
def _mk_query(name):
original = getattr(_os, name)
def wrap(self, *args, **kw):
retval = original(*args, **kw)
if self._active:
return self._remap_output(name, retval)
return retval
return wrap
for name in ['getcwd', 'tmpnam']:
if hasattr(_os, name):
locals()[name] = _mk_query(name)
def _validate_path(self, path):
"""Called to remap or validate any path, whether input or output"""
return path
def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
return self._validate_path(path)
def _remap_output(self, operation, path):
"""Called for path outputs"""
return self._validate_path(path)
def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
return (
self._remap_input(operation + '-from', src, *args, **kw),
self._remap_input(operation + '-to', dst, *args, **kw)
)
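# A minimal subclass sketch (illustrative only): AbstractSandbox itself never
# rejects anything, so a subclass that merely logs path inputs only needs to
# override one hook:
#
#     class LoggingSandbox(AbstractSandbox):
#         def _remap_input(self, operation, path, *args, **kw):
#             print('%s -> %r' % (operation, path))
#             return path
#
#     LoggingSandbox().run(lambda: os.listdir('.'))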
if hasattr(os, 'devnull'):
_EXCEPTIONS = [os.devnull,]
else:
_EXCEPTIONS = []
class DirectorySandbox(AbstractSandbox):
"""Restrict operations to a single subdirectory - pseudo-chroot"""
write_ops = dict.fromkeys([
"open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir",
"utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam",
])
_exception_patterns = [
# Allow lib2to3 to attempt to save a pickled grammar object (#121)
r'.*lib2to3.*\.pickle$',
]
"exempt writing to paths that match the pattern"
def __init__(self, sandbox, exceptions=_EXCEPTIONS):
self._sandbox = os.path.normcase(os.path.realpath(sandbox))
self._prefix = os.path.join(self._sandbox, '')
self._exceptions = [
os.path.normcase(os.path.realpath(path))
for path in exceptions
]
AbstractSandbox.__init__(self)
def _violation(self, operation, *args, **kw):
from setuptools.sandbox import SandboxViolation
raise SandboxViolation(operation, args, kw)
if _file:
def _file(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("file", path, mode, *args, **kw)
return _file(path, mode, *args, **kw)
def _open(self, path, mode='r', *args, **kw):
if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path):
self._violation("open", path, mode, *args, **kw)
return _open(path, mode, *args, **kw)
def tmpnam(self):
self._violation("tmpnam")
def _ok(self, path):
active = self._active
try:
self._active = False
realpath = os.path.normcase(os.path.realpath(path))
return (
self._exempted(realpath)
or realpath == self._sandbox
or realpath.startswith(self._prefix)
)
finally:
self._active = active
def _exempted(self, filepath):
start_matches = (
filepath.startswith(exception)
for exception in self._exceptions
)
pattern_matches = (
re.match(pattern, filepath)
for pattern in self._exception_patterns
)
candidates = itertools.chain(start_matches, pattern_matches)
return any(candidates)
def _remap_input(self, operation, path, *args, **kw):
"""Called for path inputs"""
if operation in self.write_ops and not self._ok(path):
self._violation(operation, os.path.realpath(path), *args, **kw)
return path
def _remap_pair(self, operation, src, dst, *args, **kw):
"""Called for path pairs like rename, link, and symlink operations"""
if not self._ok(src) or not self._ok(dst):
self._violation(operation, src, dst, *args, **kw)
return (src, dst)
def open(self, file, flags, mode=0o777, *args, **kw):
"""Called for low-level os.open()"""
if flags & WRITE_FLAGS and not self._ok(file):
self._violation("os.open", file, flags, mode, *args, **kw)
return _os.open(file, flags, mode, *args, **kw)
WRITE_FLAGS = functools.reduce(
operator.or_, [getattr(_os, a, 0) for a in
"O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()]
)
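# Illustrative behaviour (not part of the original module): os.O_RDONLY is 0,
# so read-only os.open() calls never match `flags & WRITE_FLAGS`, while
# os.O_WRONLY | os.O_CREAT does. End to end:
#
#     sandbox = DirectorySandbox('/tmp/build')
#     sandbox.run(lambda: open('/tmp/build/ok.txt', 'w'))  # allowed
#     sandbox.run(lambda: open('/etc/passwd', 'w'))        # SandboxViolation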
class SandboxViolation(DistutilsError):
"""A setup script attempted to modify the filesystem outside the sandbox"""
tmpl = textwrap.dedent("""
SandboxViolation: {cmd}{args!r} {kwargs}
The package setup script has attempted to modify files on your system
that are not within the EasyInstall build area, and has been aborted.
This package cannot be safely installed by EasyInstall, and may not
support alternate installation locations even if you run its setup
script by hand. Please inform the package's author and the EasyInstall
maintainers to find out if a fix or workaround is available.
""").lstrip()
def __str__(self):
cmd, args, kwargs = self.args
return self.tmpl.format(**locals())
| mit |
mbachry/exxo | exxo/venv.py | 1 | 2558 | ACTIVATE_SCRIPT = """# This file must be used with "source bin/activate" *from bash*
# you cannot run it directly
deactivate () {
# reset old environment variables
if [ -n "$_OLD_VIRTUAL_PATH" ] ; then
PATH="$_OLD_VIRTUAL_PATH"
export PATH
unset _OLD_VIRTUAL_PATH
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "$BASH" -o -n "$ZSH_VERSION" ] ; then
hash -r
fi
PYTHONPATH="$_OLD_PYTHONPATH"
export PYTHONPATH
unset _OLD_PYTHONPATH
if [ -n "$_OLD_VIRTUAL_PS1" ] ; then
PS1="$_OLD_VIRTUAL_PS1"
export PS1
unset _OLD_VIRTUAL_PS1
fi
unset VIRTUAL_ENV
if [ ! "$1" = "nondestructive" ] ; then
# Self destruct!
unset -f deactivate
fi
}
# unset irrelevant variables
deactivate nondestructive
VIRTUAL_ENV="{{ venv_path }}"
export VIRTUAL_ENV
VIRTUAL_ENV_PYRUN_VERSION="{{ pyrun_version }}"
export VIRTUAL_ENV_PYRUN_VERSION
_OLD_PYTHONPATH="$PYTHONPATH"
PYTHONPATH="$VIRTUAL_ENV/pip:$PYTHONPATH"
export PYTHONPATH
_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/bin:$PATH"
export PATH
if [ -z "$VIRTUAL_ENV_DISABLE_PROMPT" ] ; then
_OLD_VIRTUAL_PS1="$PS1"
if [ "x({{ venv_name }}) " != x ] ; then
PS1="({{ venv_name }}) $PS1"
else
if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then
# special case for Aspen magic directories
# see http://www.zetadev.com/software/aspen/
PS1="[`basename \`dirname \"$VIRTUAL_ENV\"\``] $PS1"
else
PS1="(`basename \"$VIRTUAL_ENV\"`)$PS1"
fi
fi
export PS1
fi
# This should detect bash and zsh, which have a hash command that must
# be called to get it to forget past commands. Without forgetting
# past commands the $PATH changes we made may not be respected
if [ -n "$BASH" -o -n "$ZSH_VERSION" ] ; then
hash -r
fi
"""
PIP_SCRIPT = """#!/usr/bin/env python
import os
import sys
import pkg_resources
import pip
import pip._vendor.pkg_resources
eggdir = os.path.join(os.environ['VIRTUAL_ENV'], 'pip')
try:
pkg_resources.working_set.entries.remove(eggdir)
except ValueError:
pass
try:
pip._vendor.pkg_resources.working_set.entries.remove(eggdir)
except ValueError:
pass
for p in ('setuptools', 'pip'):
pkg_resources.working_set.by_key.pop(p, None)
pip._vendor.pkg_resources.working_set.by_key.pop(p, None)
sys.exit(pip.main())
"""
| isc |
TNT-Samuel/Coding-Projects | DNS Server/Source - Copy/Lib/encodings/cp1255.py | 272 | 12466 | """ Python Character Mapping Codec cp1255 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1255.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1255',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\u0192' # 0x83 -> LATIN SMALL LETTER F WITH HOOK
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\u02c6' # 0x88 -> MODIFIER LETTER CIRCUMFLEX ACCENT
'\u2030' # 0x89 -> PER MILLE SIGN
'\ufffe' # 0x8A -> UNDEFINED
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x8C -> UNDEFINED
'\ufffe' # 0x8D -> UNDEFINED
'\ufffe' # 0x8E -> UNDEFINED
'\ufffe' # 0x8F -> UNDEFINED
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\u02dc' # 0x98 -> SMALL TILDE
'\u2122' # 0x99 -> TRADE MARK SIGN
'\ufffe' # 0x9A -> UNDEFINED
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\ufffe' # 0x9C -> UNDEFINED
'\ufffe' # 0x9D -> UNDEFINED
'\ufffe' # 0x9E -> UNDEFINED
'\ufffe' # 0x9F -> UNDEFINED
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\xa1' # 0xA1 -> INVERTED EXCLAMATION MARK
'\xa2' # 0xA2 -> CENT SIGN
'\xa3' # 0xA3 -> POUND SIGN
'\u20aa' # 0xA4 -> NEW SHEQEL SIGN
'\xa5' # 0xA5 -> YEN SIGN
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\xd7' # 0xAA -> MULTIPLICATION SIGN
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\xaf' # 0xAF -> MACRON
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\xb2' # 0xB2 -> SUPERSCRIPT TWO
'\xb3' # 0xB3 -> SUPERSCRIPT THREE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\xb9' # 0xB9 -> SUPERSCRIPT ONE
'\xf7' # 0xBA -> DIVISION SIGN
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbc' # 0xBC -> VULGAR FRACTION ONE QUARTER
'\xbd' # 0xBD -> VULGAR FRACTION ONE HALF
'\xbe' # 0xBE -> VULGAR FRACTION THREE QUARTERS
'\xbf' # 0xBF -> INVERTED QUESTION MARK
'\u05b0' # 0xC0 -> HEBREW POINT SHEVA
'\u05b1' # 0xC1 -> HEBREW POINT HATAF SEGOL
'\u05b2' # 0xC2 -> HEBREW POINT HATAF PATAH
'\u05b3' # 0xC3 -> HEBREW POINT HATAF QAMATS
'\u05b4' # 0xC4 -> HEBREW POINT HIRIQ
'\u05b5' # 0xC5 -> HEBREW POINT TSERE
'\u05b6' # 0xC6 -> HEBREW POINT SEGOL
'\u05b7' # 0xC7 -> HEBREW POINT PATAH
'\u05b8' # 0xC8 -> HEBREW POINT QAMATS
'\u05b9' # 0xC9 -> HEBREW POINT HOLAM
'\ufffe' # 0xCA -> UNDEFINED
'\u05bb' # 0xCB -> HEBREW POINT QUBUTS
'\u05bc' # 0xCC -> HEBREW POINT DAGESH OR MAPIQ
'\u05bd' # 0xCD -> HEBREW POINT METEG
'\u05be' # 0xCE -> HEBREW PUNCTUATION MAQAF
'\u05bf' # 0xCF -> HEBREW POINT RAFE
'\u05c0' # 0xD0 -> HEBREW PUNCTUATION PASEQ
'\u05c1' # 0xD1 -> HEBREW POINT SHIN DOT
'\u05c2' # 0xD2 -> HEBREW POINT SIN DOT
'\u05c3' # 0xD3 -> HEBREW PUNCTUATION SOF PASUQ
'\u05f0' # 0xD4 -> HEBREW LIGATURE YIDDISH DOUBLE VAV
'\u05f1' # 0xD5 -> HEBREW LIGATURE YIDDISH VAV YOD
'\u05f2' # 0xD6 -> HEBREW LIGATURE YIDDISH DOUBLE YOD
'\u05f3' # 0xD7 -> HEBREW PUNCTUATION GERESH
'\u05f4' # 0xD8 -> HEBREW PUNCTUATION GERSHAYIM
'\ufffe' # 0xD9 -> UNDEFINED
'\ufffe' # 0xDA -> UNDEFINED
'\ufffe' # 0xDB -> UNDEFINED
'\ufffe' # 0xDC -> UNDEFINED
'\ufffe' # 0xDD -> UNDEFINED
'\ufffe' # 0xDE -> UNDEFINED
'\ufffe' # 0xDF -> UNDEFINED
'\u05d0' # 0xE0 -> HEBREW LETTER ALEF
'\u05d1' # 0xE1 -> HEBREW LETTER BET
'\u05d2' # 0xE2 -> HEBREW LETTER GIMEL
'\u05d3' # 0xE3 -> HEBREW LETTER DALET
'\u05d4' # 0xE4 -> HEBREW LETTER HE
'\u05d5' # 0xE5 -> HEBREW LETTER VAV
'\u05d6' # 0xE6 -> HEBREW LETTER ZAYIN
'\u05d7' # 0xE7 -> HEBREW LETTER HET
'\u05d8' # 0xE8 -> HEBREW LETTER TET
'\u05d9' # 0xE9 -> HEBREW LETTER YOD
'\u05da' # 0xEA -> HEBREW LETTER FINAL KAF
'\u05db' # 0xEB -> HEBREW LETTER KAF
'\u05dc' # 0xEC -> HEBREW LETTER LAMED
'\u05dd' # 0xED -> HEBREW LETTER FINAL MEM
'\u05de' # 0xEE -> HEBREW LETTER MEM
'\u05df' # 0xEF -> HEBREW LETTER FINAL NUN
'\u05e0' # 0xF0 -> HEBREW LETTER NUN
'\u05e1' # 0xF1 -> HEBREW LETTER SAMEKH
'\u05e2' # 0xF2 -> HEBREW LETTER AYIN
'\u05e3' # 0xF3 -> HEBREW LETTER FINAL PE
'\u05e4' # 0xF4 -> HEBREW LETTER PE
'\u05e5' # 0xF5 -> HEBREW LETTER FINAL TSADI
'\u05e6' # 0xF6 -> HEBREW LETTER TSADI
'\u05e7' # 0xF7 -> HEBREW LETTER QOF
'\u05e8' # 0xF8 -> HEBREW LETTER RESH
'\u05e9' # 0xF9 -> HEBREW LETTER SHIN
'\u05ea' # 0xFA -> HEBREW LETTER TAV
'\ufffe' # 0xFB -> UNDEFINED
'\ufffe' # 0xFC -> UNDEFINED
'\u200e' # 0xFD -> LEFT-TO-RIGHT MARK
'\u200f' # 0xFE -> RIGHT-TO-LEFT MARK
'\ufffe' # 0xFF -> UNDEFINED
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
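# Illustrative round trip, with values read off the tables above: byte 0xE0
# decodes to HEBREW LETTER ALEF once the encodings package has registered
# this module:
#
#     b'\xe0'.decode('cp1255')   # -> '\u05d0'
#     '\u05d0'.encode('cp1255')  # -> b'\xe0'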
| gpl-3.0 |
mdworks2016/work_development | Python/20_Third_Certification/venv/lib/python3.7/site-packages/django/utils/translation/__init__.py | 5 | 10790 | """
Internationalization support.
"""
import re
import warnings
from contextlib import ContextDecorator
from decimal import ROUND_UP, Decimal
from django.utils.autoreload import autoreload_started, file_changed
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.functional import lazy
__all__ = [
'activate', 'deactivate', 'override', 'deactivate_all',
'get_language', 'get_language_from_request',
'get_language_info', 'get_language_bidi',
'check_for_language', 'to_language', 'to_locale', 'templatize',
'gettext', 'gettext_lazy', 'gettext_noop',
'ugettext', 'ugettext_lazy', 'ugettext_noop',
'ngettext', 'ngettext_lazy',
'ungettext', 'ungettext_lazy',
'pgettext', 'pgettext_lazy',
'npgettext', 'npgettext_lazy',
'LANGUAGE_SESSION_KEY',
]
LANGUAGE_SESSION_KEY = '_language'
class TranslatorCommentWarning(SyntaxWarning):
pass
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
class Trans:
"""
The purpose of this class is to store the actual translation function upon
receiving the first call to that function. After this is done, changes to
USE_I18N will have no effect on which function is served upon request. If
your tests rely on changing USE_I18N, you can delete all the functions
from _trans.__dict__.
Note that storing the function with setattr will have a noticeable
performance effect, as access to the function goes through the normal path,
instead of using __getattr__.
"""
def __getattr__(self, real_name):
from django.conf import settings
if settings.USE_I18N:
from django.utils.translation import trans_real as trans
from django.utils.translation.reloader import watch_for_translation_changes, translation_file_changed
autoreload_started.connect(watch_for_translation_changes, dispatch_uid='translation_file_changed')
file_changed.connect(translation_file_changed, dispatch_uid='translation_file_changed')
else:
from django.utils.translation import trans_null as trans
setattr(self, real_name, getattr(trans, real_name))
return getattr(trans, real_name)
_trans = Trans()
# The Trans class is no more needed, so remove it from the namespace.
del Trans
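# Illustrative consequence of the lazy dispatch above (module names are real,
# usage hypothetical): the first attribute access resolves the real
# implementation and caches it on _trans, so later calls skip __getattr__.
#
#     from django.utils import translation
#     translation.gettext('Hello')  # first call triggers Trans.__getattr__
#     translation.gettext('Hello')  # now served from the cached attribute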
def gettext_noop(message):
return _trans.gettext_noop(message)
def ugettext_noop(message):
"""
A legacy compatibility wrapper for Unicode handling on Python 2.
Alias of gettext_noop() since Django 2.0.
"""
warnings.warn(
'django.utils.translation.ugettext_noop() is deprecated in favor of '
'django.utils.translation.gettext_noop().',
RemovedInDjango40Warning, stacklevel=2,
)
return gettext_noop(message)
def gettext(message):
return _trans.gettext(message)
def ugettext(message):
"""
A legacy compatibility wrapper for Unicode handling on Python 2.
Alias of gettext() since Django 2.0.
"""
warnings.warn(
'django.utils.translation.ugettext() is deprecated in favor of '
'django.utils.translation.gettext().',
RemovedInDjango40Warning, stacklevel=2,
)
return gettext(message)
def ngettext(singular, plural, number):
return _trans.ngettext(singular, plural, number)
def ungettext(singular, plural, number):
"""
A legacy compatibility wrapper for Unicode handling on Python 2.
Alias of ngettext() since Django 2.0.
"""
warnings.warn(
'django.utils.translation.ungettext() is deprecated in favor of '
'django.utils.translation.ngettext().',
RemovedInDjango40Warning, stacklevel=2,
)
return ngettext(singular, plural, number)
def pgettext(context, message):
return _trans.pgettext(context, message)
def npgettext(context, singular, plural, number):
return _trans.npgettext(context, singular, plural, number)
gettext_lazy = lazy(gettext, str)
pgettext_lazy = lazy(pgettext, str)
def ugettext_lazy(message):
"""
A legacy compatibility wrapper for Unicode handling on Python 2.
An alias of gettext_lazy() since Django 2.0.
"""
warnings.warn(
'django.utils.translation.ugettext_lazy() is deprecated in favor of '
'django.utils.translation.gettext_lazy().',
RemovedInDjango40Warning, stacklevel=2,
)
return gettext_lazy(message)
def lazy_number(func, resultclass, number=None, **kwargs):
if isinstance(number, int):
kwargs['number'] = number
proxy = lazy(func, resultclass)(**kwargs)
else:
original_kwargs = kwargs.copy()
class NumberAwareString(resultclass):
def __bool__(self):
return bool(kwargs['singular'])
def _get_number_value(self, values):
try:
return values[number]
except KeyError:
raise KeyError(
"Your dictionary lacks key '%s\'. Please provide "
"it, because it is required to determine whether "
"string is singular or plural." % number
)
def _translate(self, number_value):
kwargs['number'] = number_value
return func(**kwargs)
def format(self, *args, **kwargs):
number_value = self._get_number_value(kwargs) if kwargs and number else args[0]
return self._translate(number_value).format(*args, **kwargs)
def __mod__(self, rhs):
if isinstance(rhs, dict) and number:
number_value = self._get_number_value(rhs)
else:
number_value = rhs
translated = self._translate(number_value)
try:
translated = translated % rhs
except TypeError:
# String doesn't contain a placeholder for the number.
pass
return translated
proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs)
proxy.__reduce__ = lambda: (_lazy_number_unpickle, (func, resultclass, number, original_kwargs))
return proxy
def _lazy_number_unpickle(func, resultclass, number, kwargs):
return lazy_number(func, resultclass, number=number, **kwargs)
def ngettext_lazy(singular, plural, number=None):
return lazy_number(ngettext, str, singular=singular, plural=plural, number=number)
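# Illustrative use of the machinery above: passing a key name instead of an
# int defers the singular/plural choice until formatting time.
#
#     msg = ngettext_lazy('%(num)d apple', '%(num)d apples', 'num')
#     msg % {'num': 1}  # -> '1 apple' (before any translation is active)
#     msg % {'num': 3}  # -> '3 apples'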
def ungettext_lazy(singular, plural, number=None):
"""
A legacy compatibility wrapper for Unicode handling on Python 2.
An alias of ngettext_lazy() since Django 2.0.
"""
warnings.warn(
'django.utils.translation.ungettext_lazy() is deprecated in favor of '
'django.utils.translation.ngettext_lazy().',
RemovedInDjango40Warning, stacklevel=2,
)
return ngettext_lazy(singular, plural, number)
def npgettext_lazy(context, singular, plural, number=None):
return lazy_number(npgettext, str, context=context, singular=singular, plural=plural, number=number)
def activate(language):
return _trans.activate(language)
def deactivate():
return _trans.deactivate()
class override(ContextDecorator):
def __init__(self, language, deactivate=False):
self.language = language
self.deactivate = deactivate
def __enter__(self):
self.old_language = get_language()
if self.language is not None:
activate(self.language)
else:
deactivate_all()
def __exit__(self, exc_type, exc_value, traceback):
if self.old_language is None:
deactivate_all()
elif self.deactivate:
deactivate()
else:
activate(self.old_language)
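# Illustrative usage: override is a ContextDecorator, so both forms work.
#
#     with override('fr'):
#         greeting = gettext('Hello')  # resolved against the 'fr' catalog
#
#     @override('de', deactivate=True)
#     def render_german_page(request):
#         ...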
def get_language():
return _trans.get_language()
def get_language_bidi():
return _trans.get_language_bidi()
def check_for_language(lang_code):
return _trans.check_for_language(lang_code)
def to_language(locale):
"""Turn a locale name (en_US) into a language name (en-us)."""
p = locale.find('_')
if p >= 0:
return locale[:p].lower() + '-' + locale[p + 1:].lower()
else:
return locale.lower()
def to_locale(language):
"""Turn a language name (en-us) into a locale name (en_US)."""
language, _, country = language.lower().partition('-')
if not country:
return language
# A language with > 2 characters after the dash only has its first
# character after the dash capitalized; e.g. sr-latn becomes sr_Latn.
# A language with 2 characters after the dash has both characters
# capitalized; e.g. en-us becomes en_US.
country, _, tail = country.partition('-')
country = country.title() if len(country) > 2 else country.upper()
if tail:
country += '-' + tail
return language + '_' + country
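# Worked examples for the two converters above:
#
#     to_language('en_US')  # -> 'en-us'
#     to_locale('en-us')    # -> 'en_US'   (2-char country upper-cased)
#     to_locale('sr-latn')  # -> 'sr_Latn' (longer country title-cased)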
def get_language_from_request(request, check_path=False):
return _trans.get_language_from_request(request, check_path)
def get_language_from_path(path):
return _trans.get_language_from_path(path)
def get_supported_language_variant(lang_code, *, strict=False):
return _trans.get_supported_language_variant(lang_code, strict)
def templatize(src, **kwargs):
from .template import templatize
return templatize(src, **kwargs)
def deactivate_all():
return _trans.deactivate_all()
def get_language_info(lang_code):
from django.conf.locale import LANG_INFO
try:
lang_info = LANG_INFO[lang_code]
if 'fallback' in lang_info and 'name' not in lang_info:
info = get_language_info(lang_info['fallback'][0])
else:
info = lang_info
except KeyError:
if '-' not in lang_code:
raise KeyError("Unknown language code %s." % lang_code)
generic_lang_code = lang_code.split('-')[0]
try:
info = LANG_INFO[generic_lang_code]
except KeyError:
raise KeyError("Unknown language code %s and %s." % (lang_code, generic_lang_code))
if info:
info['name_translated'] = gettext_lazy(info['name'])
return info
trim_whitespace_re = re.compile(r'\s*\n\s*')
def trim_whitespace(s):
return trim_whitespace_re.sub(' ', s.strip())
def round_away_from_one(value):
return int(Decimal(value - 1).quantize(Decimal('0'), rounding=ROUND_UP)) + 1
| apache-2.0 |
trdean/grEME | gr-uhd/apps/hf_radio/ssbdemod.py | 58 | 4082 | # Copyright 2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# This tries to push the hilbert transform for ssb demod back into the
# freq. xlating filter.
#
# The starting point for this was weaver_isb_am1_usrp3.py.
#
# The tap coefficients for freq_xlating_fir_filter_ccf were generated
# externally and are read from a file because I didn't want to learn how
# to make fir filters with arbitrary phase response using python numeric
# facilities.
#
# They were generated using Scilab which I am already familiar with.
# M. Revnell Jan 06
from gnuradio import gr
from gnuradio import blocks
from gnuradio import filter
class ssb_demod( gr.hier_block2 ):
def __init__( self, if_rate, af_rate ):
gr.hier_block2.__init__(self, "ssb_demod",
gr.io_signature(1,1,gr.sizeof_gr_complex),
gr.io_signature(1,1,gr.sizeof_float))
self.if_rate = int(if_rate)
self.af_rate = int(af_rate)
self.if_decim = int(if_rate / af_rate)
self.sideband = 1
self.xlate_taps = [complex(v) for v in open('ssb_taps').readlines()]
self.audio_taps = filter.firdes.low_pass(
1.0,
self.af_rate,
3e3,
600,
filter.firdes.WIN_HAMMING )
self.xlate = filter.freq_xlating_fir_filter_ccc(
self.if_decim,
self.xlate_taps,
0,
self.if_rate )
self.split = blocks.complex_to_float()
self.lpf = filter.fir_filter_fff(
1, self.audio_taps )
self.sum = blocks.add_ff( )
self.am_sel = blocks.multiply_const_ff( 0 )
self.sb_sel = blocks.multiply_const_ff( 1 )
self.mixer = blocks.add_ff()
self.am_det = blocks.complex_to_mag()
self.connect(self, self.xlate)
self.connect(self.xlate, self.split)
self.connect((self.split, 0), (self.sum, 0))
self.connect((self.split, 1), (self.sum, 1))
self.connect(self.sum, self.sb_sel)
self.connect(self.xlate, self.am_det)
self.connect(self.sb_sel, (self.mixer, 0))
self.connect(self.am_det, self.am_sel)
self.connect(self.am_sel, (self.mixer, 1))
self.connect(self.mixer, self.lpf)
self.connect(self.lpf, self)
def upper_sb( self ):
self.xlate.set_taps([v.conjugate() for v in self.xlate_taps])
self.sb_sel.set_k( 1.0 )
self.am_sel.set_k( 0.0 )
def lower_sb( self ):
self.xlate.set_taps(self.xlate_taps)
self.sb_sel.set_k( 1.0 )
self.am_sel.set_k( 0.0 )
def set_am( self ):
taps = filter.firdes.low_pass( 1.0,
self.if_rate,
5e3,
2e3,
filter.firdes.WIN_HAMMING )
self.xlate.set_taps( taps )
self.sb_sel.set_k( 0.0 )
self.am_sel.set_k( 1.0 )
def set_bw( self, bw ):
self.audio_taps = filter.firdes.low_pass(
1.0,
self.af_rate,
bw,
600,
filter.firdes.WIN_HAMMING )
self.lpf.set_taps( self.audio_taps )
def tune( self, freq ):
self.xlate.set_center_freq( freq )
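# Hypothetical flowgraph sketch (rates, source and sink are placeholders;
# an 'ssb_taps' file must exist in the working directory, see the note at
# the top of this file):
#
#     tb = gr.top_block()
#     demod = ssb_demod(if_rate=40000, af_rate=8000)
#     demod.lower_sb()  # select the lower sideband
#     tb.connect(some_complex_if_source, demod, some_float_audio_sink)
#     tb.run()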
| gpl-3.0 |
miipl-naveen/optibizz | addons/account/project/report/inverted_analytic_balance.py | 358 | 5760 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv
from openerp.report import report_sxw
class account_inverted_analytic_balance(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(account_inverted_analytic_balance, self).__init__(cr, uid, name, context=context)
self.localcontext.update( {
'time': time,
'lines_g': self._lines_g,
'lines_a': self._lines_a,
'sum_debit': self._sum_debit,
'sum_credit': self._sum_credit,
'sum_balance': self._sum_balance,
'sum_quantity': self._sum_quantity,
})
def _lines_g(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT aa.name AS name, aa.code AS code, "
"sum(aal.amount) AS balance, "
"sum(aal.unit_amount) AS quantity, aa.id AS id \
FROM account_analytic_line AS aal, account_account AS aa \
WHERE (aal.general_account_id=aa.id) "
"AND (aal.account_id IN %s) "
"AND (date>=%s) AND (date<=%s) AND aa.active \
GROUP BY aal.general_account_id, aa.name, aa.code, aal.code, aa.id "
"ORDER BY aal.code",
(tuple(ids), date1, date2))
res = self.cr.dictfetchall()
for r in res:
if r['balance'] > 0:
r['debit'] = r['balance']
r['credit'] = 0.0
elif r['balance'] < 0:
r['debit'] = 0.0
r['credit'] = -r['balance']
else:
r['debit'] = 0.0
r['credit'] = 0.0
return res
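# Illustrative mapping performed above: balance 150.0 yields debit=150.0 and
# credit=0.0, while balance -30.0 yields debit=0.0 and credit=30.0, so both
# report columns always carry non-negative amounts.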
def _lines_a(self, accounts, general_account_id, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT sum(aal.amount) AS balance, "
"sum(aal.unit_amount) AS quantity, "
"aaa.code AS code, aaa.name AS name, account_id \
FROM account_analytic_line AS aal, "
"account_analytic_account AS aaa \
WHERE aal.account_id=aaa.id AND aal.account_id IN %s "
"AND aal.general_account_id=%s AND aal.date>=%s "
"AND aal.date<=%s \
GROUP BY aal.account_id, general_account_id, aaa.code, aaa.name "
"ORDER BY aal.account_id",
(tuple(ids), general_account_id, date1, date2))
res = self.cr.dictfetchall()
aaa_obj = self.pool.get('account.analytic.account')
res2 = aaa_obj.read(self.cr, self.uid, ids, ['complete_name'])
complete_name = {}
for r in res2:
complete_name[r['id']] = r['complete_name']
for r in res:
r['complete_name'] = complete_name[r['account_id']]
if r['balance'] > 0:
r['debit'] = r['balance']
r['credit'] = 0.0
elif r['balance'] < 0:
r['debit'] = 0.0
r['credit'] = -r['balance']
else:
r['debit'] = 0.0
r['credit'] = 0.0
return res
def _sum_debit(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT sum(amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s AND amount>0", (tuple(ids),date1, date2,))
return self.cr.fetchone()[0] or 0.0
def _sum_credit(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT -sum(amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s AND amount<0", (tuple(ids),date1, date2,))
return self.cr.fetchone()[0] or 0.0
def _sum_balance(self, accounts, date1, date2):
debit = self._sum_debit(accounts, date1, date2)
credit = self._sum_credit(accounts, date1, date2)
return (debit-credit)
def _sum_quantity(self, accounts, date1, date2):
ids = map(lambda x: x.id, accounts)
self.cr.execute("SELECT sum(unit_amount) \
FROM account_analytic_line \
WHERE account_id IN %s AND date>=%s AND date<=%s", (tuple(ids),date1, date2,))
return self.cr.fetchone()[0] or 0.0
class report_invertedanalyticbalance(osv.AbstractModel):
_name = 'report.account.report_invertedanalyticbalance'
_inherit = 'report.abstract_report'
_template = 'account.report_invertedanalyticbalance'
_wrapped_report_class = account_inverted_analytic_balance
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
lucasmachadorj/pypln.web | pypln/web/core/tests/views/test_document.py | 2 | 11044 | # -*- coding:utf-8 -*-
#
# Copyright 2012 NAMD-EMAP-FGV
#
# This file is part of PyPLN. You can get more information at: http://pypln.org/.
#
# PyPLN is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyPLN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyPLN. If not, see <http://www.gnu.org/licenses/>.
from mock import patch
from StringIO import StringIO
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test.client import encode_multipart, BOUNDARY, MULTIPART_CONTENT
from rest_framework.reverse import reverse as rest_framework_reverse
from pypln.web.core.models import Corpus, Document
from pypln.web.core.tests.utils import TestWithMongo
__all__ = ["DocumentListViewTest", "DocumentDetailViewTest"]
class DocumentListViewTest(TestWithMongo):
fixtures = ['users', 'corpora', 'documents']
def setUp(self):
self.user = User.objects.get(username="user")
self.fp = StringIO("Content")
self.fp.name = "document.txt"
def test_requires_login(self):
response = self.client.get(reverse('document-list'))
self.assertEqual(response.status_code, 403)
def test_only_lists_documents_that_belong_to_the_authenticated_user(self):
self.client.login(username="user", password="user")
response = self.client.get(reverse('document-list'))
self.assertEqual(response.status_code, 200)
expected_data = Document.objects.filter(
owner=User.objects.get(username="user"))
object_list = response.renderer_context['view'].get_queryset()
self.assertEqual(list(expected_data), list(object_list))
@patch('pypln.web.core.views.create_pipeline_from_document')
def test_create_new_document(self, create_pipelines):
self.assertEqual(len(self.user.document_set.all()), 1)
self.client.login(username="user", password="user")
corpus = self.user.corpus_set.all()[0]
data = {"corpus": rest_framework_reverse('corpus-detail',
kwargs={'pk': corpus.id}), "blob": self.fp}
response = self.client.post(reverse('document-list'), data)
self.assertEqual(response.status_code, 201)
self.assertEqual(len(self.user.document_set.all()), 2)
@patch('pypln.web.core.views.create_pipeline_from_document')
def test_cant_create_document_for_another_user(self, create_pipeline):
self.client.login(username="user", password="user")
corpus = self.user.corpus_set.all()[0]
corpus_url = rest_framework_reverse('corpus-detail', kwargs={'pk': corpus.id})
data = {"corpus": corpus_url, "blob": self.fp, "owner": 1}
response = self.client.post(reverse('document-list'), data)
self.assertEqual(response.status_code, 201)
document = self.user.document_set.all()[1]
self.assertEqual(document.owner, self.user)
def test_cant_create_document_for_inexistent_corpus(self):
self.client.login(username="user", password="user")
corpus_url = rest_framework_reverse('corpus-detail', kwargs={'pk': 9999})
data = {"corpus": corpus_url, "blob": self.fp}
response = self.client.post(reverse('document-list'), data)
self.assertEqual(response.status_code, 400)
@patch('pypln.web.core.views.create_pipeline_from_document')
def test_cant_create_document_in_another_users_corpus(self, create_pipelines):
self.client.login(username="user", password="user")
# We'll try to associate this document to a corpus that belongs to
# 'admin'
corpus = Corpus.objects.filter(owner__username="admin")[0]
corpus_url = rest_framework_reverse('corpus-detail', kwargs={'pk': corpus.id})
data = {"corpus": corpus_url, "blob": self.fp}
response = self.client.post(reverse('document-list'), data)
self.assertEqual(response.status_code, 400)
@patch('pypln.web.backend_adapter.pipelines.create_pipeline')
def test_creating_a_document_should_create_a_pipeline_for_it(self, create_pipeline):
self.assertEqual(len(self.user.document_set.all()), 1)
self.client.login(username="user", password="user")
corpus = self.user.corpus_set.all()[0]
data = {"corpus": rest_framework_reverse('corpus-detail',
kwargs={'pk': corpus.id}), "blob": self.fp}
response = self.client.post(reverse('document-list'), data)
self.assertEqual(response.status_code, 201)
self.assertTrue(create_pipeline.called)
doc_id = int(response.data['url'].split('/')[-2])
document = Document.objects.get(pk=doc_id)
pipeline_data = {"_id": str(document.blob.file._id), "id": document.id}
create_pipeline.assert_called_with(pipeline_data)
class DocumentDetailViewTest(TestWithMongo):
fixtures = ['users', 'corpora', 'documents']
def setUp(self):
self.user = User.objects.get(username="user")
self.fp = StringIO("Content")
self.fp.name = "document.txt"
def _get_corpus_url(self, corpus_id):
return rest_framework_reverse('corpus-detail',
kwargs={'pk': corpus_id})
def test_requires_login(self):
document = Document.objects.filter(owner__username='user')[0]
response = self.client.get(reverse('document-detail',
kwargs={'pk': document.id}))
self.assertEqual(response.status_code, 403)
def test_shows_document_correctly(self):
self.client.login(username="user", password="user")
document = Document.objects.filter(owner__username="user")[0]
response = self.client.get(reverse('document-detail',
kwargs={'pk': document.id}))
self.assertEqual(response.status_code, 200)
self.assertEqual(response.renderer_context['view'].get_object(), document)
def test_returns_404_for_inexistent_document(self):
self.client.login(username="user", password="user")
response = self.client.get(reverse('document-detail',
kwargs={'pk': 9999}))
self.assertEqual(response.status_code, 404)
def test_returns_404_if_user_is_not_the_owner_of_the_document(self):
self.client.login(username="user", password="user")
document = Document.objects.filter(owner__username="admin")[0]
response = self.client.get(reverse('document-detail',
kwargs={'pk': document.id}))
self.assertEqual(response.status_code, 404)
@patch('pypln.web.backend_adapter.pipelines.create_pipeline')
def test_edit_document(self, create_pipeline):
self.client.login(username="user", password="user")
document = self.user.document_set.all()[0]
new_corpus = Corpus.objects.create(name="New corpus",
description="", owner=self.user)
data = encode_multipart(BOUNDARY, {"corpus": self._get_corpus_url(
new_corpus.id), "blob": self.fp})
response = self.client.put(reverse('document-detail',
kwargs={'pk': document.id}), data, content_type=MULTIPART_CONTENT)
self.assertEqual(response.status_code, 200)
updated_document = Document.objects.get(id=document.id)
self.assertEqual(updated_document.corpus, new_corpus)
def test_cant_edit_other_peoples_documents(self):
self.client.login(username="user", password="user")
document = Document.objects.filter(owner__username="admin")[0]
data = encode_multipart(BOUNDARY,
{"corpus": self._get_corpus_url(document.corpus.id),
"blob": self.fp})
response = self.client.put(reverse('document-detail',
kwargs={'pk': document.id}), data, content_type=MULTIPART_CONTENT)
# Since this document belongs to another user, it's getting
# filtered out of the queryset in `view.get_queryset()` and it
# appears not to exist.
self.assertEqual(response.status_code, 404)
@patch('pypln.web.backend_adapter.pipelines.create_pipeline')
def test_cant_change_the_owner_of_a_document(self, create_pipeline):
self.client.login(username="user", password="user")
document = self.user.document_set.all()[0]
# We try to set 'admin' as the owner (id=1)
data = encode_multipart(BOUNDARY, {"blob": self.fp,
"corpus": self._get_corpus_url(document.corpus.id), "owner": 1})
response = self.client.put(reverse('document-detail',
kwargs={'pk': document.id}), data, content_type=MULTIPART_CONTENT)
self.assertEqual(response.status_code, 200)
# but the view sets the request user as the owner anyway
self.assertEqual(response.data["owner"], "user")
def test_delete_a_document(self):
self.client.login(username="user", password="user")
self.assertEqual(len(self.user.document_set.all()), 1)
document = self.user.document_set.all()[0]
response = self.client.delete(reverse('document-detail',
kwargs={'pk': document.id}))
self.assertEqual(response.status_code, 204)
self.assertEqual(len(self.user.document_set.all()), 0)
def test_cant_delete_other_peoples_documents(self):
self.client.login(username="user", password="user")
self.assertEqual(len(Corpus.objects.filter(owner__username="admin")), 1)
document = Document.objects.filter(owner__username="admin")[0]
response = self.client.delete(reverse('document-detail',
kwargs={'pk': document.id}))
self.assertEqual(response.status_code, 404)
self.assertEqual(len(Corpus.objects.filter(owner__username="admin")), 1)
@patch('pypln.web.backend_adapter.pipelines.create_pipeline')
def test_updating_a_document_should_create_a_pipeline_for_it(self, create_pipeline):
self.client.login(username="user", password="user")
document = self.user.document_set.all()[0]
corpus = self.user.corpus_set.all()[0]
# We try to set 'admin' as the owner (id=1)
data = encode_multipart(BOUNDARY, {"blob": self.fp,
"corpus": self._get_corpus_url(document.corpus.id), "owner": 2})
response = self.client.put(reverse('document-detail',
kwargs={'pk': document.id}), data, content_type=MULTIPART_CONTENT)
self.assertEqual(response.status_code, 200)
self.assertTrue(create_pipeline.called)
document = response.renderer_context['view'].get_object()
pipeline_data = {"_id": str(document.blob.file._id), "id": document.id}
create_pipeline.assert_called_with(pipeline_data)
| gpl-3.0 |
jotes/ansible | v2/ansible/plugins/connections/winrm.py | 29 | 11717 | # (c) 2014, Chris Church <[email protected]>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
import base64
import hashlib
import imp
import os
import re
import shlex
import traceback
import urlparse
from ansible import errors
from ansible import utils
from ansible.callbacks import vvv, vvvv, verbose
from ansible.runner.shell_plugins import powershell
try:
from winrm import Response
from winrm.exceptions import WinRMTransportError
from winrm.protocol import Protocol
except ImportError:
raise errors.AnsibleError("winrm is not installed")
_winrm_cache = {
# 'user:pwhash@host:port': <protocol instance>
}
def vvvvv(msg, host=None):
verbose(msg, host=host, caplevel=4)
class Connection(object):
'''WinRM connections over HTTP/HTTPS.'''
def __init__(self, runner, host, port, user, password, *args, **kwargs):
self.runner = runner
self.host = host
self.port = port
self.user = user
self.password = password
self.has_pipelining = False
self.default_shell = 'powershell'
self.default_suffixes = ['.ps1', '']
self.protocol = None
self.shell_id = None
self.delegate = None
def _winrm_connect(self):
'''
Establish a WinRM connection over HTTP/HTTPS.
'''
port = self.port or 5986
vvv("ESTABLISH WINRM CONNECTION FOR USER: %s on PORT %s TO %s" % \
(self.user, port, self.host), host=self.host)
netloc = '%s:%d' % (self.host, port)
cache_key = '%s:%s@%s:%d' % (self.user, hashlib.md5(self.password).hexdigest(), self.host, port)
if cache_key in _winrm_cache:
vvvv('WINRM REUSE EXISTING CONNECTION: %s' % cache_key, host=self.host)
return _winrm_cache[cache_key]
transport_schemes = [('plaintext', 'https'), ('plaintext', 'http')] # FIXME: ssl/kerberos
if port == 5985:
transport_schemes = reversed(transport_schemes)
exc = None
for transport, scheme in transport_schemes:
endpoint = urlparse.urlunsplit((scheme, netloc, '/wsman', '', ''))
vvvv('WINRM CONNECT: transport=%s endpoint=%s' % (transport, endpoint),
host=self.host)
protocol = Protocol(endpoint, transport=transport,
username=self.user, password=self.password)
try:
protocol.send_message('')
_winrm_cache[cache_key] = protocol
return protocol
except WinRMTransportError, exc:
err_msg = str(exc)
if re.search(r'Operation\s+?timed\s+?out', err_msg, re.I):
raise errors.AnsibleError("the connection attempt timed out")
m = re.search(r'Code\s+?(\d{3})', err_msg)
if m:
code = int(m.groups()[0])
if code == 401:
raise errors.AnsibleError("the username/password specified for this server was incorrect")
elif code == 411:
_winrm_cache[cache_key] = protocol
return protocol
vvvv('WINRM CONNECTION ERROR: %s' % err_msg, host=self.host)
continue
if exc:
raise errors.AnsibleError(str(exc))
def _winrm_exec(self, command, args=(), from_exec=False):
if from_exec:
vvvv("WINRM EXEC %r %r" % (command, args), host=self.host)
else:
vvvvv("WINRM EXEC %r %r" % (command, args), host=self.host)
if not self.protocol:
self.protocol = self._winrm_connect()
if not self.shell_id:
self.shell_id = self.protocol.open_shell()
command_id = None
try:
command_id = self.protocol.run_command(self.shell_id, command, args)
response = Response(self.protocol.get_command_output(self.shell_id, command_id))
if from_exec:
vvvv('WINRM RESULT %r' % response, host=self.host)
else:
vvvvv('WINRM RESULT %r' % response, host=self.host)
vvvvv('WINRM STDOUT %s' % response.std_out, host=self.host)
vvvvv('WINRM STDERR %s' % response.std_err, host=self.host)
return response
finally:
if command_id:
self.protocol.cleanup_command(self.shell_id, command_id)
def connect(self):
if not self.protocol:
self.protocol = self._winrm_connect()
return self
def exec_command(self, cmd, tmp_path, sudo_user=None, sudoable=False, executable=None, in_data=None, su=None, su_user=None):
cmd = cmd.encode('utf-8')
cmd_parts = shlex.split(cmd, posix=False)
if '-EncodedCommand' in cmd_parts:
encoded_cmd = cmd_parts[cmd_parts.index('-EncodedCommand') + 1]
decoded_cmd = base64.b64decode(encoded_cmd)
vvv("EXEC %s" % decoded_cmd, host=self.host)
else:
vvv("EXEC %s" % cmd, host=self.host)
# For script/raw support.
if cmd_parts and cmd_parts[0].lower().endswith('.ps1'):
script = powershell._build_file_cmd(cmd_parts)
cmd_parts = powershell._encode_script(script, as_list=True)
try:
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:], from_exec=True)
except Exception, e:
traceback.print_exc()
raise errors.AnsibleError("failed to exec cmd %s" % cmd)
return (result.status_code, '', result.std_out.encode('utf-8'), result.std_err.encode('utf-8'))
def put_file(self, in_path, out_path):
vvv("PUT %s TO %s" % (in_path, out_path), host=self.host)
if not os.path.exists(in_path):
raise errors.AnsibleFileNotFound("file or module does not exist: %s" % in_path)
with open(in_path) as in_file:
in_size = os.path.getsize(in_path)
script_template = '''
$s = [System.IO.File]::OpenWrite("%s");
[void]$s.Seek(%d, [System.IO.SeekOrigin]::Begin);
$b = [System.Convert]::FromBase64String("%s");
[void]$s.Write($b, 0, $b.length);
[void]$s.SetLength(%d);
[void]$s.Close();
'''
# Determine max size of data we can pass per command.
script = script_template % (powershell._escape(out_path), in_size, '', in_size)
cmd = powershell._encode_script(script)
# Encode the script with no data, subtract its length from 8190 (the
# maximum Windows command length), divide by 2.67 (UTF-16LE base64
# command encoding), then by 1.35 again (base64 encoding of the data).
buffer_size = int(((8190 - len(cmd)) / 2.67) / 1.35)
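# Worked example with illustrative numbers: if the empty-payload command
# encodes to 2000 characters, buffer_size = int(((8190 - 2000) / 2.67)
# / 1.35) = 1717, i.e. roughly 1.7 KB of raw file data per round trip.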
for offset in xrange(0, in_size, buffer_size):
try:
out_data = in_file.read(buffer_size)
if offset == 0:
if out_data.lower().startswith('#!powershell') and not out_path.lower().endswith('.ps1'):
out_path = out_path + '.ps1'
b64_data = base64.b64encode(out_data)
script = script_template % (powershell._escape(out_path), offset, b64_data, in_size)
vvvv("WINRM PUT %s to %s (offset=%d size=%d)" % (in_path, out_path, offset, len(out_data)), host=self.host)
cmd_parts = powershell._encode_script(script, as_list=True)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:
raise IOError(result.std_err.encode('utf-8'))
except Exception:
traceback.print_exc()
raise errors.AnsibleError("failed to transfer file to %s" % out_path)
def fetch_file(self, in_path, out_path):
out_path = out_path.replace('\\', '/')
vvv("FETCH %s TO %s" % (in_path, out_path), host=self.host)
buffer_size = 2**20 # 1MB chunks
if not os.path.exists(os.path.dirname(out_path)):
os.makedirs(os.path.dirname(out_path))
out_file = None
try:
offset = 0
while True:
try:
script = '''
If (Test-Path -PathType Leaf "%(path)s")
{
$stream = [System.IO.File]::OpenRead("%(path)s");
$stream.Seek(%(offset)d, [System.IO.SeekOrigin]::Begin) | Out-Null;
$buffer = New-Object Byte[] %(buffer_size)d;
$bytesRead = $stream.Read($buffer, 0, %(buffer_size)d);
$bytes = $buffer[0..($bytesRead-1)];
[System.Convert]::ToBase64String($bytes);
$stream.Close() | Out-Null;
}
ElseIf (Test-Path -PathType Container "%(path)s")
{
Write-Host "[DIR]";
}
Else
{
Write-Error "%(path)s does not exist";
Exit 1;
}
''' % dict(buffer_size=buffer_size, path=powershell._escape(in_path), offset=offset)
vvvv("WINRM FETCH %s to %s (offset=%d)" % (in_path, out_path, offset), host=self.host)
cmd_parts = powershell._encode_script(script, as_list=True)
result = self._winrm_exec(cmd_parts[0], cmd_parts[1:])
if result.status_code != 0:
raise IOError(result.std_err.encode('utf-8'))
if result.std_out.strip() == '[DIR]':
data = None
else:
data = base64.b64decode(result.std_out.strip())
if data is None:
if not os.path.exists(out_path):
os.makedirs(out_path)
break
else:
if not out_file:
# If out_path is a directory and we're expecting a file, bail out now.
if os.path.isdir(out_path):
break
out_file = open(out_path, 'wb')
out_file.write(data)
if len(data) < buffer_size:
break
offset += len(data)
except Exception:
traceback.print_exc()
raise errors.AnsibleError("failed to transfer file to %s" % out_path)
finally:
if out_file:
out_file.close()
def close(self):
if self.protocol and self.shell_id:
self.protocol.close_shell(self.shell_id)
self.shell_id = None
| gpl-3.0 |
ahb0327/intellij-community | python/lib/Lib/readline.py | 82 | 5885 | from __future__ import with_statement
import os.path
import sys
from warnings import warn
import java.lang.reflect.Array
__all__ = ['add_history', 'clear_history', 'get_begidx', 'get_completer',
'get_completer_delims', 'get_current_history_length',
'get_endidx', 'get_history_item', 'get_history_length',
'get_line_buffer', 'insert_text', 'parse_and_bind',
'read_history_file', 'read_init_file', 'redisplay',
'remove_history_item', 'set_completer', 'set_completer_delims',
'set_history_length', 'set_pre_input_hook', 'set_startup_hook',
'write_history_file']
try:
_reader = sys._jy_interpreter.reader
except AttributeError:
raise ImportError("Cannot access JLineConsole")
_history_list = None
# The need for the following warnings should go away once we update
# JLine. ImportWarning is chosen as the closest match to what is going
# on here, namely functionality that is not yet available on Jython.
class NotImplementedWarning(ImportWarning):
"""Not yet implemented by Jython"""
class SecurityWarning(ImportWarning):
"""Security manager prevents access to private field"""
def _setup_history():
# This is obviously not desirable, but avoids O(n) workarounds to
# modify the history (ipython uses the function
# remove_history_item to mutate the history relatively frequently)
global _history_list
history = _reader.history
try:
history_list_field = history.getClass().getDeclaredField("history")
history_list_field.setAccessible(True)
_history_list = history_list_field.get(history)
except:
pass
_setup_history()
def parse_and_bind(string):
if string == "tab: complete":
try:
keybindings_field = _reader.getClass().getDeclaredField("keybindings")
keybindings_field.setAccessible(True)
keybindings = keybindings_field.get(_reader)
COMPLETE = _reader.KEYMAP_NAMES.get('COMPLETE')
if java.lang.reflect.Array.getShort(keybindings, 9) != COMPLETE:
java.lang.reflect.Array.setShort(keybindings, 9, COMPLETE)
except:
warn("Cannot bind tab key to complete. You need to do this in a .jlinebindings.properties file instead", SecurityWarning, stacklevel=2)
else:
warn("Cannot bind key %s. You need to do this in a .jlinebindings.properties file instead" % (string,), NotImplementedWarning, stacklevel=2)
def get_line_buffer():
return str(_reader.cursorBuffer.buffer)
def insert_text(string):
_reader.putString(string)
def read_init_file(filename=None):
warn("read_init_file: %s" % (filename,), NotImplementedWarning, "module", 2)
def read_history_file(filename="~/.history"):
print "Reading history:", filename
expanded = os.path.expanduser(filename)
new_history = _reader.getHistory().getClass()()
# new_history.clear()
with open(expanded) as f:
for line in f:
new_history.addToHistory(line.rstrip())
_reader.history = new_history
_setup_history()
def write_history_file(filename="~/.history"):
expanded = os.path.expanduser(filename)
with open(expanded, 'w') as f:
for line in _reader.history.historyList:
f.write(line)
f.write("\n")
def clear_history():
_reader.history.clear()
def add_history(line):
_reader.addToHistory(line)
def get_history_length():
return _reader.history.maxSize
def set_history_length(length):
_reader.history.maxSize = length
def get_current_history_length():
return len(_reader.history.historyList)
def get_history_item(index):
return _reader.history.historyList[index]
def remove_history_item(pos):
if _history_list:
_history_list.remove(pos)
else:
warn("Cannot remove history item at position: %s" % (pos,), SecurityWarning, stacklevel=2)
def redisplay():
_reader.redrawLine()
def set_startup_hook(function=None):
sys._jy_interpreter.startupHook = function
def set_pre_input_hook(function=None):
warn("set_pre_input_hook %s" % (function,), NotImplementedWarning, stacklevel=2)
_completer_function = None
def set_completer(function=None):
"""set_completer([function]) -> None
Set or remove the completer function.
The function is called as function(text, state),
for state in 0, 1, 2, ..., until it returns a non-string.
It should return the next possible completion starting with 'text'."""
global _completer_function
_completer_function = function
def complete_handler(buffer, cursor, candidates):
start = _get_delimited(buffer, cursor)[0]
delimited = buffer[start:cursor]
for state in xrange(100): # TODO arbitrary, what's the number used by gnu readline?
completion = None
try:
completion = function(delimited, state)
except:
pass
if completion:
candidates.add(completion)
else:
break
return start
_reader.addCompletor(complete_handler)
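# Minimal usage sketch (not part of the original module): a completer that
# follows the stdlib readline protocol of calling function(text, state) for
# state 0, 1, 2, ... until it returns None.
def _example_completer(text, state):
    matches = [w for w in ('print', 'pass', 'parse') if w.startswith(text)]
    return matches[state] if state < len(matches) else None
# set_completer(_example_completer)  # would register it with JLine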
def get_completer():
return _completer_function
def _get_delimited(buffer, cursor):
start = cursor
for i in xrange(cursor-1, -1, -1):
if buffer[i] in _completer_delims:
break
start = i
return start, cursor
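# Worked example for _get_delimited (illustrative): with the default delimiter
# set installed below, buffer "print foo" and cursor 9 scan backwards from
# index 8 until the space at index 5, leaving start=6 -- so get_begidx() is 6,
# get_endidx() is 9, and the word being completed is buffer[6:9] == "foo".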
def get_begidx():
return _get_delimited(str(_reader.cursorBuffer.buffer), _reader.cursorBuffer.cursor)[0]
def get_endidx():
return _get_delimited(str(_reader.cursorBuffer.buffer), _reader.cursorBuffer.cursor)[1]
def set_completer_delims(string):
global _completer_delims, _completer_delims_set
_completer_delims = string
_completer_delims_set = set(string)
def get_completer_delims():
return _completer_delims
set_completer_delims(' \t\n`~!@#$%^&*()-=+[{]}\\|;:\'",<>/?')
| apache-2.0 |
agrista/odoo-saas | openerp/report/render/rml2pdf/trml2pdf.py | 256 | 46679 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
import copy
import reportlab
import re
from reportlab.pdfgen import canvas
from reportlab import platypus
import utils
import color
import os
import logging
from lxml import etree
import base64
from distutils.version import LooseVersion
from reportlab.platypus.doctemplate import ActionFlowable
from openerp.tools.safe_eval import safe_eval as eval
from reportlab.lib.units import inch,cm,mm
from openerp.tools.misc import file_open
from reportlab.pdfbase import pdfmetrics
from reportlab.lib.pagesizes import A4, letter
try:
from cStringIO import StringIO
_hush_pyflakes = [ StringIO ]
except ImportError:
from StringIO import StringIO
_logger = logging.getLogger(__name__)
encoding = 'utf-8'
def select_fontname(fontname, default_fontname):
if fontname not in pdfmetrics.getRegisteredFontNames()\
or fontname not in pdfmetrics.standardFonts:
# let reportlab attempt to find it
try:
pdfmetrics.getFont(fontname)
except Exception:
addition = ""
if " " in fontname:
addition = ". Your font contains spaces which is not valid in RML."
_logger.warning('Could not locate font %s, substituting default: %s%s',
fontname, default_fontname, addition)
fontname = default_fontname
return fontname
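# Usage sketch (illustrative values; 'NoSuchFont' is hypothetical):
#   select_fontname('Helvetica', 'Courier')  -> 'Helvetica' (known to reportlab)
#   select_fontname('NoSuchFont', 'Courier') -> 'Courier', after logging a warning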
def _open_image(filename, path=None):
"""Attempt to open a binary file and return the descriptor
"""
if os.path.isfile(filename):
return open(filename, 'rb')
for p in (path or []):
if p and os.path.isabs(p):
fullpath = os.path.join(p, filename)
if os.path.isfile(fullpath):
return open(fullpath, 'rb')
try:
if p:
fullpath = os.path.join(p, filename)
else:
fullpath = filename
return file_open(fullpath)
except IOError:
pass
raise IOError("File %s cannot be found in image path" % filename)
class NumberedCanvas(canvas.Canvas):
def __init__(self, *args, **kwargs):
canvas.Canvas.__init__(self, *args, **kwargs)
self._saved_page_states = []
def showPage(self):
self._startPage()
def save(self):
"""add page info to each page (page x of y)"""
for state in self._saved_page_states:
self.__dict__.update(state)
self.draw_page_number()
canvas.Canvas.showPage(self)
canvas.Canvas.save(self)
def draw_page_number(self):
page_count = len(self._saved_page_states)
self.setFont("Helvetica", 8)
self.drawRightString((self._pagesize[0]-30), (self._pagesize[1]-40),
" %(this)i / %(total)i" % {
'this': self._pageNumber,
'total': page_count,
}
)
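# For reference, a self-contained sketch of the classic deferred "page x of y"
# recipe (not used by this module): it saves page state in showPage() itself,
# whereas NumberedCanvas above defers that to TinyDocTemplate.afterPage() so
# that page numbering can be reset between stories.
class _StandaloneNumberedCanvas(canvas.Canvas):
    def __init__(self, *args, **kwargs):
        canvas.Canvas.__init__(self, *args, **kwargs)
        self._saved_page_states = []
    def showPage(self):
        # remember this page's state; actual emission happens in save()
        self._saved_page_states.append(dict(self.__dict__))
        self._startPage()
    def save(self):
        # second pass: the total page count is now known
        total = len(self._saved_page_states)
        for state in self._saved_page_states:
            self.__dict__.update(state)
            self.setFont("Helvetica", 8)
            self.drawRightString(self._pagesize[0] - 30, self._pagesize[1] - 40,
                                 "%d / %d" % (self._pageNumber, total))
            canvas.Canvas.showPage(self)
        canvas.Canvas.save(self)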
class PageCount(platypus.Flowable):
def __init__(self, story_count=0):
platypus.Flowable.__init__(self)
self.story_count = story_count
def draw(self):
self.canv.beginForm("pageCount%d" % self.story_count)
self.canv.setFont("Helvetica", utils.unit_get(str(8)))
self.canv.drawString(0, 0, str(self.canv.getPageNumber()))
self.canv.endForm()
class PageReset(platypus.Flowable):
def draw(self):
"""Flag to close current story page numbering and prepare for the next
should be executed after the rendering of the full story"""
self.canv._doPageReset = True
class _rml_styles(object,):
def __init__(self, nodes, localcontext):
self.localcontext = localcontext
self.styles = {}
self.styles_obj = {}
self.names = {}
self.table_styles = {}
self.default_style = reportlab.lib.styles.getSampleStyleSheet()
for node in nodes:
for style in node.findall('blockTableStyle'):
self.table_styles[style.get('id')] = self._table_style_get(style)
for style in node.findall('paraStyle'):
sname = style.get('name')
self.styles[sname] = self._para_style_update(style)
if self.default_style.has_key(sname):
for key, value in self.styles[sname].items():
setattr(self.default_style[sname], key, value)
else:
self.styles_obj[sname] = reportlab.lib.styles.ParagraphStyle(sname, self.default_style["Normal"], **self.styles[sname])
for variable in node.findall('initialize'):
for name in variable.findall('name'):
self.names[ name.get('id')] = name.get('value')
def _para_style_update(self, node):
data = {}
for attr in ['textColor', 'backColor', 'bulletColor', 'borderColor']:
if node.get(attr):
data[attr] = color.get(node.get(attr))
for attr in ['bulletFontName', 'fontName']:
if node.get(attr):
fontname= select_fontname(node.get(attr), None)
if fontname is not None:
data['fontName'] = fontname
for attr in ['bulletText']:
if node.get(attr):
data[attr] = node.get(attr)
for attr in ['fontSize', 'leftIndent', 'rightIndent', 'spaceBefore', 'spaceAfter',
'firstLineIndent', 'bulletIndent', 'bulletFontSize', 'leading',
'borderWidth','borderPadding','borderRadius']:
if node.get(attr):
data[attr] = utils.unit_get(node.get(attr))
if node.get('alignment'):
align = {
'right':reportlab.lib.enums.TA_RIGHT,
'center':reportlab.lib.enums.TA_CENTER,
'justify':reportlab.lib.enums.TA_JUSTIFY
}
data['alignment'] = align.get(node.get('alignment').lower(), reportlab.lib.enums.TA_LEFT)
data['splitLongWords'] = 0
return data
def _table_style_get(self, style_node):
styles = []
for node in style_node:
start = utils.tuple_int_get(node, 'start', (0,0) )
stop = utils.tuple_int_get(node, 'stop', (-1,-1) )
if node.tag=='blockValign':
styles.append(('VALIGN', start, stop, str(node.get('value'))))
elif node.tag=='blockFont':
styles.append(('FONT', start, stop, str(node.get('name'))))
elif node.tag=='blockTextColor':
styles.append(('TEXTCOLOR', start, stop, color.get(str(node.get('colorName')))))
elif node.tag=='blockLeading':
styles.append(('LEADING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockAlignment':
styles.append(('ALIGNMENT', start, stop, str(node.get('value'))))
elif node.tag=='blockSpan':
styles.append(('SPAN', start, stop))
elif node.tag=='blockLeftPadding':
styles.append(('LEFTPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockRightPadding':
styles.append(('RIGHTPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockTopPadding':
styles.append(('TOPPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockBottomPadding':
styles.append(('BOTTOMPADDING', start, stop, utils.unit_get(node.get('length'))))
elif node.tag=='blockBackground':
styles.append(('BACKGROUND', start, stop, color.get(node.get('colorName'))))
if node.get('size'):
styles.append(('FONTSIZE', start, stop, utils.unit_get(node.get('size'))))
elif node.tag=='lineStyle':
kind = node.get('kind')
kind_list = [ 'GRID', 'BOX', 'OUTLINE', 'INNERGRID', 'LINEBELOW', 'LINEABOVE','LINEBEFORE', 'LINEAFTER' ]
assert kind in kind_list
thick = 1
if node.get('thickness'):
thick = float(node.get('thickness'))
styles.append((kind, start, stop, thick, color.get(node.get('colorName'))))
return platypus.tables.TableStyle(styles)
def para_style_get(self, node):
style = False
sname = node.get('style')
if sname:
if sname in self.styles_obj:
style = self.styles_obj[sname]
else:
_logger.debug('Warning: style not found, %s - setting default!', node.get('style'))
if not style:
style = self.default_style['Normal']
para_update = self._para_style_update(node)
if para_update:
            # update style only if necessary
style = copy.deepcopy(style)
style.__dict__.update(para_update)
return style
class _rml_doc(object):
def __init__(self, node, localcontext=None, images=None, path='.', title=None):
if images is None:
images = {}
if localcontext is None:
localcontext = {}
self.localcontext = localcontext
self.etree = node
self.filename = self.etree.get('filename')
self.images = images
self.path = path
self.title = title
def docinit(self, els):
from reportlab.lib.fonts import addMapping
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
for node in els:
for font in node.findall('registerFont'):
name = font.get('fontName').encode('ascii')
fname = font.get('fontFile').encode('ascii')
if name not in pdfmetrics._fonts:
pdfmetrics.registerFont(TTFont(name, fname))
#by default, we map the fontName to each style (bold, italic, bold and italic), so that
#if there isn't any font defined for one of these style (via a font family), the system
#will fallback on the normal font.
addMapping(name, 0, 0, name) #normal
addMapping(name, 0, 1, name) #italic
addMapping(name, 1, 0, name) #bold
addMapping(name, 1, 1, name) #italic and bold
#if registerFontFamily is defined, we register the mapping of the fontName to use for each style.
for font_family in node.findall('registerFontFamily'):
family_name = font_family.get('normal').encode('ascii')
if font_family.get('italic'):
addMapping(family_name, 0, 1, font_family.get('italic').encode('ascii'))
if font_family.get('bold'):
addMapping(family_name, 1, 0, font_family.get('bold').encode('ascii'))
if font_family.get('boldItalic'):
addMapping(family_name, 1, 1, font_family.get('boldItalic').encode('ascii'))
def setTTFontMapping(self,face, fontname, filename, mode='all'):
from reportlab.lib.fonts import addMapping
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
if mode:
mode = mode.lower()
if fontname not in pdfmetrics._fonts:
pdfmetrics.registerFont(TTFont(fontname, filename))
if mode == 'all':
addMapping(face, 0, 0, fontname) #normal
addMapping(face, 0, 1, fontname) #italic
addMapping(face, 1, 0, fontname) #bold
addMapping(face, 1, 1, fontname) #italic and bold
elif mode in ['italic', 'oblique']:
addMapping(face, 0, 1, fontname) #italic
elif mode == 'bold':
addMapping(face, 1, 0, fontname) #bold
elif mode in ('bolditalic', 'bold italic','boldoblique', 'bold oblique'):
addMapping(face, 1, 1, fontname) #italic and bold
else:
addMapping(face, 0, 0, fontname) #normal
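    # Usage sketch (font file paths are illustrative, not shipped with this
    # module):
    #   doc.setTTFontMapping('DejaVu', 'DejaVuSans',
    #                        '/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf')
    #   doc.setTTFontMapping('DejaVu', 'DejaVuSans-Bold',
    #                        '/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf',
    #                        mode='bold')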
def _textual_image(self, node):
rc = ''
for n in node:
            rc += (etree.tostring(n) or '') + (n.tail or '')
        return base64.decodestring(rc)
def _images(self, el):
result = {}
for node in el.findall('.//image'):
rc =( node.text or '')
result[node.get('name')] = base64.decodestring(rc)
return result
def render(self, out):
el = self.etree.findall('.//docinit')
if el:
self.docinit(el)
el = self.etree.findall('.//stylesheet')
self.styles = _rml_styles(el,self.localcontext)
el = self.etree.findall('.//images')
if el:
self.images.update( self._images(el[0]) )
el = self.etree.findall('.//template')
if len(el):
pt_obj = _rml_template(self.localcontext, out, el[0], self, images=self.images, path=self.path, title=self.title)
el = utils._child_get(self.etree, self, 'story')
pt_obj.render(el)
else:
self.canvas = canvas.Canvas(out)
pd = self.etree.find('pageDrawing')[0]
pd_obj = _rml_canvas(self.canvas, self.localcontext, None, self, self.images, path=self.path, title=self.title)
pd_obj.render(pd)
self.canvas.showPage()
self.canvas.save()
class _rml_canvas(object):
def __init__(self, canvas, localcontext, doc_tmpl=None, doc=None, images=None, path='.', title=None):
if images is None:
images = {}
self.localcontext = localcontext
self.canvas = canvas
self.styles = doc.styles
self.doc_tmpl = doc_tmpl
self.doc = doc
self.images = images
self.path = path
self.title = title
if self.title:
self.canvas.setTitle(self.title)
def _textual(self, node, x=0, y=0):
text = node.text and node.text.encode('utf-8') or ''
rc = utils._process_text(self, text)
for n in node:
if n.tag == 'seq':
from reportlab.lib.sequencer import getSequencer
seq = getSequencer()
rc += str(seq.next(n.get('id')))
if n.tag == 'pageCount':
if x or y:
self.canvas.translate(x,y)
self.canvas.doForm('pageCount%s' % (self.canvas._storyCount,))
if x or y:
self.canvas.translate(-x,-y)
if n.tag == 'pageNumber':
rc += str(self.canvas.getPageNumber())
rc += utils._process_text(self, n.tail)
return rc.replace('\n','')
def _drawString(self, node):
v = utils.attr_get(node, ['x','y'])
text=self._textual(node, **v)
text = utils.xml2str(text)
try:
self.canvas.drawString(text=text, **v)
except TypeError:
_logger.error("Bad RML: <drawString> tag requires attributes 'x' and 'y'!")
raise
def _drawCenteredString(self, node):
v = utils.attr_get(node, ['x','y'])
text=self._textual(node, **v)
text = utils.xml2str(text)
self.canvas.drawCentredString(text=text, **v)
def _drawRightString(self, node):
v = utils.attr_get(node, ['x','y'])
text=self._textual(node, **v)
text = utils.xml2str(text)
self.canvas.drawRightString(text=text, **v)
def _rect(self, node):
if node.get('round'):
self.canvas.roundRect(radius=utils.unit_get(node.get('round')), **utils.attr_get(node, ['x','y','width','height'], {'fill':'bool','stroke':'bool'}))
else:
self.canvas.rect(**utils.attr_get(node, ['x','y','width','height'], {'fill':'bool','stroke':'bool'}))
def _ellipse(self, node):
x1 = utils.unit_get(node.get('x'))
x2 = utils.unit_get(node.get('width'))
y1 = utils.unit_get(node.get('y'))
y2 = utils.unit_get(node.get('height'))
self.canvas.ellipse(x1,y1,x2,y2, **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))
def _curves(self, node):
line_str = node.text.split()
while len(line_str)>7:
self.canvas.bezier(*[utils.unit_get(l) for l in line_str[0:8]])
line_str = line_str[8:]
def _lines(self, node):
line_str = node.text.split()
lines = []
while len(line_str)>3:
lines.append([utils.unit_get(l) for l in line_str[0:4]])
line_str = line_str[4:]
self.canvas.lines(lines)
def _grid(self, node):
xlist = [utils.unit_get(s) for s in node.get('xs').split(',')]
ylist = [utils.unit_get(s) for s in node.get('ys').split(',')]
self.canvas.grid(xlist, ylist)
def _translate(self, node):
dx = utils.unit_get(node.get('dx')) or 0
dy = utils.unit_get(node.get('dy')) or 0
self.canvas.translate(dx,dy)
def _circle(self, node):
self.canvas.circle(x_cen=utils.unit_get(node.get('x')), y_cen=utils.unit_get(node.get('y')), r=utils.unit_get(node.get('radius')), **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))
def _place(self, node):
flows = _rml_flowable(self.doc, self.localcontext, images=self.images, path=self.path, title=self.title, canvas=self.canvas).render(node)
infos = utils.attr_get(node, ['x','y','width','height'])
infos['y']+=infos['height']
for flow in flows:
w,h = flow.wrap(infos['width'], infos['height'])
if w<=infos['width'] and h<=infos['height']:
infos['y']-=h
flow.drawOn(self.canvas,infos['x'],infos['y'])
infos['height']-=h
else:
raise ValueError("Not enough space")
def _line_mode(self, node):
ljoin = {'round':1, 'mitered':0, 'bevelled':2}
lcap = {'default':0, 'round':1, 'square':2}
if node.get('width'):
self.canvas.setLineWidth(utils.unit_get(node.get('width')))
if node.get('join'):
self.canvas.setLineJoin(ljoin[node.get('join')])
if node.get('cap'):
self.canvas.setLineCap(lcap[node.get('cap')])
if node.get('miterLimit'):
            self.canvas.setMiterLimit(utils.unit_get(node.get('miterLimit')))
if node.get('dash'):
            dashes = node.get('dash').split(',')
            for x in range(len(dashes)):
                dashes[x] = utils.unit_get(dashes[x])
            self.canvas.setDash(dashes)
def _image(self, node):
import urllib
import urlparse
from reportlab.lib.utils import ImageReader
nfile = node.get('file')
if not nfile:
if node.get('name'):
image_data = self.images[node.get('name')]
_logger.debug("Image %s used", node.get('name'))
s = StringIO(image_data)
else:
newtext = node.text
if self.localcontext:
res = utils._regex.findall(newtext)
for key in res:
newtext = eval(key, {}, self.localcontext) or ''
image_data = None
if newtext:
image_data = base64.decodestring(newtext)
if image_data:
s = StringIO(image_data)
else:
_logger.debug("No image data!")
return False
else:
if nfile in self.images:
s = StringIO(self.images[nfile])
else:
try:
up = urlparse.urlparse(str(nfile))
except ValueError:
up = False
if up and up.scheme:
# RFC: do we really want to open external URLs?
# Are we safe from cross-site scripting or attacks?
_logger.debug("Retrieve image from %s", nfile)
u = urllib.urlopen(str(nfile))
s = StringIO(u.read())
else:
_logger.debug("Open image file %s ", nfile)
s = _open_image(nfile, path=self.path)
try:
img = ImageReader(s)
(sx,sy) = img.getSize()
_logger.debug("Image is %dx%d", sx, sy)
args = { 'x': 0.0, 'y': 0.0, 'mask': 'auto'}
for tag in ('width','height','x','y'):
if node.get(tag):
args[tag] = utils.unit_get(node.get(tag))
if ('width' in args) and (not 'height' in args):
args['height'] = sy * args['width'] / sx
elif ('height' in args) and (not 'width' in args):
args['width'] = sx * args['height'] / sy
elif ('width' in args) and ('height' in args):
                if (float(args['width']) / args['height']) > (float(sx) / sy):
args['width'] = sx * args['height'] / sy
else:
args['height'] = sy * args['width'] / sx
self.canvas.drawImage(img, **args)
finally:
s.close()
# self.canvas._doc.SaveToFile(self.canvas._filename, self.canvas)
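    # Scaling sketch for _image above (illustrative numbers): for a 200x100
    # source image (sx=200, sy=100) with only width given as 100, the height
    # becomes sy * width / sx = 100 * 100 / 200 = 50, preserving the aspect
    # ratio; when both are given, one axis is recomputed so the image fits the
    # requested box without distortion.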
def _path(self, node):
self.path = self.canvas.beginPath()
self.path.moveTo(**utils.attr_get(node, ['x','y']))
for n in utils._child_get(node, self):
if not n.text :
if n.tag=='moveto':
vals = utils.text_get(n).split()
self.path.moveTo(utils.unit_get(vals[0]), utils.unit_get(vals[1]))
elif n.tag=='curvesto':
vals = utils.text_get(n).split()
while len(vals)>5:
pos=[]
while len(pos)<6:
pos.append(utils.unit_get(vals.pop(0)))
self.path.curveTo(*pos)
elif n.text:
data = n.text.split() # Not sure if I must merge all TEXT_NODE ?
while len(data)>1:
x = utils.unit_get(data.pop(0))
y = utils.unit_get(data.pop(0))
self.path.lineTo(x,y)
if (not node.get('close')) or utils.bool_get(node.get('close')):
self.path.close()
self.canvas.drawPath(self.path, **utils.attr_get(node, [], {'fill':'bool','stroke':'bool'}))
def setFont(self, node):
fontname = select_fontname(node.get('name'), self.canvas._fontname)
return self.canvas.setFont(fontname, utils.unit_get(node.get('size')))
def render(self, node):
tags = {
'drawCentredString': self._drawCenteredString,
'drawRightString': self._drawRightString,
'drawString': self._drawString,
'rect': self._rect,
'ellipse': self._ellipse,
'lines': self._lines,
'grid': self._grid,
'curves': self._curves,
'fill': lambda node: self.canvas.setFillColor(color.get(node.get('color'))),
'stroke': lambda node: self.canvas.setStrokeColor(color.get(node.get('color'))),
'setFont': self.setFont ,
'place': self._place,
'circle': self._circle,
'lineMode': self._line_mode,
'path': self._path,
'rotate': lambda node: self.canvas.rotate(float(node.get('degrees'))),
'translate': self._translate,
'image': self._image
}
for n in utils._child_get(node, self):
if n.tag in tags:
tags[n.tag](n)
class _rml_draw(object):
def __init__(self, localcontext, node, styles, images=None, path='.', title=None):
if images is None:
images = {}
self.localcontext = localcontext
self.node = node
self.styles = styles
self.canvas = None
self.images = images
self.path = path
self.canvas_title = title
def render(self, canvas, doc):
canvas.saveState()
cnv = _rml_canvas(canvas, self.localcontext, doc, self.styles, images=self.images, path=self.path, title=self.canvas_title)
cnv.render(self.node)
canvas.restoreState()
class _rml_Illustration(platypus.flowables.Flowable):
def __init__(self, node, localcontext, styles, self2):
self.localcontext = (localcontext or {}).copy()
self.node = node
self.styles = styles
self.width = utils.unit_get(node.get('width'))
self.height = utils.unit_get(node.get('height'))
self.self2 = self2
def wrap(self, *args):
return self.width, self.height
def draw(self):
drw = _rml_draw(self.localcontext ,self.node,self.styles, images=self.self2.images, path=self.self2.path, title=self.self2.title)
drw.render(self.canv, None)
# Workaround for issue #15: https://bitbucket.org/rptlab/reportlab/issue/15/infinite-pages-produced-when-splitting
original_pto_split = platypus.flowables.PTOContainer.split
def split(self, availWidth, availHeight):
res = original_pto_split(self, availWidth, availHeight)
if len(res) > 2 and len(self._content) > 0:
header = self._content[0]._ptoinfo.header
trailer = self._content[0]._ptoinfo.trailer
if isinstance(res[-2], platypus.flowables.UseUpSpace) and len(header + trailer) == len(res[:-2]):
return []
return res
platypus.flowables.PTOContainer.split = split
class _rml_flowable(object):
def __init__(self, doc, localcontext, images=None, path='.', title=None, canvas=None):
if images is None:
images = {}
self.localcontext = localcontext
self.doc = doc
self.styles = doc.styles
self.images = images
self.path = path
self.title = title
self.canvas = canvas
def _textual(self, node):
rc1 = utils._process_text(self, node.text or '')
for n in utils._child_get(node,self):
txt_n = copy.deepcopy(n)
for key in txt_n.attrib.keys():
if key in ('rml_except', 'rml_loop', 'rml_tag'):
del txt_n.attrib[key]
if not n.tag == 'bullet':
if n.tag == 'pageNumber':
txt_n.text = self.canvas and str(self.canvas.getPageNumber()) or ''
else:
txt_n.text = utils.xml2str(self._textual(n))
txt_n.tail = n.tail and utils.xml2str(utils._process_text(self, n.tail.replace('\n',''))) or ''
rc1 += etree.tostring(txt_n)
return rc1
def _table(self, node):
children = utils._child_get(node,self,'tr')
if not children:
return None
length = 0
colwidths = None
rowheights = None
data = []
styles = []
posy = 0
for tr in children:
paraStyle = None
if tr.get('style'):
st = copy.deepcopy(self.styles.table_styles[tr.get('style')])
for si in range(len(st._cmds)):
s = list(st._cmds[si])
s[1] = (s[1][0],posy)
s[2] = (s[2][0],posy)
st._cmds[si] = tuple(s)
styles.append(st)
if tr.get('paraStyle'):
paraStyle = self.styles.styles[tr.get('paraStyle')]
data2 = []
posx = 0
for td in utils._child_get(tr, self,'td'):
if td.get('style'):
st = copy.deepcopy(self.styles.table_styles[td.get('style')])
for s in st._cmds:
s[1][1] = posy
s[2][1] = posy
s[1][0] = posx
s[2][0] = posx
styles.append(st)
if td.get('paraStyle'):
# TODO: merge styles
paraStyle = self.styles.styles[td.get('paraStyle')]
posx += 1
flow = []
for n in utils._child_get(td, self):
if n.tag == etree.Comment:
n.text = ''
continue
fl = self._flowable(n, extra_style=paraStyle)
if isinstance(fl,list):
flow += fl
else:
flow.append( fl )
if not len(flow):
flow = self._textual(td)
data2.append( flow )
if len(data2)>length:
length=len(data2)
for ab in data:
while len(ab)<length:
ab.append('')
while len(data2)<length:
data2.append('')
data.append( data2 )
posy += 1
if node.get('colWidths'):
assert length == len(node.get('colWidths').split(','))
colwidths = [utils.unit_get(f.strip()) for f in node.get('colWidths').split(',')]
if node.get('rowHeights'):
rowheights = [utils.unit_get(f.strip()) for f in node.get('rowHeights').split(',')]
if len(rowheights) == 1:
rowheights = rowheights[0]
table = platypus.LongTable(data = data, colWidths=colwidths, rowHeights=rowheights, **(utils.attr_get(node, ['splitByRow'] ,{'repeatRows':'int','repeatCols':'int'})))
if node.get('style'):
table.setStyle(self.styles.table_styles[node.get('style')])
for s in styles:
table.setStyle(s)
return table
def _illustration(self, node):
return _rml_Illustration(node, self.localcontext, self.styles, self)
def _textual_image(self, node):
return base64.decodestring(node.text)
def _pto(self, node):
sub_story = []
pto_header = None
pto_trailer = None
for node in utils._child_get(node, self):
if node.tag == etree.Comment:
node.text = ''
continue
elif node.tag=='pto_header':
pto_header = self.render(node)
elif node.tag=='pto_trailer':
pto_trailer = self.render(node)
else:
flow = self._flowable(node)
if flow:
if isinstance(flow,list):
sub_story = sub_story + flow
else:
sub_story.append(flow)
return platypus.flowables.PTOContainer(sub_story, trailer=pto_trailer, header=pto_header)
def _flowable(self, node, extra_style=None):
if node.tag=='pto':
return self._pto(node)
if node.tag=='para':
style = self.styles.para_style_get(node)
if extra_style:
style.__dict__.update(extra_style)
text_node = self._textual(node).strip().replace('\n\n', '\n').replace('\n', '<br/>')
instance = platypus.Paragraph(text_node, style, **(utils.attr_get(node, [], {'bulletText':'str'})))
result = [instance]
if LooseVersion(reportlab.Version) > LooseVersion('3.0') and not instance.getPlainText().strip() and instance.text.strip():
result.append(platypus.Paragraph(' <br/>', style, **(utils.attr_get(node, [], {'bulletText': 'str'}))))
return result
elif node.tag=='barCode':
try:
from reportlab.graphics.barcode import code128
from reportlab.graphics.barcode import code39
from reportlab.graphics.barcode import code93
from reportlab.graphics.barcode import common
from reportlab.graphics.barcode import fourstate
from reportlab.graphics.barcode import usps
from reportlab.graphics.barcode import createBarcodeDrawing
except ImportError:
_logger.warning("Cannot use barcode renderers:", exc_info=True)
return None
args = utils.attr_get(node, [], {'ratio':'float','xdim':'unit','height':'unit','checksum':'int','quiet':'int','width':'unit','stop':'bool','bearers':'int','barWidth':'float','barHeight':'float'})
codes = {
'codabar': lambda x: common.Codabar(x, **args),
'code11': lambda x: common.Code11(x, **args),
'code128': lambda x: code128.Code128(str(x), **args),
'standard39': lambda x: code39.Standard39(str(x), **args),
'standard93': lambda x: code93.Standard93(str(x), **args),
'i2of5': lambda x: common.I2of5(x, **args),
'extended39': lambda x: code39.Extended39(str(x), **args),
'extended93': lambda x: code93.Extended93(str(x), **args),
'msi': lambda x: common.MSI(x, **args),
'fim': lambda x: usps.FIM(x, **args),
'postnet': lambda x: usps.POSTNET(x, **args),
'ean13': lambda x: createBarcodeDrawing('EAN13', value=str(x), **args),
'qrcode': lambda x: createBarcodeDrawing('QR', value=x, **args),
}
code = 'code128'
if node.get('code'):
code = node.get('code').lower()
return codes[code](self._textual(node))
elif node.tag=='name':
self.styles.names[ node.get('id')] = node.get('value')
return None
elif node.tag=='xpre':
style = self.styles.para_style_get(node)
return platypus.XPreformatted(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str','dedent':'int','frags':'int'})))
elif node.tag=='pre':
style = self.styles.para_style_get(node)
return platypus.Preformatted(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str','dedent':'int'})))
elif node.tag=='illustration':
return self._illustration(node)
elif node.tag=='blockTable':
return self._table(node)
elif node.tag=='title':
styles = reportlab.lib.styles.getSampleStyleSheet()
style = styles['Title']
return platypus.Paragraph(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str'})))
elif re.match('^h([1-9]+[0-9]*)$', (node.tag or '')):
styles = reportlab.lib.styles.getSampleStyleSheet()
style = styles['Heading'+str(node.tag[1:])]
return platypus.Paragraph(self._textual(node), style, **(utils.attr_get(node, [], {'bulletText':'str'})))
elif node.tag=='image':
image_data = False
if not node.get('file'):
if node.get('name'):
if node.get('name') in self.doc.images:
_logger.debug("Image %s read ", node.get('name'))
image_data = self.doc.images[node.get('name')].read()
else:
_logger.warning("Image %s not defined", node.get('name'))
return False
else:
import base64
newtext = node.text
if self.localcontext:
newtext = utils._process_text(self, node.text or '')
image_data = base64.decodestring(newtext)
if not image_data:
_logger.debug("No inline image data")
return False
image = StringIO(image_data)
else:
_logger.debug("Image get from file %s", node.get('file'))
image = _open_image(node.get('file'), path=self.doc.path)
return platypus.Image(image, mask=(250,255,250,255,250,255), **(utils.attr_get(node, ['width','height'])))
elif node.tag=='spacer':
if node.get('width'):
width = utils.unit_get(node.get('width'))
else:
width = utils.unit_get('1cm')
length = utils.unit_get(node.get('length'))
return platypus.Spacer(width=width, height=length)
elif node.tag=='section':
return self.render(node)
elif node.tag == 'pageNumberReset':
return PageReset()
elif node.tag in ('pageBreak', 'nextPage'):
return platypus.PageBreak()
elif node.tag=='condPageBreak':
return platypus.CondPageBreak(**(utils.attr_get(node, ['height'])))
elif node.tag=='setNextTemplate':
return platypus.NextPageTemplate(str(node.get('name')))
elif node.tag=='nextFrame':
return platypus.CondPageBreak(1000) # TODO: change the 1000 !
elif node.tag == 'setNextFrame':
from reportlab.platypus.doctemplate import NextFrameFlowable
return NextFrameFlowable(str(node.get('name')))
elif node.tag == 'currentFrame':
from reportlab.platypus.doctemplate import CurrentFrameFlowable
return CurrentFrameFlowable(str(node.get('name')))
elif node.tag == 'frameEnd':
return EndFrameFlowable()
elif node.tag == 'hr':
width_hr=node.get('width') or '100%'
color_hr=node.get('color') or 'black'
thickness_hr=node.get('thickness') or 1
lineCap_hr=node.get('lineCap') or 'round'
return platypus.flowables.HRFlowable(width=width_hr,color=color.get(color_hr),thickness=float(thickness_hr),lineCap=str(lineCap_hr))
else:
sys.stderr.write('Warning: flowable not yet implemented: %s !\n' % (node.tag,))
return None
def render(self, node_story):
def process_story(node_story):
sub_story = []
for node in utils._child_get(node_story, self):
if node.tag == etree.Comment:
node.text = ''
continue
flow = self._flowable(node)
if flow:
if isinstance(flow,list):
sub_story = sub_story + flow
else:
sub_story.append(flow)
return sub_story
return process_story(node_story)
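# Self-contained sketch of the reportlab barcode factory used by _flowable
# above (the value and output path are illustrative):
def _barcode_demo(out_path='barcode_demo.pdf'):
    from reportlab.graphics.barcode import createBarcodeDrawing
    from reportlab.graphics import renderPDF
    drawing = createBarcodeDrawing('EAN13', value='123456789012')
    renderPDF.drawToFile(drawing, out_path)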
class EndFrameFlowable(ActionFlowable):
def __init__(self,resume=0):
ActionFlowable.__init__(self,('frameEnd',resume))
class TinyDocTemplate(platypus.BaseDocTemplate):
def beforeDocument(self):
        # Store some useful values directly on the canvas, so they are
        # available while drawing flowables (needed for proper PageCount
        # handling)
self.canv._doPageReset = False
self.canv._storyCount = 0
def ___handle_pageBegin(self):
self.page += 1
self.pageTemplate.beforeDrawPage(self.canv,self)
self.pageTemplate.checkPageSize(self.canv,self)
self.pageTemplate.onPage(self.canv,self)
for f in self.pageTemplate.frames: f._reset()
self.beforePage()
self._curPageFlowableCount = 0
if hasattr(self,'_nextFrameIndex'):
del self._nextFrameIndex
for f in self.pageTemplate.frames:
if f.id == 'first':
self.frame = f
break
self.handle_frameBegin()
def afterPage(self):
if isinstance(self.canv, NumberedCanvas):
# save current page states before eventual reset
self.canv._saved_page_states.append(dict(self.canv.__dict__))
if self.canv._doPageReset:
# Following a <pageReset/> tag:
            # - we reset the page number to 0
            # - we add a new PageCount flowable (relative to the current
            #   story number), but not for NumberedCanvas, as it handles the
            #   page count itself
            # NOTE: the _rml_template render() method adds a PageReset flowable
            #       at the end of each story, so we are sure to pass here at
            #       least once per story.
if not isinstance(self.canv, NumberedCanvas):
self.handle_flowable([ PageCount(story_count=self.canv._storyCount) ])
self.canv._pageCount = self.page
self.page = 0
self.canv._flag = True
self.canv._pageNumber = 0
self.canv._doPageReset = False
self.canv._storyCount += 1
class _rml_template(object):
def __init__(self, localcontext, out, node, doc, images=None, path='.', title=None):
if images is None:
images = {}
if not localcontext:
localcontext={'internal_header':True}
self.localcontext = localcontext
self.images= images
self.path = path
self.title = title
pagesize_map = {'a4': A4,
'us_letter': letter
}
pageSize = A4
if self.localcontext.get('company'):
pageSize = pagesize_map.get(self.localcontext.get('company').rml_paper_format, A4)
if node.get('pageSize'):
ps = map(lambda x:x.strip(), node.get('pageSize').replace(')', '').replace('(', '').split(','))
pageSize = ( utils.unit_get(ps[0]),utils.unit_get(ps[1]) )
self.doc_tmpl = TinyDocTemplate(out, pagesize=pageSize, **utils.attr_get(node, ['leftMargin','rightMargin','topMargin','bottomMargin'], {'allowSplitting':'int','showBoundary':'bool','rotation':'int','title':'str','author':'str'}))
self.page_templates = []
self.styles = doc.styles
self.doc = doc
self.image=[]
pts = node.findall('pageTemplate')
for pt in pts:
frames = []
for frame_el in pt.findall('frame'):
frame = platypus.Frame( **(utils.attr_get(frame_el, ['x1','y1', 'width','height', 'leftPadding', 'rightPadding', 'bottomPadding', 'topPadding'], {'id':'str', 'showBoundary':'bool'})) )
if utils.attr_get(frame_el, ['last']):
frame.lastFrame = True
frames.append( frame )
try :
gr = pt.findall('pageGraphics')\
or pt[1].findall('pageGraphics')
except Exception: # FIXME: be even more specific, perhaps?
gr=''
if len(gr):
# self.image=[ n for n in utils._child_get(gr[0], self) if n.tag=='image' or not self.localcontext]
drw = _rml_draw(self.localcontext,gr[0], self.doc, images=images, path=self.path, title=self.title)
self.page_templates.append( platypus.PageTemplate(frames=frames, onPage=drw.render, **utils.attr_get(pt, [], {'id':'str'}) ))
else:
drw = _rml_draw(self.localcontext,node,self.doc,title=self.title)
self.page_templates.append( platypus.PageTemplate(frames=frames,onPage=drw.render, **utils.attr_get(pt, [], {'id':'str'}) ))
self.doc_tmpl.addPageTemplates(self.page_templates)
def render(self, node_stories):
        if self.localcontext and not self.localcontext.get('internal_header', False):
            # pop() instead of del: the key may be absent from the context
            self.localcontext.pop('internal_header', None)
fis = []
r = _rml_flowable(self.doc,self.localcontext, images=self.images, path=self.path, title=self.title, canvas=None)
story_cnt = 0
for node_story in node_stories:
if story_cnt > 0:
fis.append(platypus.PageBreak())
fis += r.render(node_story)
# end of story numbering computation
fis.append(PageReset())
story_cnt += 1
try:
if self.localcontext and self.localcontext.get('internal_header',False):
self.doc_tmpl.afterFlowable(fis)
self.doc_tmpl.build(fis,canvasmaker=NumberedCanvas)
else:
self.doc_tmpl.build(fis)
except platypus.doctemplate.LayoutError, e:
e.name = 'Print Error'
e.value = 'The document you are trying to print contains a table row that does not fit on one page. Please try to split it in smaller rows or contact your administrator.'
raise
def parseNode(rml, localcontext=None, fout=None, images=None, path='.', title=None):
node = etree.XML(rml)
r = _rml_doc(node, localcontext, images, path, title=title)
#try to override some font mappings
try:
from customfonts import SetCustomFonts
SetCustomFonts(r)
except ImportError:
# means there is no custom fonts mapping in this system.
pass
except Exception:
        _logger.warning('Cannot set font mapping', exc_info=True)
fp = StringIO()
r.render(fp)
return fp.getvalue()
def parseString(rml, localcontext=None, fout=None, images=None, path='.', title=None):
node = etree.XML(rml)
r = _rml_doc(node, localcontext, images, path, title=title)
#try to override some font mappings
try:
from customfonts import SetCustomFonts
SetCustomFonts(r)
except Exception:
pass
if fout:
fp = file(fout,'wb')
r.render(fp)
fp.close()
return fout
else:
fp = StringIO()
r.render(fp)
return fp.getvalue()
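# End-to-end usage sketch (the RML snippet is illustrative and kept minimal):
_EXAMPLE_RML = """<?xml version="1.0"?>
<document filename="example.pdf">
  <template>
    <pageTemplate id="first">
      <frame id="first" x1="2cm" y1="2cm" width="17cm" height="25cm"/>
    </pageTemplate>
  </template>
  <stylesheet/>
  <story>
    <para>Hello RML</para>
  </story>
</document>
"""
# pdf_bytes = parseString(_EXAMPLE_RML)  # returns the rendered PDF as a string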
def trml2pdf_help():
print 'Usage: trml2pdf input.rml >output.pdf'
print 'Render the standard input (RML) and output a PDF file'
sys.exit(0)
if __name__=="__main__":
if len(sys.argv)>1:
if sys.argv[1]=='--help':
trml2pdf_help()
print parseString(file(sys.argv[1], 'r').read()),
else:
print 'Usage: trml2pdf input.rml >output.pdf'
print 'Try \'trml2pdf --help\' for more information.'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
wangyum/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/gamma_test.py | 1 | 14770 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import special
from scipy import stats
from tensorflow.contrib.distributions.python.ops import gamma as gamma_lib
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.platform import test
class GammaTest(test.TestCase):
def testGammaShape(self):
with self.test_session():
alpha = constant_op.constant([3.0] * 5)
beta = constant_op.constant(11.0)
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
self.assertEqual(gamma.batch_shape_tensor().eval(), (5,))
self.assertEqual(gamma.batch_shape, tensor_shape.TensorShape([5]))
self.assertAllEqual(gamma.event_shape_tensor().eval(), [])
self.assertEqual(gamma.event_shape, tensor_shape.TensorShape([]))
def testGammaLogPDF(self):
with self.test_session():
batch_size = 6
alpha = constant_op.constant([2.0] * batch_size)
beta = constant_op.constant([3.0] * batch_size)
alpha_v = 2.0
beta_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
log_pdf = gamma.log_prob(x)
self.assertEqual(log_pdf.get_shape(), (6,))
self.assertAllClose(log_pdf.eval(), expected_log_pdf)
pdf = gamma.prob(x)
self.assertEqual(pdf.get_shape(), (6,))
self.assertAllClose(pdf.eval(), np.exp(expected_log_pdf))
def testGammaLogPDFMultidimensional(self):
with self.test_session():
batch_size = 6
alpha = constant_op.constant([[2.0, 4.0]] * batch_size)
beta = constant_op.constant([[3.0, 4.0]] * batch_size)
alpha_v = np.array([2.0, 4.0])
beta_v = np.array([3.0, 4.0])
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
log_pdf = gamma.log_prob(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.get_shape(), (6, 2))
self.assertAllClose(log_pdf_values, expected_log_pdf)
pdf = gamma.prob(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.get_shape(), (6, 2))
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testGammaLogPDFMultidimensionalBroadcasting(self):
with self.test_session():
batch_size = 6
alpha = constant_op.constant([[2.0, 4.0]] * batch_size)
beta = constant_op.constant(3.0)
alpha_v = np.array([2.0, 4.0])
beta_v = 3.0
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
log_pdf = gamma.log_prob(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.get_shape(), (6, 2))
self.assertAllClose(log_pdf_values, expected_log_pdf)
pdf = gamma.prob(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.get_shape(), (6, 2))
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testGammaCDF(self):
with self.test_session():
batch_size = 6
alpha = constant_op.constant([2.0] * batch_size)
beta = constant_op.constant([3.0] * batch_size)
alpha_v = 2.0
beta_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
expected_cdf = stats.gamma.cdf(x, alpha_v, scale=1 / beta_v)
cdf = gamma.cdf(x)
self.assertEqual(cdf.get_shape(), (6,))
self.assertAllClose(cdf.eval(), expected_cdf)
def testGammaMean(self):
with self.test_session():
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
expected_means = stats.gamma.mean(alpha_v, scale=1 / beta_v)
self.assertEqual(gamma.mean().get_shape(), (3,))
self.assertAllClose(gamma.mean().eval(), expected_means)
def testGammaModeAllowNanStatsIsFalseWorksWhenAllBatchMembersAreDefined(self):
with self.test_session():
alpha_v = np.array([5.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
expected_modes = (alpha_v - 1) / beta_v
self.assertEqual(gamma.mode().get_shape(), (3,))
self.assertAllClose(gamma.mode().eval(), expected_modes)
def testGammaModeAllowNanStatsFalseRaisesForUndefinedBatchMembers(self):
with self.test_session():
# Mode will not be defined for the first entry.
alpha_v = np.array([0.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(concentration=alpha_v,
rate=beta_v,
allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
gamma.mode().eval()
def testGammaModeAllowNanStatsIsTrueReturnsNaNforUndefinedBatchMembers(self):
with self.test_session():
# Mode will not be defined for the first entry.
alpha_v = np.array([0.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(concentration=alpha_v,
rate=beta_v,
allow_nan_stats=True)
expected_modes = (alpha_v - 1) / beta_v
expected_modes[0] = np.nan
self.assertEqual(gamma.mode().get_shape(), (3,))
self.assertAllClose(gamma.mode().eval(), expected_modes)
def testGammaVariance(self):
with self.test_session():
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
expected_variances = stats.gamma.var(alpha_v, scale=1 / beta_v)
self.assertEqual(gamma.variance().get_shape(), (3,))
self.assertAllClose(gamma.variance().eval(), expected_variances)
def testGammaStd(self):
with self.test_session():
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
expected_stddev = stats.gamma.std(alpha_v, scale=1. / beta_v)
self.assertEqual(gamma.stddev().get_shape(), (3,))
self.assertAllClose(gamma.stddev().eval(), expected_stddev)
def testGammaEntropy(self):
with self.test_session():
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
expected_entropy = stats.gamma.entropy(alpha_v, scale=1 / beta_v)
gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
self.assertEqual(gamma.entropy().get_shape(), (3,))
self.assertAllClose(gamma.entropy().eval(), expected_entropy)
def testGammaSampleSmallAlpha(self):
with session.Session():
alpha_v = 0.05
beta_v = 1.0
alpha = constant_op.constant(alpha_v)
beta = constant_op.constant(beta_v)
n = 100000
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
samples = gamma.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertAllClose(
sample_values.mean(),
stats.gamma.mean(
alpha_v, scale=1 / beta_v),
atol=.01)
self.assertAllClose(
sample_values.var(),
stats.gamma.var(alpha_v, scale=1 / beta_v),
atol=.15)
self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
def testGammaSample(self):
with session.Session():
alpha_v = 4.0
beta_v = 3.0
alpha = constant_op.constant(alpha_v)
beta = constant_op.constant(beta_v)
n = 100000
gamma = gamma_lib.Gamma(concentration=alpha, rate=beta)
samples = gamma.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertAllClose(
sample_values.mean(),
stats.gamma.mean(
alpha_v, scale=1 / beta_v),
atol=.01)
self.assertAllClose(
sample_values.var(),
stats.gamma.var(alpha_v, scale=1 / beta_v),
atol=.15)
self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
def testGammaSampleMultiDimensional(self):
with session.Session():
alpha_v = np.array([np.arange(1, 101, dtype=np.float32)]) # 1 x 100
beta_v = np.array([np.arange(1, 11, dtype=np.float32)]).T # 10 x 1
gamma = gamma_lib.Gamma(concentration=alpha_v, rate=beta_v)
n = 10000
samples = gamma.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (n, 10, 100))
self.assertEqual(sample_values.shape, (n, 10, 100))
zeros = np.zeros_like(alpha_v + beta_v) # 10 x 100
alpha_bc = alpha_v + zeros
beta_bc = beta_v + zeros
self.assertAllClose(
sample_values.mean(axis=0),
stats.gamma.mean(
alpha_bc, scale=1 / beta_bc),
rtol=.035)
self.assertAllClose(
sample_values.var(axis=0),
stats.gamma.var(alpha_bc, scale=1 / beta_bc),
atol=4.5)
fails = 0
trials = 0
for ai, a in enumerate(np.reshape(alpha_v, [-1])):
for bi, b in enumerate(np.reshape(beta_v, [-1])):
s = sample_values[:, bi, ai]
trials += 1
fails += 0 if self._kstest(a, b, s) else 1
self.assertLess(fails, trials * 0.03)
def _kstest(self, alpha, beta, samples):
# Uses the Kolmogorov-Smirnov test for goodness of fit.
ks, _ = stats.kstest(samples, stats.gamma(alpha, scale=1 / beta).cdf)
# Return True when the test passes.
return ks < 0.02
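  # Illustrative call (arbitrary parameters): stats.kstest(samples,
  # stats.gamma(2.0, scale=1 / 3.0).cdf) returns (ks_statistic, p_value);
  # only the statistic is thresholded here, which is a looser check than
  # testing the p-value.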
def testGammaPdfOfSampleMultiDims(self):
with session.Session() as sess:
gamma = gamma_lib.Gamma(concentration=[7., 11.], rate=[[5.], [6.]])
num = 50000
samples = gamma.sample(num, seed=137)
pdfs = gamma.prob(samples)
sample_vals, pdf_vals = sess.run([samples, pdfs])
self.assertEqual(samples.get_shape(), (num, 2, 2))
self.assertEqual(pdfs.get_shape(), (num, 2, 2))
self.assertAllClose(
stats.gamma.mean(
[[7., 11.], [7., 11.]], scale=1 / np.array([[5., 5.], [6., 6.]])),
sample_vals.mean(axis=0),
atol=.1)
self.assertAllClose(
stats.gamma.var([[7., 11.], [7., 11.]],
scale=1 / np.array([[5., 5.], [6., 6.]])),
sample_vals.var(axis=0),
atol=.1)
self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):
s_p = zip(sample_vals, pdf_vals)
prev = (0, 0)
total = 0
for k in sorted(s_p, key=lambda x: x[0]):
pair_pdf = (k[1] + prev[1]) / 2
total += (k[0] - prev[0]) * pair_pdf
prev = k
self.assertNear(1., total, err=err)
def testGammaNonPositiveInitializationParamsRaises(self):
with self.test_session():
alpha_v = constant_op.constant(0.0, name="alpha")
beta_v = constant_op.constant(1.0, name="beta")
gamma = gamma_lib.Gamma(concentration=alpha_v,
rate=beta_v,
validate_args=True)
with self.assertRaisesOpError("alpha"):
gamma.mean().eval()
alpha_v = constant_op.constant(1.0, name="alpha")
beta_v = constant_op.constant(0.0, name="beta")
gamma = gamma_lib.Gamma(concentration=alpha_v,
rate=beta_v,
validate_args=True)
with self.assertRaisesOpError("beta"):
gamma.mean().eval()
def testGammaWithSoftplusConcentrationRate(self):
with self.test_session():
alpha_v = constant_op.constant([0.0, -2.1], name="alpha")
beta_v = constant_op.constant([1.0, -3.6], name="beta")
gamma = gamma_lib.GammaWithSoftplusConcentrationRate(
concentration=alpha_v, rate=beta_v)
self.assertAllEqual(nn_ops.softplus(alpha_v).eval(),
gamma.concentration.eval())
self.assertAllEqual(nn_ops.softplus(beta_v).eval(),
gamma.rate.eval())
def testGammaGammaKL(self):
alpha0 = np.array([3.])
beta0 = np.array([1., 2., 3., 1.5, 2.5, 3.5])
alpha1 = np.array([0.4])
beta1 = np.array([0.5, 1., 1.5, 2., 2.5, 3.])
# Build graph.
with self.test_session() as sess:
g0 = gamma_lib.Gamma(concentration=alpha0, rate=beta0)
g1 = gamma_lib.Gamma(concentration=alpha1, rate=beta1)
x = g0.sample(int(1e4), seed=0)
kl_sample = math_ops.reduce_mean(g0.log_prob(x) - g1.log_prob(x), 0)
kl_actual = kullback_leibler.kl_divergence(g0, g1)
# Execute graph.
[kl_sample_, kl_actual_] = sess.run([kl_sample, kl_actual])
kl_expected = ((alpha0 - alpha1) * special.digamma(alpha0)
+ special.gammaln(alpha1)
- special.gammaln(alpha0)
+ alpha1 * np.log(beta0)
- alpha1 * np.log(beta1)
+ alpha0 * (beta1 / beta0 - 1.))
self.assertEqual(beta0.shape, kl_actual.get_shape())
self.assertAllClose(kl_expected, kl_actual_, atol=0., rtol=1e-6)
self.assertAllClose(kl_sample_, kl_actual_, atol=0., rtol=1e-2)
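# Closed form checked above (concentration a, rate b):
#   KL(G0 || G1) = (a0 - a1) * digamma(a0) - lgamma(a0) + lgamma(a1)
#                  + a1 * (log(b0) - log(b1)) + a0 * (b1 / b0 - 1)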
if __name__ == "__main__":
test.main()
| apache-2.0 |
rikirenz/inspire-next | tests/integration/test_detailed_records.py | 2 | 1956 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from invenio_records.models import RecordMetadata
from inspirehep.modules.migrator.models import InspireProdRecords
def test_all_records_were_loaded(app):
records = [record.json for record in RecordMetadata.query.all()]
expected = 42
result = len(records)
assert expected == result
def test_all_records_are_valid(app):
    invalid = InspireProdRecords.query.filter(InspireProdRecords.valid.is_(False)).values(InspireProdRecords.recid)
recids = [el[0] for el in invalid]
assert recids == []
def test_all_records_are_there(app_client):
failed = []
for record in [record.json for record in RecordMetadata.query.all()]:
try:
absolute_url = record['self']['$ref']
relative_url = absolute_url.partition('api')[2]
response = app_client.get(relative_url)
assert response.status_code == 200
except Exception:
failed.append(record['control_number'])
assert failed == []
| gpl-3.0 |
gurneyalex/OpenUpgrade | addons/base_import/tests/test_cases.py | 101 | 13059 | # -*- encoding: utf-8 -*-
import unittest2
from openerp.tests.common import TransactionCase
from .. import models
ID_FIELD = {'id': 'id', 'name': 'id', 'string': "External ID", 'required': False, 'fields': []}
def make_field(name='value', string='unknown', required=False, fields=[]):
return [
ID_FIELD,
{'id': name, 'name': name, 'string': string, 'required': required, 'fields': fields},
]
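# e.g. make_field(name='name', string='Name', required=True) returns
# [ID_FIELD, {'id': 'name', 'name': 'name', 'string': 'Name',
#             'required': True, 'fields': []}]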
class test_basic_fields(TransactionCase):
def get_fields(self, field):
return self.registry('base_import.import')\
.get_fields(self.cr, self.uid, 'base_import.tests.models.' + field)
def test_base(self):
""" A basic field is not required """
self.assertEqual(self.get_fields('char'), make_field())
def test_required(self):
""" Required fields should be flagged (so they can be fill-required) """
self.assertEqual(self.get_fields('char.required'), make_field(required=True))
def test_readonly(self):
""" Readonly fields should be filtered out"""
self.assertEqual(self.get_fields('char.readonly'), [ID_FIELD])
def test_readonly_states(self):
""" Readonly fields with states should not be filtered out"""
self.assertEqual(self.get_fields('char.states'), make_field())
def test_readonly_states_noreadonly(self):
""" Readonly fields with states having nothing to do with
readonly should still be filtered out"""
self.assertEqual(self.get_fields('char.noreadonly'), [ID_FIELD])
def test_readonly_states_stillreadonly(self):
""" Readonly fields with readonly states leaving them readonly
always... filtered out"""
self.assertEqual(self.get_fields('char.stillreadonly'), [ID_FIELD])
def test_m2o(self):
""" M2O fields should allow import of themselves (name_get),
their id and their xid"""
self.assertEqual(self.get_fields('m2o'), make_field(fields=[
{'id': 'value', 'name': 'id', 'string': 'External ID', 'required': False, 'fields': []},
{'id': 'value', 'name': '.id', 'string': 'Database ID', 'required': False, 'fields': []},
]))
def test_m2o_required(self):
""" If an m2o field is required, its three sub-fields are
required as well (the client has to handle that: requiredness
is id-based)
"""
self.assertEqual(self.get_fields('m2o.required'), make_field(required=True, fields=[
{'id': 'value', 'name': 'id', 'string': 'External ID', 'required': True, 'fields': []},
{'id': 'value', 'name': '.id', 'string': 'Database ID', 'required': True, 'fields': []},
]))
class test_o2m(TransactionCase):
def get_fields(self, field):
return self.registry('base_import.import')\
.get_fields(self.cr, self.uid, 'base_import.tests.models.' + field)
def test_shallow(self):
self.assertEqual(self.get_fields('o2m'), make_field(fields=[
{'id': 'id', 'name': 'id', 'string': 'External ID', 'required': False, 'fields': []},
# FIXME: should reverse field be ignored?
{'id': 'parent_id', 'name': 'parent_id', 'string': 'unknown', 'required': False, 'fields': [
{'id': 'parent_id', 'name': 'id', 'string': 'External ID', 'required': False, 'fields': []},
{'id': 'parent_id', 'name': '.id', 'string': 'Database ID', 'required': False, 'fields': []},
]},
{'id': 'value', 'name': 'value', 'string': 'unknown', 'required': False, 'fields': []},
]))
class test_match_headers_single(TransactionCase):
def test_match_by_name(self):
match = self.registry('base_import.import')._match_header(
'f0', [{'name': 'f0'}], {})
self.assertEqual(match, [{'name': 'f0'}])
def test_match_by_string(self):
match = self.registry('base_import.import')._match_header(
'some field', [{'name': 'bob', 'string': "Some Field"}], {})
self.assertEqual(match, [{'name': 'bob', 'string': "Some Field"}])
def test_nomatch(self):
match = self.registry('base_import.import')._match_header(
'should not be', [{'name': 'bob', 'string': "wheee"}], {})
self.assertEqual(match, [])
def test_recursive_match(self):
f = {
'name': 'f0',
'string': "My Field",
'fields': [
{'name': 'f0', 'string': "Sub field 0", 'fields': []},
{'name': 'f1', 'string': "Sub field 2", 'fields': []},
]
}
match = self.registry('base_import.import')._match_header(
'f0/f1', [f], {})
self.assertEqual(match, [f, f['fields'][1]])
def test_recursive_nomatch(self):
""" Match first level, fail to match second level
"""
f = {
'name': 'f0',
'string': "My Field",
'fields': [
{'name': 'f0', 'string': "Sub field 0", 'fields': []},
{'name': 'f1', 'string': "Sub field 2", 'fields': []},
]
}
match = self.registry('base_import.import')._match_header(
'f0/f2', [f], {})
self.assertEqual(match, [])
class test_match_headers_multiple(TransactionCase):
def test_noheaders(self):
self.assertEqual(
self.registry('base_import.import')._match_headers(
[], [], {}),
(None, None)
)
def test_nomatch(self):
self.assertEqual(
self.registry('base_import.import')._match_headers(
iter([
['foo', 'bar', 'baz', 'qux'],
['v1', 'v2', 'v3', 'v4'],
]),
[],
{'headers': True}),
(
['foo', 'bar', 'baz', 'qux'],
dict.fromkeys(range(4))
)
)
def test_mixed(self):
self.assertEqual(
self.registry('base_import.import')._match_headers(
iter(['foo bar baz qux/corge'.split()]),
[
{'name': 'bar', 'string': 'Bar'},
{'name': 'bob', 'string': 'Baz'},
{'name': 'qux', 'string': 'Qux', 'fields': [
{'name': 'corge', 'fields': []},
]}
],
{'headers': True}),
(['foo', 'bar', 'baz', 'qux/corge'], {
0: None,
1: ['bar'],
2: ['bob'],
3: ['qux', 'corge'],
})
)
class test_preview(TransactionCase):
def make_import(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'res.users',
'file': u"로그인,언어\nbob,1\n".encode('euc_kr'),
})
return Import, id
def test_encoding(self):
Import, id = self.make_import()
result = Import.parse_preview(self.cr, self.uid, id, {
'quoting': '"',
'separator': ',',
})
self.assertTrue('error' in result)
def test_csv_errors_quoting(self):
Import, id = self.make_import()
result = Import.parse_preview(self.cr, self.uid, id, {
'quoting': 'foo',
'separator': ',',
'encoding': 'euc_kr',
})
self.assertTrue('error' in result)
def test_csv_errors_separator(self):
Import, id = self.make_import()
result = Import.parse_preview(self.cr, self.uid, id, {
'quoting': '"',
'separator': 'bob',
'encoding': 'euc_kr',
})
self.assertTrue('error' in result)
def test_success(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
'bar,3,4\n'
'qux,5,6\n'
})
result = Import.parse_preview(self.cr, self.uid, id, {
'quoting': '"',
'separator': ',',
'headers': True,
})
self.assertEqual(result['matches'], {0: ['name'], 1: ['somevalue'], 2: None})
self.assertEqual(result['headers'], ['name', 'Some Value', 'Counter'])
# Order depends on iteration order of fields_get
self.assertItemsEqual(result['fields'], [
{'id': 'id', 'name': 'id', 'string': 'External ID', 'required':False, 'fields': []},
{'id': 'name', 'name': 'name', 'string': 'Name', 'required':False, 'fields': []},
{'id': 'somevalue', 'name': 'somevalue', 'string': 'Some Value', 'required':True, 'fields': []},
{'id': 'othervalue', 'name': 'othervalue', 'string': 'Other Variable', 'required':False, 'fields': []},
])
self.assertEqual(result['preview'], [
['foo', '1', '2'],
['bar', '3', '4'],
['qux', '5', '6'],
])
# Ensure we only have the response fields we expect
self.assertItemsEqual(result.keys(), ['matches', 'headers', 'fields', 'preview'])
class test_convert_import_data(TransactionCase):
""" Tests conversion of base_import.import input into data which
can be fed to Model.import_data
"""
def test_all(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
'bar,3,4\n'
'qux,5,6\n'
})
record = Import.browse(self.cr, self.uid, id)
data, fields = Import._convert_import_data(
record, ['name', 'somevalue', 'othervalue'],
{'quoting': '"', 'separator': ',', 'headers': True,})
self.assertItemsEqual(fields, ['name', 'somevalue', 'othervalue'])
self.assertItemsEqual(data, [
('foo', '1', '2'),
('bar', '3', '4'),
('qux', '5', '6'),
])
def test_filtered(self):
""" If ``False`` is provided as field mapping for a column,
that column should be removed from importable data
"""
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
'bar,3,4\n'
'qux,5,6\n'
})
record = Import.browse(self.cr, self.uid, id)
data, fields = Import._convert_import_data(
record, ['name', False, 'othervalue'],
{'quoting': '"', 'separator': ',', 'headers': True,})
self.assertItemsEqual(fields, ['name', 'othervalue'])
self.assertItemsEqual(data, [
('foo', '2'),
('bar', '4'),
('qux', '6'),
])
def test_norow(self):
""" If a row is composed only of empty values (due to having
filtered out non-empty values from it), it should be removed
"""
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
',3,\n'
',5,6\n'
})
record = Import.browse(self.cr, self.uid, id)
data, fields = Import._convert_import_data(
record, ['name', False, 'othervalue'],
{'quoting': '"', 'separator': ',', 'headers': True,})
self.assertItemsEqual(fields, ['name', 'othervalue'])
self.assertItemsEqual(data, [
('foo', '2'),
('', '6'),
])
def test_nofield(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
})
record = Import.browse(self.cr, self.uid, id)
self.assertRaises(
ValueError,
Import._convert_import_data,
record, [],
{'quoting': '"', 'separator': ',', 'headers': True,})
def test_falsefields(self):
Import = self.registry('base_import.import')
id = Import.create(self.cr, self.uid, {
'res_model': 'base_import.tests.models.preview',
'file': 'name,Some Value,Counter\n'
'foo,1,2\n'
})
record = Import.browse(self.cr, self.uid, id)
self.assertRaises(
ValueError,
Import._convert_import_data,
record, [False, False, False],
{'quoting': '"', 'separator': ',', 'headers': True,})
| agpl-3.0 |
nvbn/python-social-auth | social/tests/backends/test_github.py | 11 | 4899 | import json
from httpretty import HTTPretty
from social.exceptions import AuthFailed
from social.tests.backends.oauth import OAuth2Test
class GithubOAuth2Test(OAuth2Test):
backend_path = 'social.backends.github.GithubOAuth2'
user_data_url = 'https://api.github.com/user'
expected_username = 'foobar'
access_token_body = json.dumps({
'access_token': 'foobar',
'token_type': 'bearer'
})
user_data_body = json.dumps({
'login': 'foobar',
'id': 1,
'avatar_url': 'https://github.com/images/error/foobar_happy.gif',
'gravatar_id': 'somehexcode',
'url': 'https://api.github.com/users/foobar',
'name': 'monalisa foobar',
'company': 'GitHub',
'blog': 'https://github.com/blog',
'location': 'San Francisco',
'email': '[email protected]',
'hireable': False,
'bio': 'There once was...',
'public_repos': 2,
'public_gists': 1,
'followers': 20,
'following': 0,
'html_url': 'https://github.com/foobar',
'created_at': '2008-01-14T04:33:35Z',
'type': 'User',
'total_private_repos': 100,
'owned_private_repos': 100,
'private_gists': 81,
'disk_usage': 10000,
'collaborators': 8,
'plan': {
'name': 'Medium',
'space': 400,
'collaborators': 10,
'private_repos': 20
}
})
def test_login(self):
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
class GithubOAuth2NoEmailTest(GithubOAuth2Test):
user_data_body = json.dumps({
'login': 'foobar',
'id': 1,
'avatar_url': 'https://github.com/images/error/foobar_happy.gif',
'gravatar_id': 'somehexcode',
'url': 'https://api.github.com/users/foobar',
'name': 'monalisa foobar',
'company': 'GitHub',
'blog': 'https://github.com/blog',
'location': 'San Francisco',
'email': '',
'hireable': False,
'bio': 'There once was...',
'public_repos': 2,
'public_gists': 1,
'followers': 20,
'following': 0,
'html_url': 'https://github.com/foobar',
'created_at': '2008-01-14T04:33:35Z',
'type': 'User',
'total_private_repos': 100,
'owned_private_repos': 100,
'private_gists': 81,
'disk_usage': 10000,
'collaborators': 8,
'plan': {
'name': 'Medium',
'space': 400,
'collaborators': 10,
'private_repos': 20
}
})
def test_login(self):
url = 'https://api.github.com/user/emails'
HTTPretty.register_uri(HTTPretty.GET, url, status=200,
body=json.dumps(['[email protected]']),
content_type='application/json')
self.do_login()
def test_login_next_format(self):
url = 'https://api.github.com/user/emails'
HTTPretty.register_uri(HTTPretty.GET, url, status=200,
body=json.dumps([{'email': '[email protected]'}]),
content_type='application/json')
self.do_login()
def test_partial_pipeline(self):
self.do_partial_pipeline()
class GithubOrganizationOAuth2Test(GithubOAuth2Test):
backend_path = 'social.backends.github.GithubOrganizationOAuth2'
def auth_handlers(self, start_url):
url = 'https://api.github.com/orgs/foobar/members/foobar'
HTTPretty.register_uri(HTTPretty.GET, url, status=204, body='')
return super(GithubOrganizationOAuth2Test, self).auth_handlers(
start_url
)
def test_login(self):
self.strategy.set_settings({'SOCIAL_AUTH_GITHUB_ORG_NAME': 'foobar'})
self.do_login()
def test_partial_pipeline(self):
self.strategy.set_settings({'SOCIAL_AUTH_GITHUB_ORG_NAME': 'foobar'})
self.do_partial_pipeline()
class GithubOrganizationOAuth2FailTest(GithubOAuth2Test):
backend_path = 'social.backends.github.GithubOrganizationOAuth2'
def auth_handlers(self, start_url):
url = 'https://api.github.com/orgs/foobar/members/foobar'
HTTPretty.register_uri(HTTPretty.GET, url, status=404,
body='{"message": "Not Found"}',
content_type='application/json')
return super(GithubOrganizationOAuth2FailTest, self).auth_handlers(
start_url
)
def test_login(self):
self.strategy.set_settings({'SOCIAL_AUTH_GITHUB_ORG_NAME': 'foobar'})
self.do_login.when.called_with().should.throw(AuthFailed)
def test_partial_pipeline(self):
self.strategy.set_settings({'SOCIAL_AUTH_GITHUB_ORG_NAME': 'foobar'})
self.do_partial_pipeline.when.called_with().should.throw(AuthFailed)
| bsd-3-clause |
lucuma/solution | solution/utils.py | 2 | 2728 | # coding=utf-8
import datetime
import re
from xml.sax.saxutils import quoteattr
from markupsafe import Markup, escape_silent
from ._compat import to_unicode, iteritems
class FakeMultiDict(dict):
"""Adds a fake `getlist` method to a regular dict; or acts as a proxy to
Webob's MultiDict `getall` method.
"""
def __getattr__(self, attr):
try:
return self[attr]
except KeyError as error:
raise AttributeError(error)
def getlist(self, name):
if hasattr(self, 'getall'):
return self.getall(name)
value = self.get(name)
if value is None:
return []
return [value]
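# Usage sketch (illustration only; `_fake_multidict_demo` is a hypothetical
# helper, not part of the library): exercises the plain-dict fallback path,
# i.e. when no Webob-style `getall` is present.
def _fake_multidict_demo():
    d = FakeMultiDict(user='bob')
    assert d.user == 'bob'               # attribute access proxies to the dict
    assert d.getlist('user') == ['bob']  # single values come back as a list
    assert d.getlist('missing') == []    # absent keys yield an empty list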
def escape(value):
return escape_silent(to_unicode(value))
def get_html_attrs(kwargs=None):
"""Generate HTML attributes from the provided keyword arguments.
The output value is sorted by the passed keys, to provide consistent
output. Because of the frequent use of the normally reserved keyword
`class`, `classes` is used instead. Also, all underscores are translated
to regular dashes.
Set any property with a `True` value.
>>> get_html_attrs({'id': 'text1', 'classes': 'myclass',
...     'data_id': 1, 'checked': True})
u'class="myclass" data-id="1" id="text1" checked'
"""
kwargs = kwargs or {}
attrs = []
props = []
classes = kwargs.get('classes', '').strip()
if classes:
classes = ' '.join(re.split(r'\s+', classes))
classes = to_unicode(quoteattr(classes))
attrs.append('class=%s' % classes)
try:
del kwargs['classes']
except KeyError:
pass
for key, value in iteritems(kwargs):
key = key.replace('_', '-')
key = to_unicode(key)
if isinstance(value, bool):
if value is True:
props.append(key)
else:
value = quoteattr(Markup(value))
attrs.append(u'%s=%s' % (key, value))
attrs.sort()
props.sort()
attrs.extend(props)
return u' '.join(attrs)
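# Quick check of the docstring example above (illustration only; the helper
# name `_get_html_attrs_demo` is not part of the library):
def _get_html_attrs_demo():
    attrs = get_html_attrs({'id': 'text1', 'classes': 'myclass',
                            'data_id': 1, 'checked': True})
    # attributes are emitted sorted, boolean properties last
    assert attrs == u'class="myclass" data-id="1" id="text1" checked'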
def get_obj_value(obj, name, default=None):
# The field name could conflict with a native method
# if `obj` is a dictionary instance
if isinstance(obj, dict):
return obj.get(name, default)
return getattr(obj, name, default)
def set_obj_value(obj, name, value):
# The field name could conflict with a native method
# if `obj` is a dictionary instance
if isinstance(obj, dict):
obj[name] = value
else:
setattr(obj, name, value)
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
if isinstance(obj, datetime.date):
serial = obj.isoformat()
return serial
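# Usage sketch: json_serial is designed to be passed as the `default` hook of
# json.dumps (a sketch under that assumption; `_json_serial_demo` is a
# hypothetical helper added for illustration):
def _json_serial_demo():
    import json
    payload = {'when': datetime.date(2020, 1, 2)}
    # dates serialize via isoformat(): '{"when": "2020-01-02"}'
    return json.dumps(payload, default=json_serial)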
| mit |
tallakahath/pymatgen | pymatgen/util/coord_utils.py | 2 | 15753 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
from six.moves import zip
import itertools
import numpy as np
import math
from . import coord_utils_cython as cuc
"""
Utilities for manipulating coordinates or list of coordinates, under periodic
boundary conditions or otherwise. Many of these are heavily vectorized in
numpy for performance.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "Nov 27, 2011"
# array size threshold for looping instead of broadcasting
LOOP_THRESHOLD = 1e6
def find_in_coord_list(coord_list, coord, atol=1e-8):
"""
Find the indices of matches of a particular coord in a coord_list.
Args:
coord_list: List of coords to test
coord: Specific coordinates
atol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and
array.
Returns:
Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
"""
if len(coord_list) == 0:
return []
diff = np.array(coord_list) - np.array(coord)[None, :]
return np.where(np.all(np.abs(diff) < atol, axis=1))[0]
def in_coord_list(coord_list, coord, atol=1e-8):
"""
Tests if a particular coord is within a coord_list.
Args:
coord_list: List of coords to test
coord: Specific coordinates
atol: Absolute tolerance. Defaults to 1e-8. Accepts both scalar and
array.
Returns:
True if coord is in the coord list.
"""
return len(find_in_coord_list(coord_list, coord, atol=atol)) > 0
def is_coord_subset(subset, superset, atol=1e-8):
"""
Tests if all coords in subset are contained in superset.
Doesn't use periodic boundary conditions
Args:
subset, superset: List of coords
Returns:
True if all of subset is in superset.
"""
c1 = np.array(subset)
c2 = np.array(superset)
is_close = np.all(np.abs(c1[:, None, :] - c2[None, :, :]) < atol, axis=-1)
any_close = np.any(is_close, axis=-1)
return np.all(any_close)
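# Minimal sketch (illustration only; `_is_coord_subset_demo` is a hypothetical
# helper): non-periodic subset testing with the default absolute tolerance.
def _is_coord_subset_demo():
    superset = [[0, 0, 0], [0.5, 0.5, 0.5]]
    assert is_coord_subset([[0.5, 0.5, 0.5]], superset)
    assert not is_coord_subset([[0.5, 0.5, 0.6]], superset)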
def coord_list_mapping(subset, superset, atol=1e-8):
"""
Gives the index mapping from a subset to a superset.
Subset and superset cannot contain duplicate rows
Args:
subset, superset: List of coords
Returns:
list of indices such that superset[indices] = subset
"""
c1 = np.array(subset)
c2 = np.array(superset)
inds = np.where(np.all(np.isclose(c1[:, None, :], c2[None, :, :], atol=atol),
axis=2))[1]
result = c2[inds]
if not np.allclose(c1, result, atol=atol):
if not is_coord_subset(subset, superset):
raise ValueError("subset is not a subset of superset")
if not result.shape == c1.shape:
raise ValueError("Something wrong with the inputs, likely duplicates "
"in superset")
return inds
def coord_list_mapping_pbc(subset, superset, atol=1e-8):
"""
Gives the index mapping from a subset to a superset.
Superset cannot contain duplicate matching rows
Args:
subset, superset: List of frac_coords
Returns:
list of indices such that superset[indices] = subset
"""
atol = np.array([1., 1., 1.]) * atol
return cuc.coord_list_mapping_pbc(subset, superset, atol)
def get_linear_interpolated_value(x_values, y_values, x):
"""
Returns an interpolated value by linear interpolation between two values.
This method is written to avoid dependency on scipy, which causes issues on
threading servers.
Args:
x_values: Sequence of x values.
y_values: Corresponding sequence of y values
x: Get value at particular x
Returns:
Value at x.
"""
a = np.array(sorted(zip(x_values, y_values), key=lambda d: d[0]))
ind = np.where(a[:, 0] >= x)[0]
if len(ind) == 0 or ind[0] == 0:
raise ValueError("x is out of range of provided x_values")
i = ind[0]
x1, x2 = a[i - 1][0], a[i][0]
y1, y2 = a[i - 1][1], a[i][1]
return y1 + (y2 - y1) / (x2 - x1) * (x - x1)
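# Worked example (illustration only; `_interp_demo` is a hypothetical helper):
# halfway between (0, 0) and (2, 4) the interpolated value is 2.0.
def _interp_demo():
    assert get_linear_interpolated_value([0, 2], [0, 4], 1) == 2.0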
def all_distances(coords1, coords2):
"""
Returns the distances between two lists of coordinates
Args:
coords1: First set of cartesian coordinates.
coords2: Second set of cartesian coordinates.
Returns:
2d array of cartesian distances. E.g the distance between
coords1[i] and coords2[j] is distances[i,j]
"""
c1 = np.array(coords1)
c2 = np.array(coords2)
z = (c1[:, None, :] - c2[None, :, :]) ** 2
return np.sum(z, axis=-1) ** 0.5
def pbc_diff(fcoords1, fcoords2):
"""
Returns the 'fractional distance' between two coordinates taking into
account periodic boundary conditions.
Args:
fcoords1: First set of fractional coordinates. e.g., [0.5, 0.6,
0.7] or [[1.1, 1.2, 4.3], [0.5, 0.6, 0.7]]. It can be a single
coord or any array of coords.
fcoords2: Second set of fractional coordinates.
Returns:
Fractional distance. Each coordinate must have the property that
abs(a) <= 0.5. Examples:
pbc_diff([0.1, 0.1, 0.1], [0.3, 0.5, 0.9]) = [-0.2, -0.4, 0.2]
pbc_diff([0.9, 0.1, 1.01], [0.3, 0.5, 0.9]) = [-0.4, -0.4, 0.11]
"""
fdist = np.subtract(fcoords1, fcoords2)
return fdist - np.round(fdist)
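# The docstring examples, checked numerically (illustration only;
# `_pbc_diff_demo` is a hypothetical helper):
def _pbc_diff_demo():
    assert np.allclose(pbc_diff([0.1, 0.1, 0.1], [0.3, 0.5, 0.9]),
                       [-0.2, -0.4, 0.2])
    assert np.allclose(pbc_diff([0.9, 0.1, 1.01], [0.3, 0.5, 0.9]),
                       [-0.4, -0.4, 0.11])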
def pbc_shortest_vectors(lattice, fcoords1, fcoords2, mask=None,
return_d2=False):
"""
Returns the shortest vectors between two lists of coordinates taking into
account periodic boundary conditions and the lattice.
Args:
lattice: lattice to use
fcoords1: First set of fractional coordinates. e.g., [0.5, 0.6, 0.7]
or [[1.1, 1.2, 4.3], [0.5, 0.6, 0.7]]. It can be a single
coord or any array of coords.
fcoords2: Second set of fractional coordinates.
mask (boolean array): Mask of matches that are not allowed.
i.e. if mask[1,2] == True, then subset[1] cannot be matched
to superset[2]
return_d2 (boolean): whether to also return the squared distances
Returns:
array of displacement vectors from fcoords1 to fcoords2
first index is fcoords1 index, second is fcoords2 index
"""
return cuc.pbc_shortest_vectors(lattice, fcoords1, fcoords2, mask,
return_d2)
def find_in_coord_list_pbc(fcoord_list, fcoord, atol=1e-8):
"""
Get the indices of all points in a fractional coord list that are
equal to a fractional coord (with a tolerance), taking into account
periodic boundary conditions.
Args:
fcoord_list: List of fractional coords
fcoord: A specific fractional coord to test.
atol: Absolute tolerance. Defaults to 1e-8.
Returns:
Indices of matches, e.g., [0, 1, 2, 3]. Empty list if not found.
"""
if len(fcoord_list) == 0:
return []
fcoords = np.tile(fcoord, (len(fcoord_list), 1))
fdist = fcoord_list - fcoords
fdist -= np.round(fdist)
return np.where(np.all(np.abs(fdist) < atol, axis=1))[0]
def in_coord_list_pbc(fcoord_list, fcoord, atol=1e-8):
"""
Tests if a particular fractional coord is within a fractional coord_list.
Args:
fcoord_list: List of fractional coords to test
fcoord: A specific fractional coord to test.
atol: Absolute tolerance. Defaults to 1e-8.
Returns:
True if coord is in the coord list.
"""
return len(find_in_coord_list_pbc(fcoord_list, fcoord, atol=atol)) > 0
def is_coord_subset_pbc(subset, superset, atol=1e-8, mask=None):
"""
Tests if all fractional coords in subset are contained in superset.
Args:
subset, superset: List of fractional coords
atol (float or size 3 array): Tolerance for matching
mask (boolean array): Mask of matches that are not allowed.
i.e. if mask[1,2] == True, then subset[1] cannot be matched
to superset[2]
Returns:
True if all of subset is in superset.
"""
c1 = np.array(subset, dtype=np.float64)
c2 = np.array(superset, dtype=np.float64)
if mask is not None:
m = np.array(mask, dtype=np.int)
else:
m = np.zeros((len(subset), len(superset)), dtype=np.int)
atol = np.zeros(3, dtype=np.float64) + atol
return cuc.is_coord_subset_pbc(c1, c2, atol, m)
def lattice_points_in_supercell(supercell_matrix):
"""
Returns the list of points on the original lattice contained in the
supercell in fractional coordinates (with the supercell basis).
e.g. [[2,0,0],[0,1,0],[0,0,1]] returns [[0,0,0],[0.5,0,0]]
Args:
supercell_matrix: 3x3 matrix describing the supercell
Returns:
numpy array of the fractional coordinates
"""
diagonals = np.array(
[[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1],
[1, 1, 0], [1, 1, 1]])
d_points = np.dot(diagonals, supercell_matrix)
mins = np.min(d_points, axis=0)
maxes = np.max(d_points, axis=0) + 1
ar = np.arange(mins[0], maxes[0])[:, None] * \
np.array([1, 0, 0])[None, :]
br = np.arange(mins[1], maxes[1])[:, None] * \
np.array([0, 1, 0])[None, :]
cr = np.arange(mins[2], maxes[2])[:, None] * \
np.array([0, 0, 1])[None, :]
all_points = ar[:, None, None] + br[None, :, None] + cr[None, None, :]
all_points = all_points.reshape((-1, 3))
frac_points = np.dot(all_points, np.linalg.inv(supercell_matrix))
tvects = frac_points[np.all(frac_points < 1 - 1e-10, axis=1)
& np.all(frac_points >= -1e-10, axis=1)]
assert len(tvects) == round(abs(np.linalg.det(supercell_matrix)))
return tvects
def barycentric_coords(coords, simplex):
"""
Converts a list of coordinates to barycentric coordinates, given a
simplex with d+1 points. Only works for d >= 2.
Args:
coords: list of n coords to transform, shape should be (n,d)
simplex: list of coordinates that form the simplex, shape should be
(d+1, d)
Returns:
a LIST of barycentric coordinates (even if the original input was 1d)
"""
coords = np.atleast_2d(coords)
t = np.transpose(simplex[:-1, :]) - np.transpose(simplex[-1, :])[:, None]
all_but_one = np.transpose(
np.linalg.solve(t, np.transpose(coords - simplex[-1])))
last_coord = 1 - np.sum(all_but_one, axis=-1)[:, None]
return np.append(all_but_one, last_coord, axis=-1)
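# Sanity sketch (illustration only; `_barycentric_demo` is a hypothetical
# helper): the centroid of a triangle has barycentric coords (1/3, 1/3, 1/3).
def _barycentric_demo():
    tri = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
    b = barycentric_coords([1.0 / 3, 1.0 / 3], tri)
    assert np.allclose(b, [1.0 / 3, 1.0 / 3, 1.0 / 3])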
def get_angle(v1, v2, units="degrees"):
"""
Calculates the angle between two vectors.
Args:
v1: Vector 1
v2: Vector 2
units: "degrees" or "radians". Defaults to "degrees".
Returns:
Angle between them in degrees.
"""
d = np.dot(v1, v2) / np.linalg.norm(v1) / np.linalg.norm(v2)
d = min(d, 1)
d = max(d, -1)
angle = math.acos(d)
if units == "degrees":
return math.degrees(angle)
elif units == "radians":
return angle
else:
raise ValueError("Invalid units {}".format(units))
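# Quick check (illustration only; `_get_angle_demo` is a hypothetical helper):
# orthogonal unit vectors are 90 degrees (pi/2 radians) apart.
def _get_angle_demo():
    assert abs(get_angle([1, 0, 0], [0, 1, 0]) - 90.0) < 1e-9
    assert abs(get_angle([1, 0, 0], [0, 1, 0], units="radians")
               - math.pi / 2) < 1e-9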
class Simplex(object):
"""
A generalized simplex object. See http://en.wikipedia.org/wiki/Simplex.
.. attribute: space_dim
Dimension of the space. Usually, this is 1 more than the simplex_dim.
.. attribute: simplex_dim
Dimension of the simplex coordinate space.
"""
def __init__(self, coords):
"""
Initializes a Simplex from vertex coordinates.
Args:
coords ([[float]]): Coords of the vertices of the simplex. E.g.,
[[1, 2, 3], [2, 4, 5], [6, 7, 8], [8, 9, 10].
"""
self._coords = np.array(coords)
self.space_dim, self.simplex_dim = self._coords.shape
self.origin = self._coords[-1]
if self.space_dim == self.simplex_dim + 1:
# precompute augmented matrix for calculating bary_coords
self._aug = np.concatenate([coords, np.ones((self.space_dim, 1))],
axis=-1)
self._aug_inv = np.linalg.inv(self._aug)
@property
def volume(self):
"""
Volume of the simplex.
"""
return abs(np.linalg.det(self._aug)) / math.factorial(self.simplex_dim)
def bary_coords(self, point):
try:
return np.dot(np.concatenate([point, [1]]), self._aug_inv)
except AttributeError:
raise ValueError('Simplex is not full-dimensional')
def point_from_bary_coords(self, bary_coords):
try:
return np.dot(bary_coords, self._aug[:, :-1])
except AttributeError:
raise ValueError('Simplex is not full-dimensional')
def in_simplex(self, point, tolerance=1e-8):
"""
Checks if a point is in the simplex using the standard barycentric
coordinate system algorithm.
Taking an arbitrary vertex as an origin, we compute the basis for the
simplex from this origin by subtracting all other vertices from the
origin. We then project the point into this coordinate system and
determine the linear decomposition coefficients in this coordinate
system. If the coeffs satisfy that all coeffs >= 0, the composition
is in the facet.
Args:
point ([float]): Point to test
tolerance (float): Tolerance to test if point is in simplex.
"""
return (self.bary_coords(point) >= -tolerance).all()
def line_intersection(self, point1, point2, tolerance=1e-8):
"""
Computes the intersection points of a line with a simplex
Args:
point1, point2 ([float]): Points that determine the line
Returns:
points where the line intersects the simplex (0, 1, or 2)
"""
b1 = self.bary_coords(point1)
b2 = self.bary_coords(point2)
l = b1 - b2
# don't use barycentric dimension where line is parallel to face
valid = np.abs(l) > 1e-10
# array of all the barycentric coordinates on the line where
# one of the values is 0
possible = b1 - (b1[valid] / l[valid])[:, None] * l
barys = []
for p in possible:
# it's only an intersection if its in the simplex
if (p >= -tolerance).all():
found = False
# don't return duplicate points
for b in barys:
if np.all(np.abs(b - p) < tolerance):
found = True
break
if not found:
barys.append(p)
assert len(barys) < 3
return [self.point_from_bary_coords(b) for b in barys]
def __eq__(self, other):
for p in itertools.permutations(self._coords):
if np.allclose(p, other.coords):
return True
return False
def __hash__(self):
return len(self._coords)
def __repr__(self):
output = ["{}-simplex in {}D space".format(self.simplex_dim,
self.space_dim),
"Vertices:"]
for coord in self._coords:
output.append("\t({})".format(", ".join(map(str, coord))))
return "\n".join(output)
def __str__(self):
return self.__repr__()
@property
def coords(self):
"""
Returns a copy of the vertex coordinates in the simplex.
"""
return self._coords.copy()
| mit |
idooley/AnalogCrossover | calculator/high_pass_with_gain.py | 1 | 3175 | #!/opt/local/bin/python3.4
# Loads some common capacitor and resistor values and then exhaustively searches the space of possible combinations to find a good combination.
import re,sys,math,blist
from blist import sortedset
def convertToValue(x) :
y = x.strip()
if y.endswith("pF") :
return float(y.replace("pF","")) * 0.000000000001
if y.endswith("nF") :
return float(y.replace("nF","")) * 0.000000001
if y.endswith("uF") :
return float(y.replace("uF","")) * 0.000001
if y.endswith("mF") :
return float(y.replace("mF","")) * 0.001
if y.endswith("K") :
return float(y.replace("K","")) * 1000.0
if y.endswith("M") :
return float(y.replace("M","")) *1000000.0
return float(y)
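# Expected conversions (illustration only; `_convert_demo` is a hypothetical
# helper): suffixes scale into base units (ohms / farads).
def _convert_demo():
    assert convertToValue("4K") == 4000.0
    assert convertToValue("47nF") == 47 * 0.000000001
    assert convertToValue("330") == 330.0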
def resistancesComposedOfTwoResistors(x) :
results = sortedset()
for v in x:
results.add(v)
for v1 in x:
for v2 in x:
r = v2/v1;
if r > 0.33 and r < 3.0:
results.add((v1*v2)/(v1+v2))
return results
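# Parallel-pair sketch (illustration only; `_pairs_demo` is a hypothetical
# helper): two equal resistors in parallel halve the value, so 1K || 1K = 500.
def _pairs_demo():
    values = resistancesComposedOfTwoResistors([1000.0, 2000.0])
    assert 500.0 in values   # 1000 || 1000
    assert 1000.0 in values  # 2000 || 2000, also present as a single value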
def main() :
if len(sys.argv) != 3:
print ("usage: " + sys.argv[0] + " frequency gain")
capfile = open("capacitorValues.txt", "r")
resfile = open("resistor_E24_values.txt", "r")
capValues = []
for line in capfile.readlines():
capValues = capValues + [convertToValue(x) for x in re.split(r"\s+", line) if x]
capValues.sort()
resValues = []
for line in resfile.readlines():
resValues = resValues + [convertToValue(x) for x in re.split(r"\s+", line) if x]
resValues.sort()
# Filter to specific ranges
resValues = [x for x in resValues if (x >= convertToValue("200") and x <= convertToValue("4K")) ]
capValues = [x for x in capValues if (x >= convertToValue("47nF") and x <= convertToValue("120nF")) ]
qTol = 0.01
qExpected = 1.0 / math.sqrt(2.0)
kTol = 0.05
kExpected = float(sys.argv[2]) # target gain
fTol = 10.0
fExpected = float(sys.argv[1]) # target frequency
print ("num resistor values: ", len(resValues))
expandedResValues = resistancesComposedOfTwoResistors(resValues)
print ("num resistor pair values: ", len(expandedResValues))
# print expandedResValues
resValues1 = resValues
resValues2 = resValues
resValues3 = [1000]
resValues4 = expandedResValues
for R1 in resValues1:
for R2 in resValues2:
for R3 in resValues3:
for R4 in resValues4:
# (choosing R4 = R3 here would correspond to 2x gain)
for C1 in capValues:
C2 = C1
k = 1.0 + R4 / R3
if abs(k - kExpected) < kTol:
try:
q = math.sqrt(C1*C2*R1*R2)/(R2*C2+R2*C1+R1*C2*(1-k))
if abs(q - qExpected) < qTol:
f = 1.0/(2.0*3.1415927*math.sqrt(C1*C2*R1*R2))
if abs(f - fExpected) < fTol:
print (R1, R2, R3, R4, C1, C2)
print ("q=", q)
print ("f=", f)
except (ValueError, ZeroDivisionError):
# a zero denominator (possible when k > 1) makes the combination invalid
pass
if __name__ == "__main__":
main()
| bsd-3-clause |
cebrusfs/217gdb | pwndbg/strings.py | 5 | 1331 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Functionality for resolving ASCII printable strings within
the debuggee's address space.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import string
import gdb
import pwndbg.events
import pwndbg.memory
import pwndbg.typeinfo
length = 15
@pwndbg.events.stop
def update_length():
r"""
Unfortunately there's not a better way to get at this info.
>>> gdb.execute('show print elements', from_tty=False, to_string=True)
'Limit on string chars or array elements to print is 21.\n'
"""
global length
message = gdb.execute('show print elements', from_tty=False, to_string=True)
message = message.split()[-1]
message = message.strip('.')
if message == 'unlimited':
length = 0
else:
length = int(message)
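# Parsing sketch (illustration only, runs without a live gdb session;
# `_parse_limit` is a hypothetical helper mirroring the logic above):
def _parse_limit(message):
    token = message.split()[-1].strip('.')
    return 0 if token == 'unlimited' else int(token)
# _parse_limit('Limit on string chars or array elements to print is 21.\n')
# returns 21, and a trailing 'unlimited.' maps to 0.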
def get(address, maxlen = None):
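    """Fetch a printable ASCII string at `address`, truncated to `maxlen`
    characters (defaulting to the module-level `length`). Returns None if
    the memory is unreadable or contains non-printable characters; '...'
    marks truncation.
    """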
if maxlen is None:
maxlen = length
try:
sz = pwndbg.memory.string(address, maxlen)
sz = sz.decode('latin-1', 'replace')
if not sz or not all(s in string.printable for s in sz):
return None
except Exception:
return None
if len(sz) < maxlen or not maxlen:
return sz
return sz[:maxlen] + '...'
| mit |
crs4/blast-python | BlastPython/blaster.py | 1 | 1869 | # BEGIN_COPYRIGHT
#
# Copyright (C) 2014 CRS4.
#
# This file is part of blast-python.
#
# blast-python is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# blast-python is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# blast-python. If not, see <http://www.gnu.org/licenses/>.
#
# END_COPYRIGHT
import ncbi_toolkit
class blaster(object):
def __init__(self, query, **kw):
# program type must be passed to the constructor to ensure correct
# default settings for all other options
program = kw.pop('Program', ncbi_toolkit.EProgram.eBlastn)
self.query = query
self.blast_engine = ncbi_toolkit.CBl2Seq(
ncbi_toolkit.SSeqLoc(),
ncbi_toolkit.SSeqLoc(),
program
)
self.blast_engine.SetQuery(query)
self.query_already_setup = False
opts = self.blast_engine.SetOptions()
for k in kw:
opts[k] = kw[k]
def get_options(self):
return self.blast_engine.GetOptions()
def set_options(self):
return self.blast_engine.SetOptions()
def blast(self, subject):
"""
Blast on subject and return results.
"""
self.blast_engine.SetSubject(subject)
self.blast_engine.SetupSearch(self.query_already_setup)
self.query_already_setup = True
self.blast_engine.ScanDB()
return subject, self.blast_engine.GetResults()
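# Usage sketch (assumptions: `query` and `subject` are ncbi_toolkit.SSeqLoc
# objects prepared elsewhere, and valid option names depend on the toolkit
# build; `_blast_demo` is a hypothetical helper, not part of the module):
def _blast_demo(query, subject):
    b = blaster(query, Program=ncbi_toolkit.EProgram.eBlastn)
    subj, results = b.blast(subject)  # the query setup is cached for reuse
    return results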
| gpl-3.0 |
40223117cda/w16cdaa | static/Brython3.1.3-20150514-095342/Lib/platform.py | 620 | 51006 | #!/usr/bin/env python3
""" This module tries to retrieve as much platform-identifying data as
possible. It makes this information available via function APIs.
If called from the command line, it prints the platform
information concatenated as single string to stdout. The output
format is useable as part of a filename.
"""
# This module is maintained by Marc-Andre Lemburg <[email protected]>.
# If you find problems, please submit bug reports/patches via the
# Python bug tracker (http://bugs.python.org) and assign them to "lemburg".
#
# Still needed:
# * more support for WinCE
# * support for MS-DOS (PythonDX ?)
# * support for Amiga and other still unsupported platforms running Python
# * support for additional Linux distributions
#
# Many thanks to all those who helped adding platform-specific
# checks (in no particular order):
#
# Charles G Waldman, David Arnold, Gordon McMillan, Ben Darnell,
# Jeff Bauer, Cliff Crawford, Ivan Van Laningham, Josef
# Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg
# Andruk, Just van Rossum, Thomas Heller, Mark R. Levinson, Mark
# Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support),
# Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter
#
# History:
#
# <see CVS and SVN checkin messages for history>
#
# 1.0.7 - added DEV_NULL
# 1.0.6 - added linux_distribution()
# 1.0.5 - fixed Java support to allow running the module on Jython
# 1.0.4 - added IronPython support
# 1.0.3 - added normalization of Windows system name
# 1.0.2 - added more Windows support
# 1.0.1 - reformatted to make doc.py happy
# 1.0.0 - reformatted a bit and checked into Python CVS
# 0.8.0 - added sys.version parser and various new access
# APIs (python_version(), python_compiler(), etc.)
# 0.7.2 - fixed architecture() to use sizeof(pointer) where available
# 0.7.1 - added support for Caldera OpenLinux
# 0.7.0 - some fixes for WinCE; untabified the source file
# 0.6.2 - support for OpenVMS - requires version 1.5.2-V006 or higher and
# vms_lib.getsyi() configured
# 0.6.1 - added code to prevent 'uname -p' on platforms which are
# known not to support it
# 0.6.0 - fixed win32_ver() to hopefully work on Win95,98,NT and Win2k;
# did some cleanup of the interfaces - some APIs have changed
# 0.5.5 - fixed another type in the MacOS code... should have
# used more coffee today ;-)
# 0.5.4 - fixed a few typos in the MacOS code
# 0.5.3 - added experimental MacOS support; added better popen()
# workarounds in _syscmd_ver() -- still not 100% elegant
# though
# 0.5.2 - fixed uname() to return '' instead of 'unknown' in all
# return values (the system uname command tends to return
# 'unknown' instead of just leaving the field empty)
# 0.5.1 - included code for slackware dist; added exception handlers
# to cover up situations where platforms don't have os.popen
# (e.g. Mac) or fail on socket.gethostname(); fixed libc
# detection RE
# 0.5.0 - changed the API names referring to system commands to *syscmd*;
# added java_ver(); made syscmd_ver() a private
# API (was system_ver() in previous versions) -- use uname()
# instead; extended the win32_ver() to also return processor
# type information
# 0.4.0 - added win32_ver() and modified the platform() output for WinXX
# 0.3.4 - fixed a bug in _follow_symlinks()
# 0.3.3 - fixed popen() and "file" command invocation bugs
# 0.3.2 - added architecture() API and support for it in platform()
# 0.3.1 - fixed syscmd_ver() RE to support Windows NT
# 0.3.0 - added system alias support
# 0.2.3 - removed 'wince' again... oh well.
# 0.2.2 - added 'wince' to syscmd_ver() supported platforms
# 0.2.1 - added cache logic and changed the platform string format
# 0.2.0 - changed the API to use functions instead of module globals
# since some action take too long to be run on module import
# 0.1.0 - first release
#
# You can always get the latest version of this module at:
#
# http://www.egenix.com/files/python/platform.py
#
# If that URL should fail, try contacting the author.
__copyright__ = """
Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:[email protected]
Copyright (c) 2000-2010, eGenix.com Software GmbH; mailto:[email protected]
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation or portions thereof, including modifications,
that you make.
EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""
__version__ = '1.0.7'
import collections
import sys, os, re, subprocess
### Globals & Constants
# Determine the platform's /dev/null device
try:
DEV_NULL = os.devnull
except AttributeError:
# os.devnull was added in Python 2.4, so emulate it for earlier
# Python versions
if sys.platform in ('dos','win32','win16','os2'):
# Use the old CP/M NUL as device name
DEV_NULL = 'NUL'
else:
# Standard Unix uses /dev/null
DEV_NULL = '/dev/null'
### Platform specific APIs
_libc_search = re.compile(b'(__libc_init)'
b'|'
b'(GLIBC_([0-9.]+))'
b'|'
br'(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)', re.ASCII)
def libc_ver(executable=sys.executable,lib='',version='',
chunksize=16384):
""" Tries to determine the libc version that the file executable
(which defaults to the Python interpreter) is linked against.
Returns a tuple of strings (lib,version) which default to the
given parameters in case the lookup fails.
Note that the function has intimate knowledge of how different
libc versions add symbols to the executable and thus is probably
only useable for executables compiled using gcc.
The file is read and scanned in chunks of chunksize bytes.
"""
if hasattr(os.path, 'realpath'):
# Python 2.2 introduced os.path.realpath(); it is used
# here to work around problems with Cygwin not being
# able to open symlinks for reading
executable = os.path.realpath(executable)
f = open(executable,'rb')
binary = f.read(chunksize)
pos = 0
while 1:
if b'libc' in binary or b'GLIBC' in binary:
m = _libc_search.search(binary,pos)
else:
m = None
if not m:
binary = f.read(chunksize)
if not binary:
break
pos = 0
continue
libcinit,glibc,glibcversion,so,threads,soversion = [
s.decode('latin1') if s is not None else s
for s in m.groups()]
if libcinit and not lib:
lib = 'libc'
elif glibc:
if lib != 'glibc':
lib = 'glibc'
version = glibcversion
elif glibcversion > version:
version = glibcversion
elif so:
if lib != 'glibc':
lib = 'libc'
if soversion and soversion > version:
version = soversion
if threads and version[-len(threads):] != threads:
version = version + threads
pos = m.end()
f.close()
return lib,version
def _dist_try_harder(distname,version,id):
""" Tries some special tricks to get the distribution
information in case the default method fails.
Currently supports older SuSE Linux, Caldera OpenLinux and
Slackware Linux distributions.
"""
if os.path.exists('/var/adm/inst-log/info'):
# SuSE Linux stores distribution information in that file
distname = 'SuSE'
for line in open('/var/adm/inst-log/info'):
tv = line.split()
if len(tv) == 2:
tag,value = tv
else:
continue
if tag == 'MIN_DIST_VERSION':
version = value.strip()
elif tag == 'DIST_IDENT':
values = value.split('-')
id = values[2]
return distname,version,id
if os.path.exists('/etc/.installed'):
# Caldera OpenLinux has some infos in that file (thanks to Colin Kong)
for line in open('/etc/.installed'):
pkg = line.split('-')
if len(pkg) >= 2 and pkg[0] == 'OpenLinux':
# XXX does Caldera support non Intel platforms ? If yes,
# where can we find the needed id ?
return 'OpenLinux',pkg[1],id
if os.path.isdir('/usr/lib/setup'):
# Check for slackware version tag file (thanks to Greg Andruk)
verfiles = os.listdir('/usr/lib/setup')
for n in range(len(verfiles)-1, -1, -1):
if verfiles[n][:14] != 'slack-version-':
del verfiles[n]
if verfiles:
verfiles.sort()
distname = 'slackware'
version = verfiles[-1][14:]
return distname,version,id
return distname,version,id
_release_filename = re.compile(r'(\w+)[-_](release|version)', re.ASCII)
_lsb_release_version = re.compile(r'(.+)'
' release '
'([\d.]+)'
'[^(]*(?:\((.+)\))?', re.ASCII)
_release_version = re.compile(r'([^0-9]+)'
'(?: release )?'
'([\d.]+)'
'[^(]*(?:\((.+)\))?', re.ASCII)
# See also http://www.novell.com/coolsolutions/feature/11251.html
# and http://linuxmafia.com/faq/Admin/release-files.html
# and http://data.linux-ntfs.org/rpm/whichrpm
# and http://www.die.net/doc/linux/man/man1/lsb_release.1.html
_supported_dists = (
'SuSE', 'debian', 'fedora', 'redhat', 'centos',
'mandrake', 'mandriva', 'rocks', 'slackware', 'yellowdog', 'gentoo',
'UnitedLinux', 'turbolinux', 'arch', 'mageia')
def _parse_release_file(firstline):
# Default to empty 'version' and 'id' strings. Both defaults are used
# when 'firstline' is empty. 'id' defaults to empty when an id can not
# be deduced.
version = ''
id = ''
# Parse the first line
m = _lsb_release_version.match(firstline)
if m is not None:
# LSB format: "distro release x.x (codename)"
return tuple(m.groups())
# Pre-LSB format: "distro x.x (codename)"
m = _release_version.match(firstline)
if m is not None:
return tuple(m.groups())
# Unknown format... take the first two words
l = firstline.strip().split()
if l:
version = l[0]
if len(l) > 1:
id = l[1]
return '', version, id
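# Branch examples (illustration only; `_parse_release_file_demo` is a
# hypothetical helper): LSB-style lines parse fully, unknown formats fall
# back to "first two words".
def _parse_release_file_demo():
    assert _parse_release_file('Fedora release 20 (Heisenbug)') == \
        ('Fedora', '20', 'Heisenbug')
    assert _parse_release_file('MyOS alpha build') == ('', 'MyOS', 'alpha')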
def linux_distribution(distname='', version='', id='',
supported_dists=_supported_dists,
full_distribution_name=1):
""" Tries to determine the name of the Linux OS distribution name.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
supported_dists may be given to define the set of Linux
distributions to look for. It defaults to a list of currently
supported Linux distributions identified by their release file
name.
If full_distribution_name is true (default), the full
distribution read from the OS is returned. Otherwise the short
name taken from supported_dists is used.
Returns a tuple (distname,version,id) which default to the
args given as parameters.
"""
try:
etc = os.listdir('/etc')
except os.error:
# Probably not a Unix system
return distname,version,id
etc.sort()
for file in etc:
m = _release_filename.match(file)
if m is not None:
_distname,dummy = m.groups()
if _distname in supported_dists:
distname = _distname
break
else:
return _dist_try_harder(distname,version,id)
# Read the first line
with open('/etc/'+file, 'r') as f:
firstline = f.readline()
_distname, _version, _id = _parse_release_file(firstline)
if _distname and full_distribution_name:
distname = _distname
if _version:
version = _version
if _id:
id = _id
return distname, version, id
# To maintain backwards compatibility:
def dist(distname='',version='',id='',
supported_dists=_supported_dists):
""" Tries to determine the name of the Linux OS distribution name.
The function first looks for a distribution release file in
/etc and then reverts to _dist_try_harder() in case no
suitable files are found.
Returns a tuple (distname,version,id) which default to the
args given as parameters.
"""
return linux_distribution(distname, version, id,
supported_dists=supported_dists,
full_distribution_name=0)
def popen(cmd, mode='r', bufsize=-1):
""" Portable popen() interface.
"""
import warnings
warnings.warn('use os.popen instead', DeprecationWarning, stacklevel=2)
return os.popen(cmd, mode, bufsize)
def _norm_version(version, build=''):
""" Normalize the version and build strings and return a single
version string using the format major.minor.build (or patchlevel).
"""
l = version.split('.')
if build:
l.append(build)
try:
ints = map(int,l)
except ValueError:
strings = l
else:
strings = list(map(str,ints))
version = '.'.join(strings[:3])
return version
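# Expected normalizations (illustration only; `_norm_version_demo` is a
# hypothetical helper): leading zeros collapse, builds are appended.
def _norm_version_demo():
    assert _norm_version('5.00.2195') == '5.0.2195'
    assert _norm_version('6.0', build='6002') == '6.0.6002'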
_ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) '
'.*'
'\[.* ([\d.]+)\])')
# Examples of VER command output:
#
# Windows 2000: Microsoft Windows 2000 [Version 5.00.2195]
# Windows XP: Microsoft Windows XP [Version 5.1.2600]
# Windows Vista: Microsoft Windows [Version 6.0.6002]
#
# Note that the "Version" string gets localized on different
# Windows versions.
def _syscmd_ver(system='', release='', version='',
supported_platforms=('win32','win16','dos','os2')):
""" Tries to figure out the OS version used and returns
a tuple (system,release,version).
It uses the "ver" shell command for this which is known
to exist on Windows, DOS and OS/2. XXX Others too ?
In case this fails, the given parameters are used as
defaults.
"""
if sys.platform not in supported_platforms:
return system,release,version
# Try some common cmd strings
for cmd in ('ver','command /c ver','cmd /c ver'):
try:
pipe = popen(cmd)
info = pipe.read()
if pipe.close():
raise os.error('command failed')
# XXX How can I suppress shell errors from being written
# to stderr ?
except os.error as why:
#print 'Command %s failed: %s' % (cmd,why)
continue
except IOError as why:
#print 'Command %s failed: %s' % (cmd,why)
continue
else:
break
else:
return system,release,version
# Parse the output
info = info.strip()
m = _ver_output.match(info)
if m is not None:
system,release,version = m.groups()
# Strip trailing dots from version and release
if release[-1] == '.':
release = release[:-1]
if version[-1] == '.':
version = version[:-1]
# Normalize the version and build strings (eliminating additional
# zeros)
version = _norm_version(version)
return system,release,version
def _win32_getvalue(key,name,default=''):
""" Read a value for name from the registry key.
In case this fails, default is returned.
"""
try:
# Use win32api if available
from win32api import RegQueryValueEx
except ImportError:
# On Python 2.0 and later, emulate using winreg
import winreg
RegQueryValueEx = winreg.QueryValueEx
try:
return RegQueryValueEx(key,name)
except:
return default
def win32_ver(release='',version='',csd='',ptype=''):
""" Get additional version information from the Windows Registry
and return a tuple (version,csd,ptype) referring to version
number, CSD level (service pack), and OS type (multi/single
processor).
As a hint: ptype returns 'Uniprocessor Free' on single
processor NT machines and 'Multiprocessor Free' on multi
processor machines. The 'Free' refers to the OS version being
free of debugging code. It could also state 'Checked' which
means the OS version uses debugging code, i.e. code that
checks arguments, ranges, etc. (Thomas Heller).
Note: this function works best with Mark Hammond's win32
package installed, but also on Python 2.3 and later. It
obviously only runs on Win32 compatible platforms.
"""
# XXX Is there any way to find out the processor type on WinXX ?
# XXX Is win32 available on Windows CE ?
#
# Adapted from code posted by Karl Putland to comp.lang.python.
#
# The mappings between reg. values and release names can be found
# here: http://msdn.microsoft.com/library/en-us/sysinfo/base/osversioninfo_str.asp
# Import the needed APIs
try:
import win32api
from win32api import RegQueryValueEx, RegOpenKeyEx, \
RegCloseKey, GetVersionEx
from win32con import HKEY_LOCAL_MACHINE, VER_PLATFORM_WIN32_NT, \
VER_PLATFORM_WIN32_WINDOWS, VER_NT_WORKSTATION
except ImportError:
# Emulate the win32api module using Python APIs
try:
sys.getwindowsversion
except AttributeError:
# No emulation possible, so return the defaults...
return release,version,csd,ptype
else:
# Emulation using winreg (added in Python 2.0) and
# sys.getwindowsversion() (added in Python 2.3)
import winreg
GetVersionEx = sys.getwindowsversion
RegQueryValueEx = winreg.QueryValueEx
RegOpenKeyEx = winreg.OpenKeyEx
RegCloseKey = winreg.CloseKey
HKEY_LOCAL_MACHINE = winreg.HKEY_LOCAL_MACHINE
VER_PLATFORM_WIN32_WINDOWS = 1
VER_PLATFORM_WIN32_NT = 2
VER_NT_WORKSTATION = 1
VER_NT_SERVER = 3
REG_SZ = 1
# Find out the registry key and some general version infos
winver = GetVersionEx()
maj,min,buildno,plat,csd = winver
version = '%i.%i.%i' % (maj,min,buildno & 0xFFFF)
if hasattr(winver, "service_pack"):
if winver.service_pack != "":
csd = 'SP%s' % winver.service_pack_major
else:
if csd[:13] == 'Service Pack ':
csd = 'SP' + csd[13:]
if plat == VER_PLATFORM_WIN32_WINDOWS:
regkey = 'SOFTWARE\\Microsoft\\Windows\\CurrentVersion'
# Try to guess the release name
if maj == 4:
if min == 0:
release = '95'
elif min == 10:
release = '98'
elif min == 90:
release = 'Me'
else:
release = 'postMe'
elif maj == 5:
release = '2000'
elif plat == VER_PLATFORM_WIN32_NT:
regkey = 'SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion'
if maj <= 4:
release = 'NT'
elif maj == 5:
if min == 0:
release = '2000'
elif min == 1:
release = 'XP'
elif min == 2:
release = '2003Server'
else:
release = 'post2003'
elif maj == 6:
if hasattr(winver, "product_type"):
product_type = winver.product_type
else:
product_type = VER_NT_WORKSTATION
# Without an OSVERSIONINFOEX capable sys.getwindowsversion(),
# or help from the registry, we cannot properly identify
# non-workstation versions.
try:
key = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
name, type = RegQueryValueEx(key, "ProductName")
# Discard any type that isn't REG_SZ
if type == REG_SZ and name.find("Server") != -1:
product_type = VER_NT_SERVER
except WindowsError:
# Use default of VER_NT_WORKSTATION
pass
if min == 0:
if product_type == VER_NT_WORKSTATION:
release = 'Vista'
else:
release = '2008Server'
elif min == 1:
if product_type == VER_NT_WORKSTATION:
release = '7'
else:
release = '2008ServerR2'
elif min == 2:
if product_type == VER_NT_WORKSTATION:
release = '8'
else:
release = '2012Server'
else:
release = 'post2012Server'
else:
if not release:
# E.g. Win3.1 with win32s
release = '%i.%i' % (maj,min)
return release,version,csd,ptype
# Open the registry key
try:
keyCurVer = RegOpenKeyEx(HKEY_LOCAL_MACHINE, regkey)
# Get a value to make sure the key exists...
RegQueryValueEx(keyCurVer, 'SystemRoot')
except:
return release,version,csd,ptype
# Parse values
#subversion = _win32_getvalue(keyCurVer,
# 'SubVersionNumber',
# ('',1))[0]
#if subversion:
# release = release + subversion # 95a, 95b, etc.
build = _win32_getvalue(keyCurVer,
'CurrentBuildNumber',
('',1))[0]
ptype = _win32_getvalue(keyCurVer,
'CurrentType',
(ptype,1))[0]
# Normalize version
version = _norm_version(version,build)
# Close key
RegCloseKey(keyCurVer)
return release,version,csd,ptype
def _mac_ver_lookup(selectors,default=None):
from _gestalt import gestalt
l = []
append = l.append
for selector in selectors:
try:
append(gestalt(selector))
except (RuntimeError, OSError):
append(default)
return l
def _bcd2str(bcd):
return hex(bcd)[2:]
def _mac_ver_gestalt():
"""
Thanks to Mark R. Levinson for mailing documentation links and
code examples for this function. Documentation for the
gestalt() API is available online at:
http://www.rgaros.nl/gestalt/
"""
# Check whether the version info module is available
try:
import _gestalt
except ImportError:
return None
# Get the infos
sysv, sysa = _mac_ver_lookup(('sysv','sysa'))
# Decode the infos
if sysv:
major = (sysv & 0xFF00) >> 8
minor = (sysv & 0x00F0) >> 4
patch = (sysv & 0x000F)
if (major, minor) >= (10, 4):
# the 'sysv' gestalt cannot return patchlevels
# higher than 9. Apple introduced 3 new
# gestalt codes in 10.4 to deal with this
# issue (needed because patch levels can
# run higher than 9, such as 10.4.11)
major,minor,patch = _mac_ver_lookup(('sys1','sys2','sys3'))
release = '%i.%i.%i' %(major, minor, patch)
else:
release = '%s.%i.%i' % (_bcd2str(major),minor,patch)
if sysa:
machine = {0x1: '68k',
0x2: 'PowerPC',
0xa: 'i386'}.get(sysa,'')
versioninfo=('', '', '')
return release,versioninfo,machine
def _mac_ver_xml():
fn = '/System/Library/CoreServices/SystemVersion.plist'
if not os.path.exists(fn):
return None
try:
import plistlib
except ImportError:
return None
pl = plistlib.readPlist(fn)
release = pl['ProductVersion']
versioninfo=('', '', '')
machine = os.uname().machine
if machine in ('ppc', 'Power Macintosh'):
# for compatibility with the gestalt based code
machine = 'PowerPC'
return release,versioninfo,machine
def mac_ver(release='',versioninfo=('','',''),machine=''):
""" Get MacOS version information and return it as tuple (release,
versioninfo, machine) with versioninfo being a tuple (version,
dev_stage, non_release_version).
Entries which cannot be determined are set to the parameter values
which default to ''. All tuple entries are strings.
"""
# First try reading the information from an XML file which should
# always be present
info = _mac_ver_xml()
if info is not None:
return info
# If that doesn't work for some reason fall back to reading the
# information using gestalt calls.
info = _mac_ver_gestalt()
if info is not None:
return info
# If that also doesn't work return the default values
return release,versioninfo,machine
def _java_getprop(name,default):
from java.lang import System
try:
value = System.getProperty(name)
if value is None:
return default
return value
except AttributeError:
return default
def java_ver(release='',vendor='',vminfo=('','',''),osinfo=('','','')):
""" Version interface for Jython.
Returns a tuple (release,vendor,vminfo,osinfo) with vminfo being
a tuple (vm_name,vm_release,vm_vendor) and osinfo being a
tuple (os_name,os_version,os_arch).
Values which cannot be determined are set to the defaults
given as parameters (which all default to '').
"""
# Import the needed APIs
try:
import java.lang
except ImportError:
return release,vendor,vminfo,osinfo
vendor = _java_getprop('java.vendor', vendor)
release = _java_getprop('java.version', release)
vm_name, vm_release, vm_vendor = vminfo
vm_name = _java_getprop('java.vm.name', vm_name)
vm_vendor = _java_getprop('java.vm.vendor', vm_vendor)
vm_release = _java_getprop('java.vm.version', vm_release)
vminfo = vm_name, vm_release, vm_vendor
os_name, os_version, os_arch = osinfo
os_arch = _java_getprop('java.os.arch', os_arch)
os_name = _java_getprop('java.os.name', os_name)
os_version = _java_getprop('java.os.version', os_version)
osinfo = os_name, os_version, os_arch
return release, vendor, vminfo, osinfo
### System name aliasing
def system_alias(system,release,version):
""" Returns (system,release,version) aliased to common
marketing names used for some systems.
It also does some reordering of the information in some cases
where it would otherwise cause confusion.
"""
if system == 'Rhapsody':
# Apple's BSD derivative
# XXX How can we determine the marketing release number ?
return 'MacOS X Server',system+release,version
elif system == 'SunOS':
# Sun's OS
if release < '5':
# These releases use the old name SunOS
return system,release,version
# Modify release (marketing release = SunOS release - 3)
l = release.split('.')
if l:
try:
major = int(l[0])
except ValueError:
pass
else:
major = major - 3
l[0] = str(major)
release = '.'.join(l)
if release < '6':
system = 'Solaris'
else:
# XXX Whatever the new SunOS marketing name is...
system = 'Solaris'
elif system == 'IRIX64':
# IRIX reports IRIX64 on platforms with 64-bit support; yet it
# is really a version and not a different platform, since 32-bit
# apps are also supported.
system = 'IRIX'
if version:
version = version + ' (64bit)'
else:
version = '64bit'
elif system in ('win32','win16'):
# In case one of the other tricks
system = 'Windows'
return system,release,version
### Various internal helpers
def _platform(*args):
""" Helper to format the platform string in a filename
compatible format e.g. "system-version-machine".
"""
# Format the platform string
platform = '-'.join(x.strip() for x in filter(len, args))
# Cleanup some possible filename obstacles...
platform = platform.replace(' ','_')
platform = platform.replace('/','-')
platform = platform.replace('\\','-')
platform = platform.replace(':','-')
platform = platform.replace(';','-')
platform = platform.replace('"','-')
platform = platform.replace('(','-')
platform = platform.replace(')','-')
# No need to report 'unknown' information...
platform = platform.replace('unknown','')
# Fold '--'s and remove trailing '-'
while 1:
cleaned = platform.replace('--','-')
if cleaned == platform:
break
platform = cleaned
while platform[-1] == '-':
platform = platform[:-1]
return platform
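# Formatting sketch (illustration only; `_platform_demo` is a hypothetical
# helper): spaces become underscores, empty parts are dropped.
def _platform_demo():
    assert _platform('Windows', 'XP', '5.1.2600') == 'Windows-XP-5.1.2600'
    assert _platform('Mac OS', '', '10.4') == 'Mac_OS-10.4'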
def _node(default=''):
""" Helper to determine the node name of this machine.
"""
try:
import socket
except ImportError:
# No sockets...
return default
try:
return socket.gethostname()
except socket.error:
# Still not working...
return default
def _follow_symlinks(filepath):
""" In case filepath is a symlink, follow it until a
real file is reached.
"""
filepath = os.path.abspath(filepath)
while os.path.islink(filepath):
filepath = os.path.normpath(
os.path.join(os.path.dirname(filepath),os.readlink(filepath)))
return filepath
def _syscmd_uname(option,default=''):
""" Interface to the system's uname command.
"""
if sys.platform in ('dos','win32','win16','os2'):
# XXX Others too ?
return default
try:
f = os.popen('uname %s 2> %s' % (option, DEV_NULL))
except (AttributeError,os.error):
return default
output = f.read().strip()
rc = f.close()
if not output or rc:
return default
else:
return output
def _syscmd_file(target,default=''):
""" Interface to the system's file command.
The function uses the -b option of the file command to have it
omit the filename in its output. Follow the symlinks. It returns
default in case the command should fail.
"""
if sys.platform in ('dos','win32','win16','os2'):
# XXX Others too ?
return default
target = _follow_symlinks(target)
try:
proc = subprocess.Popen(['file', target],
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except (AttributeError,os.error):
return default
output = proc.communicate()[0].decode('latin-1')
rc = proc.wait()
if not output or rc:
return default
else:
return output
### Information about the used architecture
# Default values for architecture; non-empty strings override the
# defaults given as parameters
_default_architecture = {
'win32': ('','WindowsPE'),
'win16': ('','Windows'),
'dos': ('','MSDOS'),
}
def architecture(executable=sys.executable,bits='',linkage=''):
""" Queries the given executable (defaults to the Python interpreter
binary) for various architecture information.
Returns a tuple (bits,linkage) which contains information about
the bit architecture and the linkage format used for the
executable. Both values are returned as strings.
Values that cannot be determined are returned as given by the
parameter presets. If bits is given as '', the sizeof(pointer)
(or sizeof(long) on Python version < 1.5.2) is used as
indicator for the supported pointer size.
The function relies on the system's "file" command to do the
actual work. This is available on most if not all Unix
platforms. On some non-Unix platforms where the "file" command
does not exist and the executable is set to the Python interpreter
binary defaults from _default_architecture are used.
"""
# Use the sizeof(pointer) as default number of bits if nothing
# else is given as default.
if not bits:
import struct
try:
size = struct.calcsize('P')
except struct.error:
# Older installations can only query longs
size = struct.calcsize('l')
bits = str(size*8) + 'bit'
# Get data from the 'file' system command
if executable:
fileout = _syscmd_file(executable, '')
else:
fileout = ''
if not fileout and \
executable == sys.executable:
# "file" command did not return anything; we'll try to provide
# some sensible defaults then...
if sys.platform in _default_architecture:
b,l = _default_architecture[sys.platform]
if b:
bits = b
if l:
linkage = l
return bits,linkage
if 'executable' not in fileout:
# Format not supported
return bits,linkage
# Bits
if '32-bit' in fileout:
bits = '32bit'
elif 'N32' in fileout:
# On Irix only
bits = 'n32bit'
elif '64-bit' in fileout:
bits = '64bit'
# Linkage
if 'ELF' in fileout:
linkage = 'ELF'
elif 'PE' in fileout:
# E.g. Windows uses this format
if 'Windows' in fileout:
linkage = 'WindowsPE'
else:
linkage = 'PE'
elif 'COFF' in fileout:
linkage = 'COFF'
elif 'MS-DOS' in fileout:
linkage = 'MSDOS'
else:
# XXX the A.OUT format also falls under this class...
pass
return bits,linkage
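# Illustrative usage (the result depends on the local interpreter build):
#
#     >>> architecture()
#     ('64bit', 'ELF')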
### Portable uname() interface
uname_result = collections.namedtuple("uname_result",
"system node release version machine processor")
_uname_cache = None
def uname():
""" Fairly portable uname interface. Returns a tuple
of strings (system,node,release,version,machine,processor)
identifying the underlying platform.
Note that unlike the os.uname function this also returns
possible processor information as an additional tuple entry.
Entries which cannot be determined are set to ''.
"""
global _uname_cache
no_os_uname = 0
if _uname_cache is not None:
return _uname_cache
processor = ''
# Get some infos from the builtin os.uname API...
try:
system,node,release,version,machine = os.uname()
except AttributeError:
no_os_uname = 1
if no_os_uname or not list(filter(None, (system, node, release, version, machine))):
        # Hmm, there is either no uname or uname has returned
        # 'unknowns'... we'll have to poke around the system then.
if no_os_uname:
system = sys.platform
release = ''
version = ''
node = _node()
machine = ''
use_syscmd_ver = 1
# Try win32_ver() on win32 platforms
if system == 'win32':
release,version,csd,ptype = win32_ver()
if release and version:
use_syscmd_ver = 0
# Try to use the PROCESSOR_* environment variables
# available on Win XP and later; see
# http://support.microsoft.com/kb/888731 and
# http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM
if not machine:
# WOW64 processes mask the native architecture
if "PROCESSOR_ARCHITEW6432" in os.environ:
machine = os.environ.get("PROCESSOR_ARCHITEW6432", '')
else:
machine = os.environ.get('PROCESSOR_ARCHITECTURE', '')
if not processor:
processor = os.environ.get('PROCESSOR_IDENTIFIER', machine)
# Try the 'ver' system command available on some
# platforms
if use_syscmd_ver:
system,release,version = _syscmd_ver(system)
# Normalize system to what win32_ver() normally returns
# (_syscmd_ver() tends to return the vendor name as well)
if system == 'Microsoft Windows':
system = 'Windows'
elif system == 'Microsoft' and release == 'Windows':
# Under Windows Vista and Windows Server 2008,
# Microsoft changed the output of the ver command. The
# release is no longer printed. This causes the
# system and release to be misidentified.
system = 'Windows'
if '6.0' == version[:3]:
release = 'Vista'
else:
release = ''
# In case we still don't know anything useful, we'll try to
# help ourselves
if system in ('win32','win16'):
if not version:
if system == 'win32':
version = '32bit'
else:
version = '16bit'
system = 'Windows'
elif system[:4] == 'java':
release,vendor,vminfo,osinfo = java_ver()
system = 'Java'
version = ', '.join(vminfo)
if not version:
version = vendor
# System specific extensions
if system == 'OpenVMS':
# OpenVMS seems to have release and version mixed up
if not release or release == '0':
release = version
version = ''
# Get processor information
try:
import vms_lib
except ImportError:
pass
else:
csid, cpu_number = vms_lib.getsyi('SYI$_CPU',0)
if (cpu_number >= 128):
processor = 'Alpha'
else:
processor = 'VAX'
if not processor:
# Get processor information from the uname system command
processor = _syscmd_uname('-p','')
    # If any unknowns still exist, replace them with '', which is more portable
if system == 'unknown':
system = ''
if node == 'unknown':
node = ''
if release == 'unknown':
release = ''
if version == 'unknown':
version = ''
if machine == 'unknown':
machine = ''
if processor == 'unknown':
processor = ''
# normalize name
if system == 'Microsoft' and release == 'Windows':
system = 'Windows'
release = 'Vista'
_uname_cache = uname_result(system,node,release,version,machine,processor)
return _uname_cache
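# Illustrative result (every field varies per machine):
#
#     >>> uname()
#     uname_result(system='Linux', node='myhost', release='3.13.0-24-generic',
#                  version='#46-Ubuntu SMP ...', machine='x86_64',
#                  processor='x86_64')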
### Direct interfaces to some of the uname() return values
def system():
""" Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'.
An empty string is returned if the value cannot be determined.
"""
return uname().system
def node():
""" Returns the computer's network name (which may not be fully
qualified)
An empty string is returned if the value cannot be determined.
"""
return uname().node
def release():
""" Returns the system's release, e.g. '2.2.0' or 'NT'
An empty string is returned if the value cannot be determined.
"""
return uname().release
def version():
""" Returns the system's release version, e.g. '#3 on degas'
An empty string is returned if the value cannot be determined.
"""
return uname().version
def machine():
""" Returns the machine type, e.g. 'i386'
An empty string is returned if the value cannot be determined.
"""
return uname().machine
def processor():
""" Returns the (true) processor name, e.g. 'amdk6'
An empty string is returned if the value cannot be
determined. Note that many platforms do not provide this
information or simply return the same value as for machine(),
e.g. NetBSD does this.
"""
return uname().processor
### Various APIs for extracting information from sys.version
_sys_version_parser = re.compile(
    r'([\w.+]+)\s*'
    r'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
    r'\[([^\]]+)\]?', re.ASCII)
_ironpython_sys_version_parser = re.compile(
    r'IronPython\s*'
    r'([\d\.]+)'
    r'(?: \(([\d\.]+)\))?'
    r' on (.NET [\d\.]+)', re.ASCII)
# IronPython covering 2.6 and 2.7
_ironpython26_sys_version_parser = re.compile(
    r'([\d.]+)\s*'
    r'\(IronPython\s*'
    r'[\d.]+\s*'
    r'\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)'
)
_pypy_sys_version_parser = re.compile(
    r'([\w.+]+)\s*'
    r'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
    r'\[PyPy [^\]]+\]?')
_sys_version_cache = {}
def _sys_version(sys_version=None):
""" Returns a parsed version of Python's sys.version as tuple
(name, version, branch, revision, buildno, builddate, compiler)
referring to the Python implementation name, version, branch,
revision, build number, build date/time as string and the compiler
identification string.
Note that unlike the Python sys.version, the returned value
for the Python version will always include the patchlevel (it
defaults to '.0').
The function returns empty strings for tuple entries that
cannot be determined.
sys_version may be given to parse an alternative version
string, e.g. if the version was read from a different Python
interpreter.
"""
# Get the Python version
if sys_version is None:
sys_version = sys.version
# Try the cache first
result = _sys_version_cache.get(sys_version, None)
if result is not None:
return result
# Parse it
    if 'Brython' in sys_version:
        # Brython
        name = 'Brython'
        _parser = re.compile(r"^(\d+\.\d+\.\d+)[^[]+\[(.*)\]")
        match = _parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse Brython sys.version: %s' %
repr(sys_version))
#version, alt_version, compiler = match.groups()
version, compiler = match.groups()
alt_version = ''
buildno = ''
builddate = ''
elif 'IronPython' in sys_version:
# IronPython
name = 'IronPython'
if sys_version.startswith('IronPython'):
match = _ironpython_sys_version_parser.match(sys_version)
else:
match = _ironpython26_sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse IronPython sys.version: %s' %
repr(sys_version))
version, alt_version, compiler = match.groups()
buildno = ''
builddate = ''
elif sys.platform.startswith('java'):
# Jython
name = 'Jython'
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse Jython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, _ = match.groups()
compiler = sys.platform
elif "PyPy" in sys_version:
# PyPy
name = "PyPy"
match = _pypy_sys_version_parser.match(sys_version)
if match is None:
raise ValueError("failed to parse PyPy sys.version: %s" %
repr(sys_version))
version, buildno, builddate, buildtime = match.groups()
compiler = ""
else:
# CPython
match = _sys_version_parser.match(sys_version)
if match is None:
raise ValueError(
'failed to parse CPython sys.version: %s' %
repr(sys_version))
version, buildno, builddate, buildtime, compiler = \
match.groups()
name = 'CPython'
builddate = builddate + ' ' + buildtime
if hasattr(sys, '_mercurial'):
_, branch, revision = sys._mercurial
elif hasattr(sys, 'subversion'):
# sys.subversion was added in Python 2.5
_, branch, revision = sys.subversion
else:
branch = ''
revision = ''
# Add the patchlevel version if missing
l = version.split('.')
if len(l) == 2:
l.append('0')
version = '.'.join(l)
# Build and cache the result
result = (name, version, branch, revision, buildno, builddate, compiler)
_sys_version_cache[sys_version] = result
return result
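# Illustrative parse of a CPython version string (branch and revision come
# from the *running* interpreter and are shown empty here):
#
#     >>> _sys_version("3.4.0 (default, Mar 17 2014, 01:40:17) \n[GCC 4.8.2]")
#     ('CPython', '3.4.0', '', '', 'default', 'Mar 17 2014 01:40:17',
#      'GCC 4.8.2')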
def python_implementation():
""" Returns a string identifying the Python implementation.
Currently, the following implementations are identified:
'CPython' (C implementation of Python),
'IronPython' (.NET implementation of Python),
'Jython' (Java implementation of Python),
'PyPy' (Python implementation of Python).
"""
return _sys_version()[0]
def python_version():
""" Returns the Python version as string 'major.minor.patchlevel'
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
return _sys_version()[1]
def python_version_tuple():
""" Returns the Python version as tuple (major, minor, patchlevel)
of strings.
Note that unlike the Python sys.version, the returned value
will always include the patchlevel (it defaults to 0).
"""
return tuple(_sys_version()[1].split('.'))
def python_branch():
""" Returns a string identifying the Python implementation
branch.
For CPython this is the Subversion branch from which the
Python binary was built.
If not available, an empty string is returned.
"""
return _sys_version()[2]
def python_revision():
""" Returns a string identifying the Python implementation
revision.
For CPython this is the Subversion revision from which the
Python binary was built.
If not available, an empty string is returned.
"""
return _sys_version()[3]
def python_build():
""" Returns a tuple (buildno, builddate) stating the Python
build number and date as strings.
"""
return _sys_version()[4:6]
def python_compiler():
""" Returns a string identifying the compiler used for compiling
Python.
"""
return _sys_version()[6]
### The Opus Magnum of platform strings :-)
_platform_cache = {}
def platform(aliased=0, terse=0):
""" Returns a single string identifying the underlying platform
with as much useful information as possible (but no more :).
The output is intended to be human readable rather than
machine parseable. It may look different on different
platforms and this is intended.
If "aliased" is true, the function will use aliases for
various platforms that report system names which differ from
their common names, e.g. SunOS will be reported as
Solaris. The system_alias() function is used to implement
this.
Setting terse to true causes the function to return only the
absolute minimum information needed to identify the platform.
"""
result = _platform_cache.get((aliased, terse), None)
if result is not None:
return result
# Get uname information and then apply platform specific cosmetics
# to it...
system,node,release,version,machine,processor = uname()
if machine == processor:
processor = ''
if aliased:
system,release,version = system_alias(system,release,version)
if system == 'Windows':
# MS platforms
rel,vers,csd,ptype = win32_ver(version)
if terse:
platform = _platform(system,release)
else:
platform = _platform(system,release,version,csd)
elif system in ('Linux',):
# Linux based systems
distname,distversion,distid = dist('')
if distname and not terse:
platform = _platform(system,release,machine,processor,
'with',
distname,distversion,distid)
else:
# If the distribution name is unknown check for libc vs. glibc
libcname,libcversion = libc_ver(sys.executable)
platform = _platform(system,release,machine,processor,
'with',
libcname+libcversion)
elif system == 'Java':
# Java platforms
r,v,vminfo,(os_name,os_version,os_arch) = java_ver()
if terse or not os_name:
platform = _platform(system,release,version)
else:
platform = _platform(system,release,version,
'on',
os_name,os_version,os_arch)
elif system == 'MacOS':
# MacOS platforms
if terse:
platform = _platform(system,release)
else:
platform = _platform(system,release,machine)
else:
# Generic handler
if terse:
platform = _platform(system,release)
else:
bits,linkage = architecture(sys.executable)
platform = _platform(system,release,machine,processor,bits,linkage)
_platform_cache[(aliased, terse)] = platform
return platform
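# Illustrative output (the string is entirely platform dependent):
#
#     >>> platform()
#     'Linux-3.13.0-24-generic-x86_64-with-Ubuntu-14.04-trusty'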
### Command line interface
if __name__ == '__main__':
# Default is to print the aliased verbose platform string
terse = ('terse' in sys.argv or '--terse' in sys.argv)
    aliased = ('nonaliased' not in sys.argv and '--nonaliased' not in sys.argv)
print(platform(aliased,terse))
sys.exit(0)
| gpl-3.0 |
SuriyaaKudoIsc/olympia | scripts/update/update.py | 1 | 5688 | import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from commander.deploy import hostgroups, task
import commander_settings as settings
_src_dir = lambda *p: os.path.join(settings.SRC_DIR, *p)
VIRTUALENV = os.path.join(os.path.dirname(settings.SRC_DIR), 'venv')
@task
def create_virtualenv(ctx):
with ctx.lcd(settings.SRC_DIR):
status = ctx.local('git diff HEAD@{1} HEAD --name-only')
if 'requirements/' in status.out:
venv = VIRTUALENV
if not venv.startswith('/data'):
raise Exception('venv must start with /data')
ctx.local('rm -rf %s' % venv)
ctx.local('virtualenv --distribute --never-download %s' % venv)
ctx.local('%s/bin/pip install --exists-action=w --no-deps --no-index '
'--download-cache=/tmp/pip-cache -f %s '
'-r %s/requirements/prod.txt' %
(venv, settings.PYREPO, settings.SRC_DIR))
if getattr(settings, 'LOAD_TESTING', False):
ctx.local('%s/bin/pip install --exists-action=w --no-deps '
'--no-index --download-cache=/tmp/pip-cache -f %s '
'-r %s/requirements/load.txt' %
(venv, settings.PYREPO, settings.SRC_DIR))
# make sure this always runs
ctx.local("rm -f %s/lib/python2.6/no-global-site-packages.txt" % venv)
ctx.local("%s/bin/python /usr/bin/virtualenv --relocatable %s" %
(venv, venv))
@task
def update_locales(ctx):
with ctx.lcd(_src_dir("locale")):
ctx.local("svn revert -R .")
ctx.local("svn up")
ctx.local("./compile-mo.sh .")
@task
def loadtest(ctx, repo=''):
if hasattr(settings, 'MARTEAU'):
os.environ['MACAUTH_USER'] = settings.MARTEAU_USER
os.environ['MACAUTH_SECRET'] = settings.MARTEAU_SECRET
ctx.local('%s %s --server %s' % (settings.MARTEAU, repo,
settings.MARTEAU_SERVER))
@task
def update_products(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local('%s manage.py update_product_details' % settings.PYTHON)
@task
def compress_assets(ctx, arg=''):
with ctx.lcd(settings.SRC_DIR):
ctx.local("%s manage.py compress_assets -t %s" % (settings.PYTHON,
arg))
@task
def collectstatic(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local("%s manage.py collectstatic --noinput" % (settings.PYTHON, ))
@task
def schematic(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local("%s %s/bin/schematic migrations" %
(settings.PYTHON, VIRTUALENV))
@task
def update_code(ctx, ref='origin/master'):
with ctx.lcd(settings.SRC_DIR):
ctx.local("git fetch && git fetch -t")
ctx.local("git reset --hard %s" % ref)
@task
def update_info(ctx, ref='origin/master'):
with ctx.lcd(settings.SRC_DIR):
ctx.local("git status")
ctx.local("git log -1")
ctx.local("/bin/bash -c "
"'source /etc/bash_completion.d/git && __git_ps1'")
ctx.local('git show -s {0} --pretty="format:%h" '
'> media/git-rev.txt'.format(ref))
@task
def checkin_changes(ctx):
ctx.local(settings.DEPLOY_SCRIPT)
@task
def disable_cron(ctx):
ctx.local("rm -f /etc/cron.d/%s" % settings.CRON_NAME)
@task
def install_cron(ctx):
with ctx.lcd(settings.SRC_DIR):
ctx.local('%s ./scripts/crontab/gen-cron.py '
'-z %s -u apache -p %s > /etc/cron.d/.%s' %
(settings.PYTHON, settings.SRC_DIR,
settings.PYTHON, settings.CRON_NAME))
ctx.local('mv /etc/cron.d/.%s /etc/cron.d/%s' % (settings.CRON_NAME,
settings.CRON_NAME))
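# The crontab is generated into a hidden dotfile first and then mv'ed into
# place so cron never picks up a partially written file.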
@hostgroups(settings.WEB_HOSTGROUP,
remote_kwargs={'ssh_key': settings.SSH_KEY})
def sync_code(ctx):
ctx.remote(settings.REMOTE_UPDATE_SCRIPT)
@hostgroups(settings.WEB_HOSTGROUP,
remote_kwargs={'ssh_key': settings.SSH_KEY})
def restart_workers(ctx):
for gservice in settings.GUNICORN:
ctx.remote("/sbin/service %s graceful" % gservice)
for gservice in getattr(settings, 'MULTI_GUNICORN', []):
ctx.remote("/sbin/service %s-a graceful" % gservice)
ctx.remote("/sbin/service %s-b graceful" % gservice)
@task
def deploy_app(ctx):
sync_code()
restart_workers()
@hostgroups(settings.CELERY_HOSTGROUP,
remote_kwargs={'ssh_key': settings.SSH_KEY})
def update_celery(ctx):
ctx.remote(settings.REMOTE_UPDATE_SCRIPT)
if getattr(settings, 'CELERY_SERVICE_PREFIX', False):
ctx.remote("/sbin/service %s restart" % settings.CELERY_SERVICE_PREFIX)
ctx.remote("/sbin/service %s-devhub restart" %
settings.CELERY_SERVICE_PREFIX)
ctx.remote("/sbin/service %s-priority restart" %
settings.CELERY_SERVICE_PREFIX)
@task
def deploy(ctx):
install_cron()
checkin_changes()
deploy_app()
update_celery()
with ctx.lcd(settings.SRC_DIR):
ctx.local('%s manage.py cron cleanup_validation_results' %
settings.PYTHON)
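# deploy() sequences the roll-out: reinstall cron, check in changes, update
# the web hosts, then the celery hosts, and finally clean up stale
# validation results.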
@task
def pre_update(ctx, ref=settings.UPDATE_REF):
ctx.local('date')
disable_cron()
update_code(ref)
update_info(ref)
@task
def update(ctx):
create_virtualenv()
update_locales()
update_products()
compress_assets()
collectstatic()
schematic()
with ctx.lcd(settings.SRC_DIR):
ctx.local('%s manage.py dump_apps' % settings.PYTHON)
ctx.local('%s manage.py statsd_ping --key=update' % settings.PYTHON)
| bsd-3-clause |
windygu/xbmc-addon | plugin.video.xunlei/mechanize/_html.py | 132 | 20888 | """HTML handling.
Copyright 2003-2006 John J. Lee <[email protected]>
This code is free software; you can redistribute it and/or modify it under
the terms of the BSD or ZPL 2.1 licenses (see the file COPYING.txt
included with the distribution).
"""
import codecs
import copy
import htmlentitydefs
import re
import _sgmllib_copy as sgmllib
import _beautifulsoup
import _form
from _headersutil import split_header_words, is_html as _is_html
import _request
import _rfc3986
DEFAULT_ENCODING = "latin-1"
COMPRESS_RE = re.compile(r"\s+")
class CachingGeneratorFunction(object):
"""Caching wrapper around a no-arguments iterable."""
def __init__(self, iterable):
self._cache = []
# wrap iterable to make it non-restartable (otherwise, repeated
# __call__ would give incorrect results)
self._iterator = iter(iterable)
def __call__(self):
cache = self._cache
for item in cache:
yield item
for item in self._iterator:
cache.append(item)
yield item
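# Illustrative behaviour (a sketch):
#
#     f = CachingGeneratorFunction(iter([1, 2, 3]))
#     list(f())  # -> [1, 2, 3] (consumes the iterator, filling the cache)
#     list(f())  # -> [1, 2, 3] (replayed from the cache)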
class EncodingFinder:
def __init__(self, default_encoding):
self._default_encoding = default_encoding
def encoding(self, response):
# HTTPEquivProcessor may be in use, so both HTTP and HTTP-EQUIV
# headers may be in the response. HTTP-EQUIV headers come last,
# so try in order from first to last.
for ct in response.info().getheaders("content-type"):
for k, v in split_header_words([ct])[0]:
if k == "charset":
encoding = v
try:
codecs.lookup(v)
except LookupError:
continue
else:
return encoding
return self._default_encoding
class ResponseTypeFinder:
def __init__(self, allow_xhtml):
self._allow_xhtml = allow_xhtml
def is_html(self, response, encoding):
ct_hdrs = response.info().getheaders("content-type")
url = response.geturl()
# XXX encoding
return _is_html(ct_hdrs, url, self._allow_xhtml)
class Args(object):
# idea for this argument-processing trick is from Peter Otten
def __init__(self, args_map):
self.__dict__["dictionary"] = dict(args_map)
def __getattr__(self, key):
try:
return self.dictionary[key]
except KeyError:
return getattr(self.__class__, key)
def __setattr__(self, key, value):
if key == "dictionary":
raise AttributeError()
self.dictionary[key] = value
def form_parser_args(
select_default=False,
form_parser_class=None,
request_class=None,
backwards_compat=False,
):
return Args(locals())
class Link:
def __init__(self, base_url, url, text, tag, attrs):
assert None not in [url, tag, attrs]
self.base_url = base_url
self.absolute_url = _rfc3986.urljoin(base_url, url)
self.url, self.text, self.tag, self.attrs = url, text, tag, attrs
def __cmp__(self, other):
try:
for name in "url", "text", "tag", "attrs":
if getattr(self, name) != getattr(other, name):
return -1
except AttributeError:
return -1
return 0
def __repr__(self):
return "Link(base_url=%r, url=%r, text=%r, tag=%r, attrs=%r)" % (
self.base_url, self.url, self.text, self.tag, self.attrs)
class LinksFactory:
def __init__(self,
link_parser_class=None,
link_class=Link,
urltags=None,
):
import _pullparser
if link_parser_class is None:
link_parser_class = _pullparser.TolerantPullParser
self.link_parser_class = link_parser_class
self.link_class = link_class
if urltags is None:
urltags = {
"a": "href",
"area": "href",
"frame": "src",
"iframe": "src",
}
self.urltags = urltags
self._response = None
self._encoding = None
def set_response(self, response, base_url, encoding):
self._response = response
self._encoding = encoding
self._base_url = base_url
def links(self):
"""Return an iterator that provides links of the document."""
response = self._response
encoding = self._encoding
base_url = self._base_url
p = self.link_parser_class(response, encoding=encoding)
try:
for token in p.tags(*(self.urltags.keys()+["base"])):
if token.type == "endtag":
continue
if token.data == "base":
base_href = dict(token.attrs).get("href")
if base_href is not None:
base_url = base_href
continue
attrs = dict(token.attrs)
tag = token.data
text = None
# XXX use attr_encoding for ref'd doc if that doc does not
# provide one by other means
#attr_encoding = attrs.get("charset")
url = attrs.get(self.urltags[tag]) # XXX is "" a valid URL?
if not url:
# Probably an <A NAME="blah"> link or <AREA NOHREF...>.
# For our purposes a link is something with a URL, so
# ignore this.
continue
url = _rfc3986.clean_url(url, encoding)
if tag == "a":
if token.type != "startendtag":
# hmm, this'd break if end tag is missing
text = p.get_compressed_text(("endtag", tag))
# but this doesn't work for e.g.
# <a href="blah"><b>Andy</b></a>
#text = p.get_compressed_text()
yield Link(base_url, url, text, tag, token.attrs)
except sgmllib.SGMLParseError, exc:
raise _form.ParseError(exc)
class FormsFactory:
"""Makes a sequence of objects satisfying HTMLForm interface.
After calling .forms(), the .global_form attribute is a form object
containing all controls not a descendant of any FORM element.
For constructor argument docs, see ParseResponse argument docs.
"""
def __init__(self,
select_default=False,
form_parser_class=None,
request_class=None,
backwards_compat=False,
):
self.select_default = select_default
if form_parser_class is None:
form_parser_class = _form.FormParser
self.form_parser_class = form_parser_class
if request_class is None:
request_class = _request.Request
self.request_class = request_class
self.backwards_compat = backwards_compat
self._response = None
self.encoding = None
self.global_form = None
def set_response(self, response, encoding):
self._response = response
self.encoding = encoding
self.global_form = None
def forms(self):
encoding = self.encoding
forms = _form.ParseResponseEx(
self._response,
select_default=self.select_default,
form_parser_class=self.form_parser_class,
request_class=self.request_class,
encoding=encoding,
_urljoin=_rfc3986.urljoin,
_urlparse=_rfc3986.urlsplit,
_urlunparse=_rfc3986.urlunsplit,
)
self.global_form = forms[0]
return forms[1:]
class TitleFactory:
def __init__(self):
self._response = self._encoding = None
def set_response(self, response, encoding):
self._response = response
self._encoding = encoding
def _get_title_text(self, parser):
import _pullparser
text = []
tok = None
while 1:
try:
tok = parser.get_token()
except _pullparser.NoMoreTokensError:
break
if tok.type == "data":
text.append(str(tok))
elif tok.type == "entityref":
t = unescape("&%s;" % tok.data,
parser._entitydefs, parser.encoding)
text.append(t)
elif tok.type == "charref":
t = unescape_charref(tok.data, parser.encoding)
text.append(t)
elif tok.type in ["starttag", "endtag", "startendtag"]:
tag_name = tok.data
if tok.type == "endtag" and tag_name == "title":
break
text.append(str(tok))
return COMPRESS_RE.sub(" ", "".join(text).strip())
def title(self):
import _pullparser
p = _pullparser.TolerantPullParser(
self._response, encoding=self._encoding)
try:
try:
p.get_tag("title")
except _pullparser.NoMoreTokensError:
return None
else:
return self._get_title_text(p)
except sgmllib.SGMLParseError, exc:
raise _form.ParseError(exc)
def unescape(data, entities, encoding):
if data is None or "&" not in data:
return data
def replace_entities(match):
ent = match.group()
if ent[1] == "#":
return unescape_charref(ent[2:-1], encoding)
repl = entities.get(ent[1:-1])
if repl is not None:
repl = unichr(repl)
if type(repl) != type(""):
try:
repl = repl.encode(encoding)
except UnicodeError:
repl = ent
else:
repl = ent
return repl
return re.sub(r"&#?[A-Za-z0-9]+?;", replace_entities, data)
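# Illustrative behaviour (a sketch; `entities` maps entity names to
# codepoints):
#
#     unescape("a &amp; b", {"amp": 38}, "ascii")   # -> "a & b"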
def unescape_charref(data, encoding):
name, base = data, 10
if name.startswith("x"):
name, base= name[1:], 16
uc = unichr(int(name, base))
if encoding is None:
return uc
else:
try:
repl = uc.encode(encoding)
except UnicodeError:
repl = "&#%s;" % data
return repl
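# Illustrative: unescape_charref("x20", "ascii") decodes the hexadecimal
# character reference to " ", while unescape_charref("65", None) returns the
# unicode string u"A".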
class MechanizeBs(_beautifulsoup.BeautifulSoup):
_entitydefs = htmlentitydefs.name2codepoint
# don't want the magic Microsoft-char workaround
PARSER_MASSAGE = [(re.compile('(<[^<>]*)/>'),
lambda(x):x.group(1) + ' />'),
(re.compile('<!\s+([^<>]*)>'),
lambda(x):'<!' + x.group(1) + '>')
]
def __init__(self, encoding, text=None, avoidParserProblems=True,
initialTextIsEverything=True):
self._encoding = encoding
_beautifulsoup.BeautifulSoup.__init__(
self, text, avoidParserProblems, initialTextIsEverything)
def handle_charref(self, ref):
t = unescape("&#%s;"%ref, self._entitydefs, self._encoding)
self.handle_data(t)
def handle_entityref(self, ref):
t = unescape("&%s;"%ref, self._entitydefs, self._encoding)
self.handle_data(t)
def unescape_attrs(self, attrs):
escaped_attrs = []
for key, val in attrs:
val = unescape(val, self._entitydefs, self._encoding)
escaped_attrs.append((key, val))
return escaped_attrs
class RobustLinksFactory:
compress_re = COMPRESS_RE
def __init__(self,
link_parser_class=None,
link_class=Link,
urltags=None,
):
if link_parser_class is None:
link_parser_class = MechanizeBs
self.link_parser_class = link_parser_class
self.link_class = link_class
if urltags is None:
urltags = {
"a": "href",
"area": "href",
"frame": "src",
"iframe": "src",
}
self.urltags = urltags
self._bs = None
self._encoding = None
self._base_url = None
def set_soup(self, soup, base_url, encoding):
self._bs = soup
self._base_url = base_url
self._encoding = encoding
def links(self):
bs = self._bs
base_url = self._base_url
encoding = self._encoding
for ch in bs.recursiveChildGenerator():
if (isinstance(ch, _beautifulsoup.Tag) and
ch.name in self.urltags.keys()+["base"]):
link = ch
attrs = bs.unescape_attrs(link.attrs)
attrs_dict = dict(attrs)
if link.name == "base":
base_href = attrs_dict.get("href")
if base_href is not None:
base_url = base_href
continue
url_attr = self.urltags[link.name]
url = attrs_dict.get(url_attr)
if not url:
continue
url = _rfc3986.clean_url(url, encoding)
text = link.fetchText(lambda t: True)
if not text:
# follow _pullparser's weird behaviour rigidly
if link.name == "a":
text = ""
else:
text = None
else:
text = self.compress_re.sub(" ", " ".join(text).strip())
yield Link(base_url, url, text, link.name, attrs)
class RobustFormsFactory(FormsFactory):
def __init__(self, *args, **kwds):
args = form_parser_args(*args, **kwds)
if args.form_parser_class is None:
args.form_parser_class = _form.RobustFormParser
FormsFactory.__init__(self, **args.dictionary)
def set_response(self, response, encoding):
self._response = response
self.encoding = encoding
class RobustTitleFactory:
def __init__(self):
self._bs = self._encoding = None
def set_soup(self, soup, encoding):
self._bs = soup
self._encoding = encoding
def title(self):
title = self._bs.first("title")
if title == _beautifulsoup.Null:
return None
else:
inner_html = "".join([str(node) for node in title.contents])
return COMPRESS_RE.sub(" ", inner_html.strip())
class Factory:
"""Factory for forms, links, etc.
This interface may expand in future.
Public methods:
set_request_class(request_class)
set_response(response)
forms()
links()
Public attributes:
Note that accessing these attributes may raise ParseError.
encoding: string specifying the encoding of response if it contains a text
document (this value is left unspecified for documents that do not have
an encoding, e.g. an image file)
is_html: true if response contains an HTML document (XHTML may be
regarded as HTML too)
title: page title, or None if no title or not HTML
global_form: form object containing all controls that are not descendants
of any FORM element, or None if the forms_factory does not support
supplying a global form
"""
LAZY_ATTRS = ["encoding", "is_html", "title", "global_form"]
def __init__(self, forms_factory, links_factory, title_factory,
encoding_finder=EncodingFinder(DEFAULT_ENCODING),
response_type_finder=ResponseTypeFinder(allow_xhtml=False),
):
"""
Pass keyword arguments only.
default_encoding: character encoding to use if encoding cannot be
determined (or guessed) from the response. You should turn on
HTTP-EQUIV handling if you want the best chance of getting this right
without resorting to this default. The default value of this
parameter (currently latin-1) may change in future.
"""
self._forms_factory = forms_factory
self._links_factory = links_factory
self._title_factory = title_factory
self._encoding_finder = encoding_finder
self._response_type_finder = response_type_finder
self.set_response(None)
def set_request_class(self, request_class):
"""Set request class (mechanize.Request by default).
HTMLForm instances returned by .forms() will return instances of this
class when .click()ed.
"""
self._forms_factory.request_class = request_class
def set_response(self, response):
"""Set response.
The response must either be None or implement the same interface as
objects returned by mechanize.urlopen().
"""
self._response = response
self._forms_genf = self._links_genf = None
self._get_title = None
for name in self.LAZY_ATTRS:
try:
delattr(self, name)
except AttributeError:
pass
def __getattr__(self, name):
if name not in self.LAZY_ATTRS:
return getattr(self.__class__, name)
if name == "encoding":
self.encoding = self._encoding_finder.encoding(
copy.copy(self._response))
return self.encoding
elif name == "is_html":
self.is_html = self._response_type_finder.is_html(
copy.copy(self._response), self.encoding)
return self.is_html
elif name == "title":
if self.is_html:
self.title = self._title_factory.title()
else:
self.title = None
return self.title
elif name == "global_form":
self.forms()
return self.global_form
def forms(self):
"""Return iterable over HTMLForm-like objects.
Raises mechanize.ParseError on failure.
"""
# this implementation sets .global_form as a side-effect, for benefit
# of __getattr__ impl
if self._forms_genf is None:
try:
self._forms_genf = CachingGeneratorFunction(
self._forms_factory.forms())
except: # XXXX define exception!
self.set_response(self._response)
raise
self.global_form = getattr(
self._forms_factory, "global_form", None)
return self._forms_genf()
def links(self):
"""Return iterable over mechanize.Link-like objects.
Raises mechanize.ParseError on failure.
"""
if self._links_genf is None:
try:
self._links_genf = CachingGeneratorFunction(
self._links_factory.links())
except: # XXXX define exception!
self.set_response(self._response)
raise
return self._links_genf()
class DefaultFactory(Factory):
"""Based on sgmllib."""
def __init__(self, i_want_broken_xhtml_support=False):
Factory.__init__(
self,
forms_factory=FormsFactory(),
links_factory=LinksFactory(),
title_factory=TitleFactory(),
response_type_finder=ResponseTypeFinder(
allow_xhtml=i_want_broken_xhtml_support),
)
def set_response(self, response):
Factory.set_response(self, response)
if response is not None:
self._forms_factory.set_response(
copy.copy(response), self.encoding)
self._links_factory.set_response(
copy.copy(response), response.geturl(), self.encoding)
self._title_factory.set_response(
copy.copy(response), self.encoding)
class RobustFactory(Factory):
"""Based on BeautifulSoup, hopefully a bit more robust to bad HTML than is
DefaultFactory.
"""
def __init__(self, i_want_broken_xhtml_support=False,
soup_class=None):
Factory.__init__(
self,
forms_factory=RobustFormsFactory(),
links_factory=RobustLinksFactory(),
title_factory=RobustTitleFactory(),
response_type_finder=ResponseTypeFinder(
allow_xhtml=i_want_broken_xhtml_support),
)
if soup_class is None:
soup_class = MechanizeBs
self._soup_class = soup_class
def set_response(self, response):
Factory.set_response(self, response)
if response is not None:
data = response.read()
soup = self._soup_class(self.encoding, data)
self._forms_factory.set_response(
copy.copy(response), self.encoding)
self._links_factory.set_soup(
soup, response.geturl(), self.encoding)
self._title_factory.set_soup(soup, self.encoding)
| apache-2.0 |
bitifirefly/edx-platform | common/djangoapps/cache_toolbox/templatetags/cache_toolbox.py | 239 | 2059 | from django import template
from django.core.cache import cache
from django.template import Node, TemplateSyntaxError, Variable
from django.template import resolve_variable
register = template.Library()
class CacheNode(Node):
def __init__(self, nodelist, expire_time, key):
self.nodelist = nodelist
self.expire_time = Variable(expire_time)
self.key = key
def render(self, context):
key = resolve_variable(self.key, context)
expire_time = int(self.expire_time.resolve(context))
value = cache.get(key)
if value is None:
value = self.nodelist.render(context)
cache.set(key, value, expire_time)
return value
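# Note: on a cache miss the fragment is rendered and stored for expire_time
# seconds; no locking is performed, so concurrent misses may each render the
# fragment once.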
@register.tag
def cachedeterministic(parser, token):
"""
This will cache the contents of a template fragment for a given amount of
time, just like {% cache .. %} except that the key is deterministic and not
mangled or run through MD5.
Usage::
{% cachedeterministic [expire_time] [key] %}
.. some expensive processing ..
{% endcachedeterministic %}
"""
nodelist = parser.parse(('endcachedeterministic',))
parser.delete_first_token()
tokens = token.contents.split()
if len(tokens) != 3:
raise TemplateSyntaxError(u"'%r' tag requires 2 arguments." % tokens[0])
return CacheNode(nodelist, tokens[1], tokens[2])
class ShowIfCachedNode(Node):
def __init__(self, key):
self.key = key
def render(self, context):
key = resolve_variable(self.key, context)
return cache.get(key) or ''
@register.tag
def showifcached(parser, token):
"""
Show content if it exists in the cache, otherwise display nothing.
The key is entirely deterministic and not mangled or run through MD5 (cf.
{% cache %})
Usage::
{% showifcached [key] %}
"""
tokens = token.contents.split()
if len(tokens) != 2:
raise TemplateSyntaxError(u"'%r' tag requires 1 argument." % tokens[0])
return ShowIfCachedNode(tokens[1])
| agpl-3.0 |
g12mcgov/home-assistant | homeassistant/components/thermostat/demo.py | 19 | 2080 | """
homeassistant.components.thermostat.demo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Demo platform that offers a fake thermostat.
"""
from homeassistant.components.thermostat import ThermostatDevice
from homeassistant.const import TEMP_CELCIUS, TEMP_FAHRENHEIT
def setup_platform(hass, config, add_devices, discovery_info=None):
""" Sets up the Demo thermostats. """
add_devices([
DemoThermostat("Nest", 21, TEMP_CELCIUS, False, 19),
DemoThermostat("Thermostat", 68, TEMP_FAHRENHEIT, True, 77),
])
# pylint: disable=too-many-arguments
class DemoThermostat(ThermostatDevice):
""" Represents a HeatControl thermostat. """
def __init__(self, name, target_temperature, unit_of_measurement,
away, current_temperature):
self._name = name
self._target_temperature = target_temperature
self._unit_of_measurement = unit_of_measurement
self._away = away
self._current_temperature = current_temperature
@property
def should_poll(self):
""" No polling needed for a demo thermostat. """
return False
@property
def name(self):
""" Returns the name. """
return self._name
@property
def unit_of_measurement(self):
""" Returns the unit of measurement. """
return self._unit_of_measurement
@property
def current_temperature(self):
""" Returns the current temperature. """
return self._current_temperature
@property
def target_temperature(self):
""" Returns the temperature we try to reach. """
return self._target_temperature
@property
def is_away_mode_on(self):
""" Returns if away mode is on. """
return self._away
def set_temperature(self, temperature):
""" Set new target temperature. """
self._target_temperature = temperature
def turn_away_mode_on(self):
""" Turns away mode on. """
self._away = True
def turn_away_mode_off(self):
""" Turns away mode off. """
self._away = False
| mit |
drewokane/seaborn | seaborn/tests/test_utils.py | 11 | 11338 | """Tests for plotting utilities."""
import warnings
import tempfile
import shutil
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import nose
import nose.tools as nt
from nose.tools import assert_equal, raises
import numpy.testing as npt
import pandas.util.testing as pdt
from distutils.version import LooseVersion
pandas_has_categoricals = LooseVersion(pd.__version__) >= "0.15"
from pandas.util.testing import network
try:
from bs4 import BeautifulSoup
except ImportError:
BeautifulSoup = None
from . import PlotTestCase
from .. import utils, rcmod
from ..utils import get_dataset_names, load_dataset
a_norm = np.random.randn(100)
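# The pmf_hist tests below rely on the (x, h, w) return convention: bin
# positions, heights normalised to sum to 1, and a single shared bar width
# equal to x[1] - x[0].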
def test_pmf_hist_basics():
"""Test the function to return barplot args for pmf hist."""
out = utils.pmf_hist(a_norm)
assert_equal(len(out), 3)
x, h, w = out
assert_equal(len(x), len(h))
# Test simple case
a = np.arange(10)
x, h, w = utils.pmf_hist(a, 10)
nose.tools.assert_true(np.all(h == h[0]))
def test_pmf_hist_widths():
"""Test histogram width is correct."""
x, h, w = utils.pmf_hist(a_norm)
assert_equal(x[1] - x[0], w)
def test_pmf_hist_normalization():
"""Test that output data behaves like a PMF."""
x, h, w = utils.pmf_hist(a_norm)
nose.tools.assert_almost_equal(sum(h), 1)
nose.tools.assert_less_equal(h.max(), 1)
def test_pmf_hist_bins():
"""Test bin specification."""
x, h, w = utils.pmf_hist(a_norm, 20)
assert_equal(len(x), 20)
def test_ci_to_errsize():
"""Test behavior of ci_to_errsize."""
cis = [[.5, .5],
[1.25, 1.5]]
heights = [1, 1.5]
actual_errsize = np.array([[.5, 1],
[.25, 0]])
test_errsize = utils.ci_to_errsize(cis, heights)
npt.assert_array_equal(actual_errsize, test_errsize)
def test_desaturate():
"""Test color desaturation."""
out1 = utils.desaturate("red", .5)
assert_equal(out1, (.75, .25, .25))
out2 = utils.desaturate("#00FF00", .5)
assert_equal(out2, (.25, .75, .25))
out3 = utils.desaturate((0, 0, 1), .5)
assert_equal(out3, (.25, .25, .75))
out4 = utils.desaturate("red", .5)
assert_equal(out4, (.75, .25, .25))
@raises(ValueError)
def test_desaturation_prop():
"""Test that pct outside of [0, 1] raises exception."""
utils.desaturate("blue", 50)
def test_saturate():
"""Test performance of saturation function."""
out = utils.saturate((.75, .25, .25))
assert_equal(out, (1, 0, 0))
def test_iqr():
"""Test the IQR function."""
a = np.arange(5)
iqr = utils.iqr(a)
assert_equal(iqr, 2)
class TestSpineUtils(PlotTestCase):
sides = ["left", "right", "bottom", "top"]
outer_sides = ["top", "right"]
inner_sides = ["left", "bottom"]
offset = 10
original_position = ("outward", 0)
offset_position = ("outward", offset)
def test_despine(self):
f, ax = plt.subplots()
for side in self.sides:
nt.assert_true(ax.spines[side].get_visible())
utils.despine()
for side in self.outer_sides:
nt.assert_true(~ax.spines[side].get_visible())
for side in self.inner_sides:
nt.assert_true(ax.spines[side].get_visible())
utils.despine(**dict(zip(self.sides, [True] * 4)))
for side in self.sides:
nt.assert_true(~ax.spines[side].get_visible())
def test_despine_specific_axes(self):
f, (ax1, ax2) = plt.subplots(2, 1)
utils.despine(ax=ax2)
for side in self.sides:
nt.assert_true(ax1.spines[side].get_visible())
for side in self.outer_sides:
nt.assert_true(~ax2.spines[side].get_visible())
for side in self.inner_sides:
nt.assert_true(ax2.spines[side].get_visible())
def test_despine_with_offset(self):
f, ax = plt.subplots()
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.original_position)
utils.despine(ax=ax, offset=self.offset)
for side in self.sides:
is_visible = ax.spines[side].get_visible()
new_position = ax.spines[side].get_position()
if is_visible:
nt.assert_equal(new_position, self.offset_position)
else:
nt.assert_equal(new_position, self.original_position)
def test_despine_with_offset_specific_axes(self):
f, (ax1, ax2) = plt.subplots(2, 1)
utils.despine(offset=self.offset, ax=ax2)
for side in self.sides:
nt.assert_equal(ax1.spines[side].get_position(),
self.original_position)
if ax2.spines[side].get_visible():
nt.assert_equal(ax2.spines[side].get_position(),
self.offset_position)
else:
nt.assert_equal(ax2.spines[side].get_position(),
self.original_position)
def test_despine_trim_spines(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_xlim(.75, 3.25)
utils.despine(trim=True)
for side in self.inner_sides:
bounds = ax.spines[side].get_bounds()
nt.assert_equal(bounds, (1, 3))
def test_despine_trim_inverted(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_ylim(.85, 3.15)
ax.invert_yaxis()
utils.despine(trim=True)
for side in self.inner_sides:
bounds = ax.spines[side].get_bounds()
nt.assert_equal(bounds, (1, 3))
def test_despine_trim_noticks(self):
f, ax = plt.subplots()
ax.plot([1, 2, 3], [1, 2, 3])
ax.set_yticks([])
utils.despine(trim=True)
nt.assert_equal(ax.get_yticks().size, 0)
def test_offset_spines_warns(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, ax = plt.subplots()
utils.offset_spines(offset=self.offset)
nt.assert_true('deprecated' in str(w[0].message))
nt.assert_true(issubclass(w[0].category, UserWarning))
def test_offset_spines(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, ax = plt.subplots()
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.original_position)
utils.offset_spines(offset=self.offset)
for side in self.sides:
nt.assert_equal(ax.spines[side].get_position(),
self.offset_position)
def test_offset_spines_specific_axes(self):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", category=UserWarning)
f, (ax1, ax2) = plt.subplots(2, 1)
utils.offset_spines(offset=self.offset, ax=ax2)
for side in self.sides:
nt.assert_equal(ax1.spines[side].get_position(),
self.original_position)
nt.assert_equal(ax2.spines[side].get_position(),
self.offset_position)
def test_ticklabels_overlap():
rcmod.set()
f, ax = plt.subplots(figsize=(2, 2))
f.tight_layout() # This gets the Agg renderer working
assert not utils.axis_ticklabels_overlap(ax.get_xticklabels())
big_strings = "abcdefgh", "ijklmnop"
ax.set_xlim(-.5, 1.5)
ax.set_xticks([0, 1])
ax.set_xticklabels(big_strings)
assert utils.axis_ticklabels_overlap(ax.get_xticklabels())
x, y = utils.axes_ticklabels_overlap(ax)
assert x
assert not y
def test_categorical_order():
x = ["a", "c", "c", "b", "a", "d"]
y = [3, 2, 5, 1, 4]
order = ["a", "b", "c", "d"]
out = utils.categorical_order(x)
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(x, order)
nt.assert_equal(out, order)
out = utils.categorical_order(x, ["b", "a"])
nt.assert_equal(out, ["b", "a"])
out = utils.categorical_order(np.array(x))
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(pd.Series(x))
nt.assert_equal(out, ["a", "c", "b", "d"])
out = utils.categorical_order(y)
nt.assert_equal(out, [1, 2, 3, 4, 5])
out = utils.categorical_order(np.array(y))
nt.assert_equal(out, [1, 2, 3, 4, 5])
out = utils.categorical_order(pd.Series(y))
nt.assert_equal(out, [1, 2, 3, 4, 5])
if pandas_has_categoricals:
x = pd.Categorical(x, order)
out = utils.categorical_order(x)
nt.assert_equal(out, list(x.categories))
x = pd.Series(x)
out = utils.categorical_order(x)
nt.assert_equal(out, list(x.cat.categories))
out = utils.categorical_order(x, ["b", "a"])
nt.assert_equal(out, ["b", "a"])
x = ["a", np.nan, "c", "c", "b", "a", "d"]
out = utils.categorical_order(x)
nt.assert_equal(out, ["a", "c", "b", "d"])
if LooseVersion(pd.__version__) >= "0.15":
def check_load_dataset(name):
ds = load_dataset(name, cache=False)
assert(isinstance(ds, pd.DataFrame))
def check_load_cached_dataset(name):
# Test the cacheing using a temporary file.
# With Python 3.2+, we could use the tempfile.TemporaryDirectory()
# context manager instead of this try...finally statement
tmpdir = tempfile.mkdtemp()
try:
# download and cache
ds = load_dataset(name, cache=True, data_home=tmpdir)
# use cached version
ds2 = load_dataset(name, cache=True, data_home=tmpdir)
pdt.assert_frame_equal(ds, ds2)
finally:
shutil.rmtree(tmpdir)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_get_dataset_names():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
names = get_dataset_names()
assert(len(names) > 0)
assert(u"titanic" in names)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_load_datasets():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
# Heavy test to verify that we can load all available datasets
for name in get_dataset_names():
# unfortunately @network somehow obscures this generator so it
# does not get in effect, so we need to call explicitly
# yield check_load_dataset, name
check_load_dataset(name)
@network(url="https://github.com/mwaskom/seaborn-data")
def test_load_cached_datasets():
if not BeautifulSoup:
raise nose.SkipTest("No BeautifulSoup available for parsing html")
# Heavy test to verify that we can load all available datasets
for name in get_dataset_names():
# unfortunately @network somehow obscures this generator so it
# does not get in effect, so we need to call explicitly
# yield check_load_dataset, name
check_load_cached_dataset(name)
| bsd-3-clause |
cbclab/MDT | mdt/gui/maps_visualizer/config_tabs/tab_general.py | 1 | 18316 | import copy
import os
from PyQt5.QtCore import pyqtSlot, Qt, QPoint, QUrl
from PyQt5.QtGui import QDesktopServices
from PyQt5.QtWidgets import QWidget, QAbstractItemView, QMenu, QWidgetAction, QLabel
from mdt.gui.maps_visualizer.actions import SetDimension, SetSliceIndex, SetVolumeIndex, SetColormap, SetRotate, \
SetZoom, SetShowAxis, SetColorBarNmrTicks, SetMapsToShow, SetFont, SetInterpolation, SetFlipud, SetPlotTitle, \
SetGeneralMask, NewDataAction, SetShowPlotColorbars, SetColorbarLocation, SetShowPlotTitles
from mdt.gui.maps_visualizer.base import DataConfigModel
from mdt.gui.maps_visualizer.design.ui_TabGeneral import Ui_TabGeneral
from mdt.gui.utils import blocked_signals, TimedUpdate, split_long_path_elements
from mdt.visualization.maps.base import Zoom, Point2d, DataInfo, Font, MapPlotConfig
__author__ = 'Robbert Harms'
__date__ = "2016-09-03"
__maintainer__ = "Robbert Harms"
__email__ = "[email protected]"
class TabGeneral(QWidget, Ui_TabGeneral):
def __init__(self, controller, parent=None):
super().__init__(parent)
self.setupUi(self)
self._controller = controller
self._controller.model_updated.connect(self.update_model)
self._previous_model = None
current_model = self._controller.get_model()
self.general_display_order.setDragDropMode(QAbstractItemView.InternalMove)
self.general_display_order.setSelectionMode(QAbstractItemView.SingleSelection)
self.general_colormap.addItems(current_model.get_config().get_available_colormaps())
self.general_rotate.addItems(['0', '90', '180', '270'])
self.general_rotate.setCurrentText(str(current_model.get_config().rotate))
self.general_DisplayOrder.set_collapse(True)
self.general_Miscellaneous.set_collapse(True)
self.general_Zoom.set_collapse(True)
self.general_Font.set_collapse(True)
self.general_Colorbar.set_collapse(True)
self.general_dimension.valueChanged.connect(lambda v: self._controller.apply_action(SetDimension(v)))
self.general_slice_index.valueChanged.connect(lambda v: self._controller.apply_action(SetSliceIndex(v)))
self.general_volume_index.valueChanged.connect(lambda v: self._controller.apply_action(SetVolumeIndex(v)))
self.general_colormap.currentIndexChanged.connect(
lambda i: self._controller.apply_action(SetColormap(self.general_colormap.itemText(i))))
self.general_rotate.currentIndexChanged.connect(
lambda i: self._controller.apply_action(SetRotate(int(self.general_rotate.itemText(i)))))
self._map_selection_timer = TimedUpdate(self._update_maps_to_show)
self.general_map_selection.itemSelectionChanged.connect(
lambda: self._map_selection_timer.add_delayed_callback(500))
self.general_map_selection.setContextMenuPolicy(Qt.CustomContextMenu)
self.general_map_selection.customContextMenuRequested.connect(self.select_maps_context_menu)
self.general_deselect_all_maps.clicked.connect(self._deleselect_all_maps)
self.general_invert_map_selection.clicked.connect(self._invert_map_selection)
self.general_zoom_x_0.valueChanged.connect(self._update_zoom)
self.general_zoom_x_1.valueChanged.connect(self._update_zoom)
self.general_zoom_y_0.valueChanged.connect(self._update_zoom)
self.general_zoom_y_1.valueChanged.connect(self._update_zoom)
self.plot_title.textEdited.connect(lambda txt: self._controller.apply_action(SetPlotTitle(txt)))
self.general_zoom_reset.clicked.connect(lambda: self._controller.apply_action(SetZoom(Zoom.no_zoom())))
self.general_zoom_fit.clicked.connect(self._zoom_fit)
self.general_display_order.items_reordered.connect(self._reorder_maps)
self.general_show_axis.clicked.connect(lambda: self._controller.apply_action(
SetShowAxis(self.general_show_axis.isChecked())))
self.general_font_family.addItems(Font.font_names())
self.general_font_family.currentTextChanged.connect(
lambda v: self._controller.apply_action(SetFont(
self._controller.get_model().get_config().font.get_updated(family=v))))
self.general_font_size.valueChanged.connect(
lambda: self._controller.apply_action(SetFont(
self._controller.get_model().get_config().font.get_updated(size=self.general_font_size.value()))))
self.general_interpolation.addItems(current_model.get_config().get_available_interpolations())
self.general_interpolation.currentTextChanged.connect(
lambda v: self._controller.apply_action(SetInterpolation(v)))
self.general_flipud.clicked.connect(lambda: self._controller.apply_action(
SetFlipud(self.general_flipud.isChecked())))
self.general_show_plot_titles.clicked.connect(lambda: self._controller.apply_action(
SetShowPlotTitles(self.general_show_plot_titles.isChecked())))
self.mask_name.currentIndexChanged.connect(self._update_mask_name)
self.general_colorbar_nmr_ticks.valueChanged.connect(
lambda v: self._controller.apply_action(SetColorBarNmrTicks(v)))
self.general_show_colorbar.clicked.connect(lambda: self._controller.apply_action(
SetShowPlotColorbars(self.general_show_colorbar.isChecked())))
self.general_colorbar_location.currentTextChanged.connect(
lambda v: self._controller.apply_action(SetColorbarLocation(v.lower())))
@pyqtSlot(DataConfigModel)
def update_model(self, model):
data = model.get_data()
config = model.get_config()
if not self._previous_model or model.get_data() != self._previous_model.get_data():
self._previous_model = model
self._update_data(data)
self._update_config(data, config)
def select_maps_context_menu(self, position):
global_position = self.general_map_selection.mapToGlobal(position)
def get_header_action(parent, map_name):
label = QLabel(map_name)
font = label.font()
font.setBold(True)
label.setFont(font)
label.setStyleSheet('color: black; margin:5px; margin-left: 15px;')
action = QWidgetAction(parent)
action.setDisabled(True)
action.setDefaultWidget(label)
return action
if self.general_map_selection.count():
row = self.general_map_selection.indexAt(position)
if row:
element = self.general_map_selection.item(row.row())
if element:
map_name = element.data(Qt.UserRole)
file_path = self._controller.get_model().get_data().get_file_path(map_name)
menu = QMenu()
menu.addAction(get_header_action(menu, map_name))
menu.addSeparator()
show_in_folder = menu.addAction('&Show in folder', lambda:
QDesktopServices.openUrl(QUrl.fromLocalFile(os.path.dirname(file_path))))
if file_path is None:
show_in_folder.setEnabled(False)
menu.addAction('Use as &mask', lambda: self._controller.apply_action(SetGeneralMask(map_name)))
menu.addAction('R&emove', lambda: self._controller.apply_action(
NewDataAction(self._controller.get_model().get_data().get_updated(removals=[map_name]))))
menu.exec(global_position)
@pyqtSlot()
def _reorder_maps(self):
items = [self.general_display_order.item(ind) for ind in range(self.general_display_order.count())]
map_names = [item.data(Qt.UserRole) for item in items]
self._controller.apply_action(SetMapsToShow(map_names))
@pyqtSlot()
def _update_maps_to_show(self):
current_model = self._controller.get_model()
map_names = copy.copy(current_model.get_config().maps_to_show)
for item in [self.general_map_selection.item(ind) for ind in range(self.general_map_selection.count())]:
map_name = item.data(Qt.UserRole)
if item.isSelected():
if map_name not in map_names:
self._insert_alphabetically(map_name, map_names)
else:
if map_name in map_names:
map_names.remove(map_name)
self._controller.apply_action(SetMapsToShow(map_names))
@pyqtSlot()
def _deleselect_all_maps(self):
self._controller.apply_action(SetMapsToShow([]))
@pyqtSlot()
def _invert_map_selection(self):
current_model = self._controller.get_model()
self._controller.apply_action(SetMapsToShow(
list(set(current_model.get_data().get_map_names()).difference(
set(current_model.get_config().maps_to_show)))))
@pyqtSlot()
def _zoom_fit(self):
current_model = self._controller.get_model()
data_info = current_model.get_data()
config = current_model.get_config()
def add_padding(bounding_box, max_x, max_y):
bounding_box[0].x = max(bounding_box[0].x - 1, 0)
bounding_box[0].y = max(bounding_box[0].y - 1, 0)
bounding_box[1].y = min(bounding_box[1].y + 2, max_y)
bounding_box[1].x = min(bounding_box[1].x + 2, max_x)
return bounding_box
if config.maps_to_show or len(data_info.get_map_names()):
bounding_box = data_info.get_bounding_box(config.dimension, config.slice_index,
config.volume_index, config.rotate, config.maps_to_show)
max_y = data_info.get_max_y_index(config.dimension, rotate=config.rotate, map_names=config.maps_to_show)
max_x = data_info.get_max_x_index(config.dimension, rotate=config.rotate, map_names=config.maps_to_show)
if not config.flipud:
# Since the renderer plots with a left top coordinate system,
# we need to flip the y coordinates upside down by default.
tmp = max_y - bounding_box[0].y
bounding_box[0].y = max_y - bounding_box[1].y
bounding_box[1].y = tmp
bounding_box = add_padding(bounding_box, max_x, max_y)
self._controller.apply_action(SetZoom(Zoom(*bounding_box)))
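    # _zoom_fit pads the data bounding box by one voxel on each side and,
    # when the display is not flipped, mirrors the y-range to match the
    # renderer's top-left origin before applying the zoom.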
@pyqtSlot()
def _update_zoom(self):
np0x, np0y = self.general_zoom_x_0.value(), self.general_zoom_y_0.value()
np1x, np1y = self.general_zoom_x_1.value(), self.general_zoom_y_1.value()
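# Clamp so the far corner never precedes the near corner.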
if np0x > np1x:
np1x = np0x
if np0y > np1y:
np1y = np0y
self._controller.apply_action(SetZoom(Zoom.from_coords(np0x, np0y, np1x, np1y)))
@staticmethod
def _insert_alphabetically(new_item, item_list):
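# Keeps item_list sorted, e.g. inserting 'f' into ['b', 'd', 'g']
# yields ['b', 'd', 'f', 'g'].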
for ind, item in enumerate(item_list):
if item > new_item:
item_list.insert(ind, new_item)
return
item_list.append(new_item)
@pyqtSlot(int)
def _update_mask_name(self, index):
if index == 0:
self._controller.apply_action(SetGeneralMask(None))
else:
self._controller.apply_action(SetGeneralMask(self.mask_name.itemText(index)))
def _update_data(self, data_info):
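# Refreshes the map-selection list and the mask dropdown from the loaded
# data, blocking signals so repopulating the widgets does not fire actions.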
sorted_keys = list(sorted(data_info.get_map_names()))
if len(data_info.get_map_names()):
self.general_info_nmr_maps.setText(str(len(data_info.get_map_names())))
else:
self.general_info_nmr_maps.setText('0')
with blocked_signals(self.general_map_selection):
self.general_map_selection.clear()
self.general_map_selection.addItems(sorted_keys)
for index, map_name in enumerate(sorted_keys):
item = self.general_map_selection.item(index)
item.setData(Qt.UserRole, map_name)
with blocked_signals(self.mask_name):
self.mask_name.clear()
self.mask_name.insertItem(0, '-- None --')
self.mask_name.insertItems(1, sorted_keys)
def _update_config(self, data, config):
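# Pushes every config value into its corresponding widget; signals are
# blocked throughout so the programmatic updates do not echo back as new
# user actions.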
map_names = config.maps_to_show
with blocked_signals(self.general_dimension):
try:
max_dimension = data.get_max_dimension(map_names)
self.general_dimension.setMaximum(max_dimension)
self.maximumDimension.setText(str(max_dimension))
except ValueError:
self.general_dimension.setMaximum(0)
self.maximumDimension.setText(str(0))
self.general_dimension.setValue(config.dimension)
with blocked_signals(self.general_slice_index):
try:
max_slice = data.get_max_slice_index(config.dimension, map_names)
self.general_slice_index.setMaximum(max_slice)
self.maximumIndex.setText(str(max_slice))
except ValueError:
self.general_slice_index.setMaximum(0)
self.maximumIndex.setText(str(0))
self.general_slice_index.setValue(config.slice_index)
with blocked_signals(self.general_volume_index):
try:
max_volume = data.get_max_volume_index(map_names)
self.general_volume_index.setMaximum(max_volume)
self.maximumVolume.setText(str(max_volume))
except ValueError:
self.general_volume_index.setMaximum(0)
self.maximumVolume.setText(str(0))
self.general_volume_index.setValue(config.volume_index)
with blocked_signals(self.general_colormap):
self.general_colormap.setCurrentText(config.colormap)
with blocked_signals(self.general_rotate):
self.general_rotate.setCurrentText(str(config.rotate))
if self.general_map_selection.count():
for map_name, map_config in config.map_plot_options.items():
if map_config.title:
index = list(sorted(data.get_map_names())).index(map_name)
item = self.general_map_selection.item(index)
item.setData(Qt.DisplayRole, map_name + ' (' + map_config.title + ')')
self.general_map_selection.blockSignals(True)
for index, map_name in enumerate(list(sorted(data.get_map_names()))):
item = self.general_map_selection.item(index)
if item:
item.setSelected(map_name in map_names)
self.general_map_selection.blockSignals(False)
try:
max_x = data.get_max_x_index(config.dimension, config.rotate, map_names)
max_y = data.get_max_y_index(config.dimension, config.rotate, map_names)
with blocked_signals(self.general_zoom_x_0, self.general_zoom_x_1,
self.general_zoom_y_0, self.general_zoom_y_1):
self.general_zoom_x_0.setMaximum(max_x)
self.general_zoom_x_0.setValue(config.zoom.p0.x)
self.general_zoom_x_1.setMaximum(max_x)
self.general_zoom_x_1.setMinimum(config.zoom.p0.x)
self.general_zoom_x_1.setValue(config.zoom.p1.x)
self.general_zoom_y_0.setMaximum(max_y)
self.general_zoom_y_0.setValue(config.zoom.p0.y)
self.general_zoom_y_1.setMaximum(max_y)
self.general_zoom_y_1.setMinimum(config.zoom.p0.y)
self.general_zoom_y_1.setValue(config.zoom.p1.y)
if config.zoom.p0.x == 0 and config.zoom.p1.x == 0:
self.general_zoom_x_1.setValue(max_x)
if config.zoom.p0.y == 0 and config.zoom.p1.y == 0:
self.general_zoom_y_1.setValue(max_y)
except ValueError:
pass
with blocked_signals(self.plot_title):
self.plot_title.setText(config.title)
with blocked_signals(self.general_display_order):
self.general_display_order.clear()
self.general_display_order.addItems(map_names)
for index, map_name in enumerate(map_names):
item = self.general_display_order.item(index)
item.setData(Qt.UserRole, map_name)
if map_name in config.map_plot_options and config.map_plot_options[map_name].title:
title = config.map_plot_options[map_name].title
item.setData(Qt.DisplayRole, map_name + ' (' + title + ')')
with blocked_signals(self.general_show_axis):
self.general_show_axis.setChecked(config.show_axis)
with blocked_signals(self.general_colorbar_nmr_ticks):
self.general_colorbar_nmr_ticks.setValue(config.colorbar_settings.get_preferred('nmr_ticks'))
with blocked_signals(self.general_show_colorbar):
self.general_show_colorbar.setChecked(config.colorbar_settings.get_preferred('visible'))
with blocked_signals(self.general_colorbar_location):
self.general_colorbar_location.setCurrentText(config.colorbar_settings.get_preferred('location').title())
with blocked_signals(self.general_show_plot_titles):
self.general_show_plot_titles.setChecked(config.show_titles)
with blocked_signals(self.general_font_family):
self.general_font_family.setCurrentText(config.font.family)
with blocked_signals(self.general_font_size):
self.general_font_size.setValue(config.font.size)
with blocked_signals(self.general_interpolation):
self.general_interpolation.setCurrentText(config.interpolation)
with blocked_signals(self.general_flipud):
self.general_flipud.setChecked(config.flipud)
with blocked_signals(self.mask_name):
if config.mask_name and config.mask_name in data.get_map_names():
for ind in range(self.mask_name.count()):
if self.mask_name.itemText(ind) == config.mask_name:
self.mask_name.setCurrentIndex(ind)
break
else:
self.mask_name.setCurrentIndex(0)
| lgpl-3.0 |
kiszk/spark | python/pyspark/mllib/util.py | 18 | 19611 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import numpy as np
if sys.version > '3':
xrange = range
basestring = str
from pyspark import SparkContext, since
from pyspark.mllib.common import callMLlibFunc, inherit_doc
from pyspark.mllib.linalg import Vectors, SparseVector, _convert_to_vector
from pyspark.sql import DataFrame
class MLUtils(object):
"""
Helper methods to load, save and pre-process data used in MLlib.
.. versionadded:: 1.0.0
"""
@staticmethod
def _parse_libsvm_line(line):
"""
Parses a line in LIBSVM format into (label, indices, values).
"""
items = line.split(None)
label = float(items[0])
nnz = len(items) - 1
indices = np.zeros(nnz, dtype=np.int32)
values = np.zeros(nnz)
for i in xrange(nnz):
index, value = items[1 + i].split(":")
indices[i] = int(index) - 1
values[i] = float(value)
return label, indices, values
@staticmethod
def _convert_labeled_point_to_libsvm(p):
"""Converts a LabeledPoint to a string in LIBSVM format."""
from pyspark.mllib.regression import LabeledPoint
assert isinstance(p, LabeledPoint)
items = [str(p.label)]
v = _convert_to_vector(p.features)
if isinstance(v, SparseVector):
nnz = len(v.indices)
for i in xrange(nnz):
items.append(str(v.indices[i] + 1) + ":" + str(v.values[i]))
else:
for i in xrange(len(v)):
items.append(str(i + 1) + ":" + str(v[i]))
return " ".join(items)
@staticmethod
@since("1.0.0")
def loadLibSVMFile(sc, path, numFeatures=-1, minPartitions=None):
"""
Loads labeled data in the LIBSVM format into an RDD of
LabeledPoint. The LIBSVM format is a text-based format used by
LIBSVM and LIBLINEAR. Each line represents a labeled sparse
feature vector using the following format:
label index1:value1 index2:value2 ...
where the indices are one-based and in ascending order. This
method parses each line into a LabeledPoint, where the feature
indices are converted to zero-based.
:param sc: Spark context
:param path: file or directory path in any Hadoop-supported file
system URI
:param numFeatures: number of features, which will be determined
from the input data if a nonpositive value
is given. This is useful when the dataset is
already split into multiple files and you
want to load them separately, because some
features may not be present in certain files,
which leads to inconsistent feature
dimensions.
:param minPartitions: min number of partitions
:return: labeled data stored as an RDD of LabeledPoint
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> tempFile = NamedTemporaryFile(delete=True)
>>> _ = tempFile.write(b"+1 1:1.0 3:2.0 5:3.0\\n-1\\n-1 2:4.0 4:5.0 6:6.0")
>>> tempFile.flush()
>>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect()
>>> tempFile.close()
>>> examples[0]
LabeledPoint(1.0, (6,[0,2,4],[1.0,2.0,3.0]))
>>> examples[1]
LabeledPoint(-1.0, (6,[],[]))
>>> examples[2]
LabeledPoint(-1.0, (6,[1,3,5],[4.0,5.0,6.0]))
"""
from pyspark.mllib.regression import LabeledPoint
lines = sc.textFile(path, minPartitions)
parsed = lines.map(lambda l: MLUtils._parse_libsvm_line(l))
if numFeatures <= 0:
parsed.cache()
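# Infer the dimensionality from the largest zero-based index seen, plus one.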
numFeatures = parsed.map(lambda x: -1 if x[1].size == 0 else x[1][-1]).reduce(max) + 1
return parsed.map(lambda x: LabeledPoint(x[0], Vectors.sparse(numFeatures, x[1], x[2])))
@staticmethod
@since("1.0.0")
def saveAsLibSVMFile(data, dir):
"""
Save labeled data in LIBSVM format.
:param data: an RDD of LabeledPoint to be saved
:param dir: directory to save the data
>>> from tempfile import NamedTemporaryFile
>>> from fileinput import input
>>> from pyspark.mllib.regression import LabeledPoint
>>> from glob import glob
>>> from pyspark.mllib.util import MLUtils
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name)
>>> ''.join(sorted(input(glob(tempFile.name + "/part-0000*"))))
'0.0 1:1.01 2:2.02 3:3.03\\n1.1 1:1.23 3:4.56\\n'
"""
lines = data.map(lambda p: MLUtils._convert_labeled_point_to_libsvm(p))
lines.saveAsTextFile(dir)
@staticmethod
@since("1.1.0")
def loadLabeledPoints(sc, path, minPartitions=None):
"""
Load labeled points saved using RDD.saveAsTextFile.
:param sc: Spark context
:param path: file or directory path in any Hadoop-supported file
system URI
:param minPartitions: min number of partitions
:return: labeled data stored as an RDD of LabeledPoint
>>> from tempfile import NamedTemporaryFile
>>> from pyspark.mllib.util import MLUtils
>>> from pyspark.mllib.regression import LabeledPoint
>>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])),
... LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]
>>> tempFile = NamedTemporaryFile(delete=True)
>>> tempFile.close()
>>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name)
>>> MLUtils.loadLabeledPoints(sc, tempFile.name).collect()
[LabeledPoint(1.1, (3,[0,2],[-1.23,4.56e-07])), LabeledPoint(0.0, [1.01,2.02,3.03])]
"""
minPartitions = minPartitions or min(sc.defaultParallelism, 2)
return callMLlibFunc("loadLabeledPoints", sc, path, minPartitions)
@staticmethod
@since("1.5.0")
def appendBias(data):
"""
Returns a new vector with `1.0` (bias) appended to
the end of the input vector.
"""
vec = _convert_to_vector(data)
if isinstance(vec, SparseVector):
newIndices = np.append(vec.indices, len(vec))
newValues = np.append(vec.values, 1.0)
return SparseVector(len(vec) + 1, newIndices, newValues)
else:
return _convert_to_vector(np.append(vec.toArray(), 1.0))
@staticmethod
@since("1.5.0")
def loadVectors(sc, path):
"""
Loads vectors saved using `RDD[Vector].saveAsTextFile`
with the default number of partitions.
"""
return callMLlibFunc("loadVectors", sc, path)
@staticmethod
@since("2.0.0")
def convertVectorColumnsToML(dataset, *cols):
"""
Converts vector columns in an input DataFrame from the
:py:class:`pyspark.mllib.linalg.Vector` type to the new
:py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
package.
:param dataset:
input dataset
:param cols:
a list of vector columns to be converted.
New vector columns will be ignored. If unspecified, all old
vector columns will be converted, except nested ones.
:return:
the input dataset with old vector columns converted to the
new vector type
>>> import pyspark
>>> from pyspark.mllib.linalg import Vectors
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
... ["id", "x", "y"])
>>> r1 = MLUtils.convertVectorColumnsToML(df).first()
>>> isinstance(r1.x, pyspark.ml.linalg.SparseVector)
True
>>> isinstance(r1.y, pyspark.ml.linalg.DenseVector)
True
>>> r2 = MLUtils.convertVectorColumnsToML(df, "x").first()
>>> isinstance(r2.x, pyspark.ml.linalg.SparseVector)
True
>>> isinstance(r2.y, pyspark.mllib.linalg.DenseVector)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertVectorColumnsToML", dataset, list(cols))
@staticmethod
@since("2.0.0")
def convertVectorColumnsFromML(dataset, *cols):
"""
Converts vector columns in an input DataFrame to the
:py:class:`pyspark.mllib.linalg.Vector` type from the new
:py:class:`pyspark.ml.linalg.Vector` type under the `spark.ml`
package.
:param dataset:
input dataset
:param cols:
a list of vector columns to be converted.
Old vector columns will be ignored. If unspecified, all new
vector columns will be converted except nested ones.
:return:
the input dataset with new vector columns converted to the
old vector type
>>> import pyspark
>>> from pyspark.ml.linalg import Vectors
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Vectors.sparse(2, [1], [1.0]), Vectors.dense(2.0, 3.0))],
... ["id", "x", "y"])
>>> r1 = MLUtils.convertVectorColumnsFromML(df).first()
>>> isinstance(r1.x, pyspark.mllib.linalg.SparseVector)
True
>>> isinstance(r1.y, pyspark.mllib.linalg.DenseVector)
True
>>> r2 = MLUtils.convertVectorColumnsFromML(df, "x").first()
>>> isinstance(r2.x, pyspark.mllib.linalg.SparseVector)
True
>>> isinstance(r2.y, pyspark.ml.linalg.DenseVector)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertVectorColumnsFromML", dataset, list(cols))
@staticmethod
@since("2.0.0")
def convertMatrixColumnsToML(dataset, *cols):
"""
Converts matrix columns in an input DataFrame from the
:py:class:`pyspark.mllib.linalg.Matrix` type to the new
:py:class:`pyspark.ml.linalg.Matrix` type under the `spark.ml`
package.
:param dataset:
input dataset
:param cols:
a list of matrix columns to be converted.
New matrix columns will be ignored. If unspecified, all old
matrix columns will be converted, except nested ones.
:return:
the input dataset with old matrix columns converted to the
new matrix type
>>> import pyspark
>>> from pyspark.mllib.linalg import Matrices
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]),
... Matrices.dense(2, 2, range(4)))], ["id", "x", "y"])
>>> r1 = MLUtils.convertMatrixColumnsToML(df).first()
>>> isinstance(r1.x, pyspark.ml.linalg.SparseMatrix)
True
>>> isinstance(r1.y, pyspark.ml.linalg.DenseMatrix)
True
>>> r2 = MLUtils.convertMatrixColumnsToML(df, "x").first()
>>> isinstance(r2.x, pyspark.ml.linalg.SparseMatrix)
True
>>> isinstance(r2.y, pyspark.mllib.linalg.DenseMatrix)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertMatrixColumnsToML", dataset, list(cols))
@staticmethod
@since("2.0.0")
def convertMatrixColumnsFromML(dataset, *cols):
"""
Converts matrix columns in an input DataFrame to the
:py:class:`pyspark.mllib.linalg.Matrix` type from the new
:py:class:`pyspark.ml.linalg.Matrix` type under the `spark.ml`
package.
:param dataset:
input dataset
:param cols:
a list of matrix columns to be converted.
Old matrix columns will be ignored. If unspecified, all new
matrix columns will be converted except nested ones.
:return:
the input dataset with new matrix columns converted to the
old matrix type
>>> import pyspark
>>> from pyspark.ml.linalg import Matrices
>>> from pyspark.mllib.util import MLUtils
>>> df = spark.createDataFrame(
... [(0, Matrices.sparse(2, 2, [0, 2, 3], [0, 1, 1], [2, 3, 4]),
... Matrices.dense(2, 2, range(4)))], ["id", "x", "y"])
>>> r1 = MLUtils.convertMatrixColumnsFromML(df).first()
>>> isinstance(r1.x, pyspark.mllib.linalg.SparseMatrix)
True
>>> isinstance(r1.y, pyspark.mllib.linalg.DenseMatrix)
True
>>> r2 = MLUtils.convertMatrixColumnsFromML(df, "x").first()
>>> isinstance(r2.x, pyspark.mllib.linalg.SparseMatrix)
True
>>> isinstance(r2.y, pyspark.ml.linalg.DenseMatrix)
True
"""
if not isinstance(dataset, DataFrame):
raise TypeError("Input dataset must be a DataFrame but got {}.".format(type(dataset)))
return callMLlibFunc("convertMatrixColumnsFromML", dataset, list(cols))
class Saveable(object):
"""
Mixin for models and transformers which may be saved as files.
.. versionadded:: 1.3.0
"""
def save(self, sc, path):
"""
Save this model to the given path.
This saves:
* human-readable (JSON) model metadata to path/metadata/
* Parquet formatted data to path/data/
The model may be loaded using :py:meth:`Loader.load`.
:param sc: Spark context used to save model data.
:param path: Path specifying the directory in which to save
this model. If the directory already exists,
this method throws an exception.
"""
raise NotImplementedError
@inherit_doc
class JavaSaveable(Saveable):
"""
Mixin for models that provide save() through their Scala
implementation.
.. versionadded:: 1.3.0
"""
@since("1.3.0")
def save(self, sc, path):
"""Save this model to the given path."""
if not isinstance(sc, SparkContext):
raise TypeError("sc should be a SparkContext, got type %s" % type(sc))
if not isinstance(path, basestring):
raise TypeError("path should be a basestring, got type %s" % type(path))
self._java_model.save(sc._jsc.sc(), path)
class Loader(object):
"""
Mixin for classes which can load saved models from files.
.. versionadded:: 1.3.0
"""
@classmethod
def load(cls, sc, path):
"""
Load a model from the given path. The model should have been
saved using :py:meth:`Saveable.save`.
:param sc: Spark context used for loading model files.
:param path: Path specifying the directory to which the model
was saved.
:return: model instance
"""
raise NotImplementedError
@inherit_doc
class JavaLoader(Loader):
"""
Mixin for classes which can load saved models using its Scala
implementation.
.. versionadded:: 1.3.0
"""
@classmethod
def _java_loader_class(cls):
"""
Returns the full class name of the Java loader. The default
implementation replaces "pyspark" by "org.apache.spark" in
the Python full class name.
"""
java_package = cls.__module__.replace("pyspark", "org.apache.spark")
return ".".join([java_package, cls.__name__])
@classmethod
def _load_java(cls, sc, path):
"""
Load a Java model from the given path.
"""
java_class = cls._java_loader_class()
java_obj = sc._jvm
for name in java_class.split("."):
java_obj = getattr(java_obj, name)
return java_obj.load(sc._jsc.sc(), path)
@classmethod
@since("1.3.0")
def load(cls, sc, path):
"""Load a model from the given path."""
java_model = cls._load_java(sc, path)
return cls(java_model)
class LinearDataGenerator(object):
"""Utils for generating linear data.
.. versionadded:: 1.5.0
"""
@staticmethod
@since("1.5.0")
def generateLinearInput(intercept, weights, xMean, xVariance,
nPoints, seed, eps):
"""
:param intercept: bias factor, the term c in X'w + c
:param weights: feature vector, the term w in X'w + c
:param xMean: point around which the data X is centered
:param xVariance: variance of the given data
:param nPoints: number of points to be generated
:param seed: random seed
:param eps: used to scale the noise; the higher eps,
the more Gaussian noise is added.
Returns a list of LabeledPoints of length nPoints
"""
weights = [float(weight) for weight in weights]
xMean = [float(mean) for mean in xMean]
xVariance = [float(var) for var in xVariance]
return list(callMLlibFunc(
"generateLinearInputWrapper", float(intercept), weights, xMean,
xVariance, int(nPoints), int(seed), float(eps)))
@staticmethod
@since("1.5.0")
def generateLinearRDD(sc, nexamples, nfeatures, eps,
nParts=2, intercept=0.0):
"""
Generate an RDD of LabeledPoints.
"""
return callMLlibFunc(
"generateLinearRDDWrapper", sc, int(nexamples), int(nfeatures),
float(eps), int(nParts), float(intercept))
def _test():
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
# The small batch size here ensures that we see multiple batches,
# even in these small test examples:
spark = SparkSession.builder\
.master("local[2]")\
.appName("mllib.util tests")\
.getOrCreate()
globs['spark'] = spark
globs['sc'] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
kostaspl/SpiderMonkey38 | python/mozbuild/mozbuild/test/backend/test_recursivemake.py | 1 | 29117 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
import json
import os
import unittest
from mozpack.manifests import (
InstallManifest,
)
from mozunit import main
from mozbuild.backend.recursivemake import (
RecursiveMakeBackend,
RecursiveMakeTraversal,
)
from mozbuild.frontend.emitter import TreeMetadataEmitter
from mozbuild.frontend.reader import BuildReader
from mozbuild.test.backend.common import BackendTester
import mozpack.path as mozpath
class TestRecursiveMakeTraversal(unittest.TestCase):
def test_traversal(self):
traversal = RecursiveMakeTraversal()
traversal.add('', dirs=['A', 'B', 'C'])
traversal.add('', dirs=['D'])
traversal.add('A')
traversal.add('B', dirs=['E', 'F'])
traversal.add('C', dirs=['G', 'H'])
traversal.add('D', dirs=['I', 'K'])
traversal.add('D', dirs=['J', 'L'])
traversal.add('E')
traversal.add('F')
traversal.add('G')
traversal.add('H')
traversal.add('I', dirs=['M', 'N'])
traversal.add('J', dirs=['O', 'P'])
traversal.add('K', dirs=['Q', 'R'])
traversal.add('L', dirs=['S'])
traversal.add('M')
traversal.add('N', dirs=['T'])
traversal.add('O')
traversal.add('P', dirs=['U'])
traversal.add('Q')
traversal.add('R', dirs=['V'])
traversal.add('S', dirs=['W'])
traversal.add('T')
traversal.add('U')
traversal.add('V')
traversal.add('W', dirs=['X'])
traversal.add('X')
parallels = set(('G', 'H', 'I', 'J', 'O', 'P', 'Q', 'R', 'U'))
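# The filter below tells the traversal, for each directory, which subdirs
# build in parallel and which build sequentially; this shapes the dependency
# graph asserted further down.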
def filter(current, subdirs):
return (current, [d for d in subdirs.dirs if d in parallels],
[d for d in subdirs.dirs if d not in parallels])
start, deps = traversal.compute_dependencies(filter)
self.assertEqual(start, ('X',))
self.maxDiff = None
self.assertEqual(deps, {
'A': ('',),
'B': ('A',),
'C': ('F',),
'D': ('G', 'H'),
'E': ('B',),
'F': ('E',),
'G': ('C',),
'H': ('C',),
'I': ('D',),
'J': ('D',),
'K': ('T', 'O', 'U'),
'L': ('Q', 'V'),
'M': ('I',),
'N': ('M',),
'O': ('J',),
'P': ('J',),
'Q': ('K',),
'R': ('K',),
'S': ('L',),
'T': ('N',),
'U': ('P',),
'V': ('R',),
'W': ('S',),
'X': ('W',),
})
self.assertEqual(list(traversal.traverse('', filter)),
['', 'A', 'B', 'E', 'F', 'C', 'G', 'H', 'D', 'I',
'M', 'N', 'T', 'J', 'O', 'P', 'U', 'K', 'Q', 'R',
'V', 'L', 'S', 'W', 'X'])
self.assertEqual(list(traversal.traverse('C', filter)),
['C', 'G', 'H'])
def test_traversal_2(self):
traversal = RecursiveMakeTraversal()
traversal.add('', dirs=['A', 'B', 'C'])
traversal.add('A')
traversal.add('B', dirs=['D', 'E', 'F'])
traversal.add('C', dirs=['G', 'H', 'I'])
traversal.add('D')
traversal.add('E')
traversal.add('F')
traversal.add('G')
traversal.add('H')
traversal.add('I')
start, deps = traversal.compute_dependencies()
self.assertEqual(start, ('I',))
self.assertEqual(deps, {
'A': ('',),
'B': ('A',),
'C': ('F',),
'D': ('B',),
'E': ('D',),
'F': ('E',),
'G': ('C',),
'H': ('G',),
'I': ('H',),
})
def test_traversal_filter(self):
traversal = RecursiveMakeTraversal()
traversal.add('', dirs=['A', 'B', 'C'])
traversal.add('A')
traversal.add('B', dirs=['D', 'E', 'F'])
traversal.add('C', dirs=['G', 'H', 'I'])
traversal.add('D')
traversal.add('E')
traversal.add('F')
traversal.add('G')
traversal.add('H')
traversal.add('I')
def filter(current, subdirs):
if current == 'B':
current = None
return current, [], subdirs.dirs
start, deps = traversal.compute_dependencies(filter)
self.assertEqual(start, ('I',))
self.assertEqual(deps, {
'A': ('',),
'C': ('F',),
'D': ('A',),
'E': ('D',),
'F': ('E',),
'G': ('C',),
'H': ('G',),
'I': ('H',),
})
class TestRecursiveMakeBackend(BackendTester):
def test_basic(self):
"""Ensure the RecursiveMakeBackend works without error."""
env = self._consume('stub0', RecursiveMakeBackend)
self.assertTrue(os.path.exists(mozpath.join(env.topobjdir,
'backend.RecursiveMakeBackend')))
self.assertTrue(os.path.exists(mozpath.join(env.topobjdir,
'backend.RecursiveMakeBackend.pp')))
def test_output_files(self):
"""Ensure proper files are generated."""
env = self._consume('stub0', RecursiveMakeBackend)
expected = ['', 'dir1', 'dir2']
for d in expected:
out_makefile = mozpath.join(env.topobjdir, d, 'Makefile')
out_backend = mozpath.join(env.topobjdir, d, 'backend.mk')
self.assertTrue(os.path.exists(out_makefile))
self.assertTrue(os.path.exists(out_backend))
def test_makefile_conversion(self):
"""Ensure Makefile.in is converted properly."""
env = self._consume('stub0', RecursiveMakeBackend)
p = mozpath.join(env.topobjdir, 'Makefile')
lines = [l.strip() for l in open(p, 'rt').readlines()[1:] if not l.startswith('#')]
self.assertEqual(lines, [
'DEPTH := .',
'topsrcdir := %s' % env.topsrcdir,
'srcdir := %s' % env.topsrcdir,
'VPATH := %s' % env.topsrcdir,
'relativesrcdir := .',
'include $(DEPTH)/config/autoconf.mk',
'',
'FOO := foo',
'',
'include $(topsrcdir)/config/recurse.mk',
])
def test_missing_makefile_in(self):
"""Ensure missing Makefile.in results in Makefile creation."""
env = self._consume('stub0', RecursiveMakeBackend)
p = mozpath.join(env.topobjdir, 'dir2', 'Makefile')
self.assertTrue(os.path.exists(p))
lines = [l.strip() for l in open(p, 'rt').readlines()]
self.assertEqual(len(lines), 9)
self.assertTrue(lines[0].startswith('# THIS FILE WAS AUTOMATICALLY'))
def test_backend_mk(self):
"""Ensure backend.mk file is written out properly."""
env = self._consume('stub0', RecursiveMakeBackend)
p = mozpath.join(env.topobjdir, 'backend.mk')
lines = [l.strip() for l in open(p, 'rt').readlines()[2:]]
self.assertEqual(lines, [
'DIRS := dir1 dir2',
'TEST_DIRS := dir3',
])
def test_mtime_no_change(self):
"""Ensure mtime is not updated if file content does not change."""
env = self._consume('stub0', RecursiveMakeBackend)
makefile_path = mozpath.join(env.topobjdir, 'Makefile')
backend_path = mozpath.join(env.topobjdir, 'backend.mk')
makefile_mtime = os.path.getmtime(makefile_path)
backend_mtime = os.path.getmtime(backend_path)
reader = BuildReader(env)
emitter = TreeMetadataEmitter(env)
backend = RecursiveMakeBackend(env)
backend.consume(emitter.emit(reader.read_topsrcdir()))
self.assertEqual(os.path.getmtime(makefile_path), makefile_mtime)
self.assertEqual(os.path.getmtime(backend_path), backend_mtime)
def test_substitute_config_files(self):
"""Ensure substituted config files are produced."""
env = self._consume('substitute_config_files', RecursiveMakeBackend)
p = mozpath.join(env.topobjdir, 'foo')
self.assertTrue(os.path.exists(p))
lines = [l.strip() for l in open(p, 'rt').readlines()]
self.assertEqual(lines, [
'TEST = foo',
])
def test_variable_passthru(self):
"""Ensure variable passthru is written out correctly."""
env = self._consume('variable_passthru', RecursiveMakeBackend)
backend_path = mozpath.join(env.topobjdir, 'backend.mk')
lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]]
expected = {
'DISABLE_STL_WRAPPING': [
'DISABLE_STL_WRAPPING := 1',
],
'EXTRA_COMPONENTS': [
'EXTRA_COMPONENTS += bar.js',
'EXTRA_COMPONENTS += foo.js',
],
'EXTRA_PP_COMPONENTS': [
'EXTRA_PP_COMPONENTS += bar.pp.js',
'EXTRA_PP_COMPONENTS += foo.pp.js',
],
'FAIL_ON_WARNINGS': [
'FAIL_ON_WARNINGS := 1',
],
'MSVC_ENABLE_PGO': [
'MSVC_ENABLE_PGO := 1',
],
'VISIBILITY_FLAGS': [
'VISIBILITY_FLAGS :=',
],
'RCFILE': [
'RCFILE := foo.rc',
],
'RESFILE': [
'RESFILE := bar.res',
],
'RCINCLUDE': [
'RCINCLUDE := bar.rc',
],
'DEFFILE': [
'DEFFILE := baz.def',
],
'USE_STATIC_LIBS': [
'USE_STATIC_LIBS := 1',
],
'MOZBUILD_CFLAGS': [
'MOZBUILD_CFLAGS += -fno-exceptions',
'MOZBUILD_CFLAGS += -w',
],
'MOZBUILD_CXXFLAGS': [
'MOZBUILD_CXXFLAGS += -fcxx-exceptions',
'MOZBUILD_CXXFLAGS += -include foo.h',
],
'MOZBUILD_LDFLAGS': [
'MOZBUILD_LDFLAGS += -framework Foo',
'MOZBUILD_LDFLAGS += -x',
'MOZBUILD_LDFLAGS += -DELAYLOAD:foo.dll',
'MOZBUILD_LDFLAGS += -DELAYLOAD:bar.dll',
],
'WIN32_EXE_LDFLAGS': [
'WIN32_EXE_LDFLAGS += -subsystem:console',
],
}
for var, val in expected.items():
# print("test_variable_passthru[%s]" % (var))
found = [str for str in lines if str.startswith(var)]
self.assertEqual(found, val)
def test_sources(self):
"""Ensure SOURCES and HOST_SOURCES are handled properly."""
env = self._consume('sources', RecursiveMakeBackend)
backend_path = mozpath.join(env.topobjdir, 'backend.mk')
lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]]
expected = {
'ASFILES': [
'ASFILES += bar.s',
'ASFILES += foo.asm',
],
'CMMSRCS': [
'CMMSRCS += bar.mm',
'CMMSRCS += foo.mm',
],
'CSRCS': [
'CSRCS += bar.c',
'CSRCS += foo.c',
],
'HOST_CPPSRCS': [
'HOST_CPPSRCS += bar.cpp',
'HOST_CPPSRCS += foo.cpp',
],
'HOST_CSRCS': [
'HOST_CSRCS += bar.c',
'HOST_CSRCS += foo.c',
],
'SSRCS': [
'SSRCS += baz.S',
'SSRCS += foo.S',
],
}
for var, val in expected.items():
found = [str for str in lines if str.startswith(var)]
self.assertEqual(found, val)
def test_exports(self):
"""Ensure EXPORTS is handled properly."""
env = self._consume('exports', RecursiveMakeBackend)
# EXPORTS files should appear in the dist_include install manifest.
m = InstallManifest(path=mozpath.join(env.topobjdir,
'_build_manifests', 'install', 'dist_include'))
self.assertEqual(len(m), 7)
self.assertIn('foo.h', m)
self.assertIn('mozilla/mozilla1.h', m)
self.assertIn('mozilla/dom/dom2.h', m)
def test_generated_files(self):
"""Ensure GENERATED_FILES is handled properly."""
env = self._consume('generated-files', RecursiveMakeBackend)
backend_path = mozpath.join(env.topobjdir, 'backend.mk')
lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]]
expected = [
'GENERATED_FILES += bar.c',
'bar.c: %s/generate-bar.py' % env.topsrcdir,
'$(call py_action,file_generate,%s/generate-bar.py bar.c)' % env.topsrcdir,
'',
'GENERATED_FILES += foo.c',
'foo.c: %s/generate-foo.py %s/foo-data' % (env.topsrcdir, env.topsrcdir),
'$(call py_action,file_generate,%s/generate-foo.py foo.c %s/foo-data)' % (env.topsrcdir, env.topsrcdir),
'',
'GENERATED_FILES += quux.c',
]
self.maxDiff = None
self.assertEqual(lines, expected)
def test_resources(self):
"""Ensure RESOURCE_FILES is handled properly."""
env = self._consume('resources', RecursiveMakeBackend)
# RESOURCE_FILES should appear in the dist_bin install manifest.
m = InstallManifest(path=os.path.join(env.topobjdir,
'_build_manifests', 'install', 'dist_bin'))
self.assertEqual(len(m), 10)
self.assertIn('res/foo.res', m)
self.assertIn('res/fonts/font1.ttf', m)
self.assertIn('res/fonts/desktop/desktop2.ttf', m)
self.assertIn('res/bar.res', m)
self.assertIn('res/tests/test.manifest', m)
self.assertIn('res/tests/extra.manifest', m)
def test_js_preference_files(self):
"""Ensure PREF_JS_EXPORTS is written out correctly."""
env = self._consume('js_preference_files', RecursiveMakeBackend)
backend_path = os.path.join(env.topobjdir, 'backend.mk')
lines = [l.strip() for l in open(backend_path, 'rt').readlines()]
# Avoid positional parameter and async related breakage
var = 'PREF_JS_EXPORTS'
found = [val for val in lines if val.startswith(var)]
# Assignment[aa], append[cc], conditional[valid]
expected = ('aa/aa.js', 'bb/bb.js', 'cc/cc.js', 'dd/dd.js', 'valid_val/prefs.js')
expected_top = ('ee/ee.js', 'ff/ff.js')
self.assertEqual(found,
['PREF_JS_EXPORTS += $(topsrcdir)/%s' % val for val in expected_top] +
['PREF_JS_EXPORTS += $(srcdir)/%s' % val for val in expected])
def test_test_manifests_files_written(self):
"""Ensure test manifests get turned into files."""
env = self._consume('test-manifests-written', RecursiveMakeBackend)
tests_dir = mozpath.join(env.topobjdir, '_tests')
m_master = mozpath.join(tests_dir, 'testing', 'mochitest', 'tests', 'mochitest.ini')
x_master = mozpath.join(tests_dir, 'xpcshell', 'xpcshell.ini')
self.assertTrue(os.path.exists(m_master))
self.assertTrue(os.path.exists(x_master))
lines = [l.strip() for l in open(x_master, 'rt').readlines()]
self.assertEqual(lines, [
'; THIS FILE WAS AUTOMATICALLY GENERATED. DO NOT MODIFY BY HAND.',
'',
'[include:dir1/xpcshell.ini]',
'[include:xpcshell.ini]',
])
all_tests_path = mozpath.join(env.topobjdir, 'all-tests.json')
self.assertTrue(os.path.exists(all_tests_path))
with open(all_tests_path, 'rt') as fh:
o = json.load(fh)
self.assertIn('xpcshell.js', o)
self.assertIn('dir1/test_bar.js', o)
self.assertEqual(len(o['xpcshell.js']), 1)
def test_test_manifest_pattern_matches_recorded(self):
"""Pattern matches in test manifests' support-files should be recorded."""
env = self._consume('test-manifests-written', RecursiveMakeBackend)
m = InstallManifest(path=mozpath.join(env.topobjdir,
'_build_manifests', 'install', 'tests'))
# This is not the most robust test in the world, but it gets the job
# done.
entries = [e for e in m._dests.keys() if '**' in e]
self.assertEqual(len(entries), 1)
self.assertIn('support/**', entries[0])
def test_xpidl_generation(self):
"""Ensure xpidl files and directories are written out."""
env = self._consume('xpidl', RecursiveMakeBackend)
# Install manifests should contain entries.
install_dir = mozpath.join(env.topobjdir, '_build_manifests',
'install')
self.assertTrue(os.path.isfile(mozpath.join(install_dir, 'dist_idl')))
self.assertTrue(os.path.isfile(mozpath.join(install_dir, 'xpidl')))
m = InstallManifest(path=mozpath.join(install_dir, 'dist_idl'))
self.assertEqual(len(m), 2)
self.assertIn('bar.idl', m)
self.assertIn('foo.idl', m)
m = InstallManifest(path=mozpath.join(install_dir, 'xpidl'))
self.assertIn('.deps/my_module.pp', m)
m = InstallManifest(path=os.path.join(install_dir, 'dist_bin'))
self.assertIn('components/my_module.xpt', m)
m = InstallManifest(path=mozpath.join(install_dir, 'dist_include'))
self.assertIn('foo.h', m)
p = mozpath.join(env.topobjdir, 'config/makefiles/xpidl')
self.assertTrue(os.path.isdir(p))
self.assertTrue(os.path.isfile(mozpath.join(p, 'Makefile')))
def test_old_install_manifest_deleted(self):
# Simulate an install manifest from a previous backend version. Ensure
# it is deleted.
env = self._get_environment('stub0')
purge_dir = mozpath.join(env.topobjdir, '_build_manifests', 'install')
manifest_path = mozpath.join(purge_dir, 'old_manifest')
os.makedirs(purge_dir)
m = InstallManifest()
m.write(path=manifest_path)
self.assertTrue(os.path.exists(manifest_path))
self._consume('stub0', RecursiveMakeBackend, env)
self.assertFalse(os.path.exists(manifest_path))
def test_install_manifests_written(self):
env, objs = self._emit('stub0')
backend = RecursiveMakeBackend(env)
m = InstallManifest()
backend._install_manifests['testing'] = m
m.add_symlink(__file__, 'self')
backend.consume(objs)
man_dir = mozpath.join(env.topobjdir, '_build_manifests', 'install')
self.assertTrue(os.path.isdir(man_dir))
expected = ['testing']
for e in expected:
full = mozpath.join(man_dir, e)
self.assertTrue(os.path.exists(full))
m2 = InstallManifest(path=full)
self.assertEqual(m, m2)
def test_ipdl_sources(self):
"""Test that IPDL_SOURCES are written to ipdlsrcs.mk correctly."""
env = self._consume('ipdl_sources', RecursiveMakeBackend)
manifest_path = mozpath.join(env.topobjdir,
'ipc', 'ipdl', 'ipdlsrcs.mk')
lines = [l.strip() for l in open(manifest_path, 'rt').readlines()]
# Handle Windows paths correctly
topsrcdir = env.topsrcdir.replace(os.sep, '/')
expected = [
"ALL_IPDLSRCS := %s/bar/bar.ipdl %s/bar/bar2.ipdlh %s/foo/foo.ipdl %s/foo/foo2.ipdlh" % tuple([topsrcdir] * 4),
"CPPSRCS := UnifiedProtocols0.cpp",
"IPDLDIRS := %s/bar %s/foo" % (topsrcdir, topsrcdir),
]
found = [str for str in lines if str.startswith(('ALL_IPDLSRCS',
'CPPSRCS',
'IPDLDIRS'))]
self.assertEqual(found, expected)
def test_defines(self):
"""Test that DEFINES are written to backend.mk correctly."""
env = self._consume('defines', RecursiveMakeBackend)
backend_path = mozpath.join(env.topobjdir, 'backend.mk')
lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]]
var = 'DEFINES'
defines = [val for val in lines if val.startswith(var)]
expected = ['DEFINES += -DFOO -DBAZ=\'"ab\'\\\'\'cd"\' -UQUX -DBAR=7 -DVALUE=\'xyz\'']
self.assertEqual(defines, expected)
def test_local_includes(self):
"""Test that LOCAL_INCLUDES are written to backend.mk correctly."""
env = self._consume('local_includes', RecursiveMakeBackend)
backend_path = mozpath.join(env.topobjdir, 'backend.mk')
lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]]
expected = [
'LOCAL_INCLUDES += -I$(topsrcdir)/bar/baz',
'LOCAL_INCLUDES += -I$(srcdir)/foo',
]
found = [str for str in lines if str.startswith('LOCAL_INCLUDES')]
self.assertEqual(found, expected)
def test_generated_includes(self):
"""Test that GENERATED_INCLUDES are written to backend.mk correctly."""
env = self._consume('generated_includes', RecursiveMakeBackend)
backend_path = mozpath.join(env.topobjdir, 'backend.mk')
lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]]
topobjdir = env.topobjdir.replace('\\', '/')
expected = [
'LOCAL_INCLUDES += -I%s/bar/baz' % topobjdir,
'LOCAL_INCLUDES += -Ifoo',
]
found = [str for str in lines if str.startswith('LOCAL_INCLUDES')]
self.assertEqual(found, expected)
def test_final_target(self):
"""Test that FINAL_TARGET is written to backend.mk correctly."""
env = self._consume('final_target', RecursiveMakeBackend)
final_target_rule = "FINAL_TARGET = $(if $(XPI_NAME),$(DIST)/xpi-stage/$(XPI_NAME),$(DIST)/bin)$(DIST_SUBDIR:%=/%)"
expected = dict()
expected[env.topobjdir] = []
expected[mozpath.join(env.topobjdir, 'both')] = [
'XPI_NAME = mycrazyxpi',
'DIST_SUBDIR = asubdir',
final_target_rule
]
expected[mozpath.join(env.topobjdir, 'dist-subdir')] = [
'DIST_SUBDIR = asubdir',
final_target_rule
]
expected[mozpath.join(env.topobjdir, 'xpi-name')] = [
'XPI_NAME = mycrazyxpi',
final_target_rule
]
expected[mozpath.join(env.topobjdir, 'final-target')] = [
'FINAL_TARGET = $(DEPTH)/random-final-target'
]
for key, expected_rules in expected.iteritems():
backend_path = mozpath.join(key, 'backend.mk')
lines = [l.strip() for l in open(backend_path, 'rt').readlines()[2:]]
found = [str for str in lines if
str.startswith('FINAL_TARGET') or str.startswith('XPI_NAME') or
str.startswith('DIST_SUBDIR')]
self.assertEqual(found, expected_rules)
def test_config(self):
"""Test that CONFIGURE_SUBST_FILES and CONFIGURE_DEFINE_FILES are
properly handled."""
env = self._consume('test_config', RecursiveMakeBackend)
self.assertEqual(
open(os.path.join(env.topobjdir, 'file'), 'r').readlines(), [
'#ifdef foo\n',
'bar baz\n',
'@bar@\n',
])
self.assertEqual(
open(os.path.join(env.topobjdir, 'file.h'), 'r').readlines(), [
'/* Comment */\n',
'#define foo\n',
'#define foo baz qux\n',
'#define foo baz qux\n',
'#define bar\n',
'#define bar 42\n',
'/* #undef bar */\n',
'\n',
'# define baz 1\n',
'\n',
'#ifdef foo\n',
'# define foo baz qux\n',
'# define foo baz qux\n',
' # define foo baz qux \n',
'#endif\n',
])
def test_jar_manifests(self):
env = self._consume('jar-manifests', RecursiveMakeBackend)
with open(os.path.join(env.topobjdir, 'backend.mk'), 'rb') as fh:
lines = fh.readlines()
lines = [line.rstrip() for line in lines]
self.assertIn('JAR_MANIFEST := %s/jar.mn' % env.topsrcdir, lines)
def test_extra_js_modules(self):
env = self._consume('extra-js-modules', RecursiveMakeBackend)
with open(os.path.join(env.topobjdir, 'backend.mk'), 'rb') as fh:
lines = fh.readlines()
lines = [line.rstrip() for line in lines]
self.maxDiff = None
expected = [
'extra_js__FILES := module1.js module2.js',
'extra_js__DEST = $(FINAL_TARGET)/modules/',
'extra_js__TARGET := misc',
'INSTALL_TARGETS += extra_js_',
'extra_js_submodule_FILES := module3.js module4.js',
'extra_js_submodule_DEST = $(FINAL_TARGET)/modules/submodule',
'extra_js_submodule_TARGET := misc',
'INSTALL_TARGETS += extra_js_submodule',
'extra_pp_js_ := pp-module1.js',
'extra_pp_js__PATH = $(FINAL_TARGET)/modules/',
'extra_pp_js__TARGET := misc',
'PP_TARGETS += extra_pp_js_',
'extra_pp_js_ppsub := pp-module2.js',
'extra_pp_js_ppsub_PATH = $(FINAL_TARGET)/modules/ppsub',
'extra_pp_js_ppsub_TARGET := misc',
'PP_TARGETS += extra_pp_js_ppsub',
]
found = [line for line in lines if line.startswith(('extra_',
'INSTALL_TARGETS',
'PP_TARGETS'))]
self.assertEqual(expected, found)
def test_test_manifests_duplicate_support_files(self):
"""Ensure duplicate support-files in test manifests work."""
env = self._consume('test-manifests-duplicate-support-files',
RecursiveMakeBackend)
p = os.path.join(env.topobjdir, '_build_manifests', 'install', 'tests')
m = InstallManifest(p)
self.assertIn('testing/mochitest/tests/support-file.txt', m)
def test_android_eclipse(self):
env = self._consume('android_eclipse', RecursiveMakeBackend)
with open(mozpath.join(env.topobjdir, 'backend.mk'), 'rb') as fh:
lines = fh.readlines()
lines = [line.rstrip() for line in lines]
# Dependencies first.
self.assertIn('ANDROID_ECLIPSE_PROJECT_main1: target1 target2', lines)
self.assertIn('ANDROID_ECLIPSE_PROJECT_main4: target3 target4', lines)
command_template = '\t$(call py_action,process_install_manifest,' + \
'--no-remove --no-remove-all-directory-symlinks ' + \
'--no-remove-empty-directories %s %s.manifest)'
# Commands second.
for project_name in ['main1', 'main2', 'library1', 'library2']:
stem = '%s/android_eclipse/%s' % (env.topobjdir, project_name)
self.assertIn(command_template % (stem, stem), lines)
# Projects declared in subdirectories.
with open(mozpath.join(env.topobjdir, 'subdir', 'backend.mk'), 'rb') as fh:
lines = fh.readlines()
lines = [line.rstrip() for line in lines]
self.assertIn('ANDROID_ECLIPSE_PROJECT_submain: subtarget1 subtarget2', lines)
for project_name in ['submain', 'sublibrary']:
# Destination and install manifest are relative to topobjdir.
stem = '%s/android_eclipse/%s' % (env.topobjdir, project_name)
self.assertIn(command_template % (stem, stem), lines)
def test_install_manifests_package_tests(self):
"""Ensure test suites honor package_tests=False."""
env = self._consume('test-manifests-package-tests', RecursiveMakeBackend)
tests_dir = mozpath.join(env.topobjdir, '_tests')
all_tests_path = mozpath.join(env.topobjdir, 'all-tests.json')
self.assertTrue(os.path.exists(all_tests_path))
with open(all_tests_path, 'rt') as fh:
o = json.load(fh)
self.assertIn('mochitest.js', o)
self.assertIn('not_packaged.java', o)
man_dir = mozpath.join(env.topobjdir, '_build_manifests', 'install')
self.assertTrue(os.path.isdir(man_dir))
full = mozpath.join(man_dir, 'tests')
self.assertTrue(os.path.exists(full))
m = InstallManifest(path=full)
# Only mochitest.js should be in the install manifest.
self.assertTrue('testing/mochitest/tests/mochitest.js' in m)
# The path is odd here because we do not normalize at test manifest
# processing time. This is a fragile test because there's currently no
# way to iterate the manifest.
self.assertFalse('instrumentation/./not_packaged.java' in m)
if __name__ == '__main__':
main()
| mpl-2.0 |
Mazecreator/tensorflow | tensorflow/compiler/tests/xla_device_test.py | 76 | 1639 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test cases for XLA devices."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class XlaDeviceTest(test.TestCase):
def testCopies(self):
"""Tests that copies between GPU and XLA devices work."""
if not test.is_gpu_available():
return
with session_lib.Session() as sess:
x = array_ops.placeholder(dtypes.float32, [2])
with ops.device("GPU"):
y = x * 2
with ops.device("device:XLA_CPU:0"):
z = y * y
with ops.device("GPU"):
w = y + z
result = sess.run(w, {x: [1.5, 0.5]})
self.assertAllClose(result, [12., 2.], rtol=1e-3)
if __name__ == "__main__":
test.main()
| apache-2.0 |
datakortet/django-cms | cms/tests/security.py | 11 | 8935 | from __future__ import with_statement
from cms.api import create_page, add_plugin
from cms.models.pluginmodel import CMSPlugin
from cms.plugins.text.models import Text
from cms.test_utils.testcases import (CMSTestCase, URL_CMS_PLUGIN_ADD,
URL_CMS_PLUGIN_EDIT, URL_CMS_PLUGIN_REMOVE)
from django.conf import settings
from django.core.urlresolvers import reverse
class SecurityTests(CMSTestCase):
"""
Test security issues by trying some naive requests to add/alter/delete data.
"""
def get_data(self):
page = create_page("page", "nav_playground.html", "en")
placeholder = page.placeholders.get(slot='body')
superuser = self.get_superuser()
staff = self.get_staff_user_with_no_permissions()
return page, placeholder, superuser, staff
def test_add(self):
"""
Test adding a plugin to a *PAGE*.
"""
page, placeholder, superuser, staff = self.get_data()
plugin_data = {
'plugin_type':"TextPlugin",
'language':settings.LANGUAGES[0][0],
'placeholder':page.placeholders.get(slot="body").pk,
}
self.assertEqual(CMSPlugin.objects.count(), 0)
# log the user out and post the plugin data to the cms add-plugin URL.
self.client.logout()
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
# since the user is not logged in, they should be prompted to log in.
self.assertTemplateUsed(response, 'admin/login.html')
self.assertEqual(CMSPlugin.objects.count(), 0)
# now log a staff user without permissions in and do the same as above.
self.client.login(username='staff', password='staff')
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
# the user is logged in and the security check fails, so it should 403.
self.assertEqual(response.status_code, 403)
self.assertEqual(CMSPlugin.objects.count(), 0)
def test_edit(self):
"""
Test editing a *PAGE* plugin
"""
page, placeholder, superuser, staff = self.get_data()
# create the plugin using a superuser
plugin = add_plugin(placeholder, 'TextPlugin', 'en', body='body')
plugin_data = {
'plugin_id': plugin.pk,
'body': 'newbody',
}
self.assertEqual(plugin.body, 'body') # check the body is as expected.
# log the user out, try to edit the plugin
self.client.logout()
url = URL_CMS_PLUGIN_EDIT + '%s/' % plugin.pk
response = self.client.post(url, plugin_data)
# since the user is not logged in, they should be prompted to log in.
self.assertTemplateUsed(response, 'admin/login.html')
plugin = self.reload(plugin)
self.assertEqual(plugin.body, 'body')
# now log a staff user without permissions in and do the same as above.
self.client.login(username='staff', password='staff')
response = self.client.post(url, plugin_data)
# the user is logged in and the security check fails, so it should 403.
self.assertEqual(response.status_code, 403)
plugin = self.reload(plugin)
self.assertEqual(plugin.body, 'body')
def test_delete(self):
"""
Test deleting a *PAGE* plugin
"""
page, placeholder, superuser, staff = self.get_data()
plugin = add_plugin(placeholder, 'TextPlugin', 'en', body='body')
plugin_data = {
'plugin_id': plugin.pk,
}
plugin = self.reload(plugin)
self.assertEqual(plugin.body, 'body')
# log the user out, try to remove the plugin
self.client.logout()
response = self.client.post(URL_CMS_PLUGIN_REMOVE, plugin_data)
# since the user is not logged in, they should be prompted to log in.
self.assertTemplateUsed(response, 'admin/login.html')
self.assertEqual(CMSPlugin.objects.count(), 1)
plugin = self.reload(plugin)
self.assertEqual(plugin.body, 'body')
# now log a staff user without permissions in and do the same as above.
self.client.login(username='staff', password='staff')
response = self.client.post(URL_CMS_PLUGIN_REMOVE, plugin_data)
# the user is logged in and the security check fails, so it should 403.
self.assertEqual(response.status_code, 403)
self.assertEqual(CMSPlugin.objects.count(), 1)
plugin = self.reload(plugin)
self.assertEqual(plugin.body, 'body')
def test_add_ph(self):
"""
Test adding a *NON PAGE* plugin
"""
page, placeholder, superuser, staff = self.get_data()
plugin_data = {
'plugin_type':"TextPlugin",
'language':settings.LANGUAGES[0][0],
'placeholder':page.placeholders.get(slot="body").pk,
}
url = reverse('admin:placeholderapp_example1_add_plugin')
self.assertEqual(CMSPlugin.objects.count(), 0)
# log the user out and try to add a plugin using PlaceholderAdmin
self.client.logout()
response = self.client.post(url, plugin_data)
# since the user is not logged in, they should be prompted to log in.
self.assertTemplateUsed(response, 'admin/login.html')
self.assertEqual(CMSPlugin.objects.count(), 0)
# now log a staff user without permissions in and do the same as above.
self.client.login(username='staff', password='staff')
response = self.client.post(url, plugin_data)
# the user is logged in and the security check fails, so it should 403.
self.assertEqual(response.status_code, 403)
self.assertEqual(CMSPlugin.objects.count(), 0)
def test_edit_ph(self):
"""
Test editing a *NON PAGE* plugin
"""
page, placeholder, superuser, staff = self.get_data()
plugin = add_plugin(placeholder, 'TextPlugin', 'en', body='body')
url = reverse('admin:placeholderapp_example1_edit_plugin', args=(plugin.pk,))
plugin_data = {
'body': 'newbody',
'language': 'en',
'plugin_id': plugin.pk,
}
plugin = self.reload(plugin)
self.assertEqual(plugin.body, 'body')
# log the user out and try to edit a plugin using PlaceholderAdmin
self.client.logout()
response = self.client.post(url, plugin_data)
# since the user is not logged in, they should be prompted to log in.
self.assertTemplateUsed(response, 'admin/login.html')
plugin = self.reload(plugin)
self.assertEqual(plugin.body, 'body')
# now log a staff user without permissions in and do the same as above.
self.client.login(username='staff', password='staff')
response = self.client.post(url, plugin_data)
# the user is logged in and the security check fails, so it should 403.
self.assertEqual(response.status_code, 403)
plugin = self.reload(plugin)
self.assertEqual(plugin.body, 'body')
def test_delete_ph(self):
page, placeholder, superuser, staff = self.get_data()
plugin = add_plugin(placeholder, 'TextPlugin', 'en', body='body')
plugin_data = {
'plugin_id': plugin.pk,
}
plugin = self.reload(plugin)
self.assertEqual(plugin.body, 'body')
url = reverse('admin:placeholderapp_example1_remove_plugin')
# log the user out and try to remove a plugin using PlaceholderAdmin
self.client.logout()
response = self.client.post(url, plugin_data)
# since the user is not logged in, they should be prompted to log in.
self.assertTemplateUsed(response, 'admin/login.html')
self.assertEqual(CMSPlugin.objects.count(), 1)
# now log a staff user without permissions in and do the same as above.
self.client.login(username='staff', password='staff')
response = self.client.post(url, plugin_data)
# the user is logged in and the security check fails, so it should 403.
self.assertEqual(response.status_code, 403)
self.assertEqual(CMSPlugin.objects.count(), 1)
def test_text_plugin_xss(self):
page, placeholder, superuser, staff = self.get_data()
with self.login_user_context(superuser):
plugin = add_plugin(placeholder, 'TextPlugin', 'en', body='body')
# ACTUAL TEST STARTS HERE.
data = {
"body": "<div onload='do_evil_stuff();'>divcontent</div><a href='javascript:do_evil_stuff()'>acontent</a>"
}
edit_url = '%s%s/' % (URL_CMS_PLUGIN_EDIT, plugin.pk)
response = self.client.post(edit_url, data)
self.assertEquals(response.status_code, 200)
txt = Text.objects.all()[0]
self.assertEquals(txt.body, '<div>divcontent</div><a>acontent</a>')
| bsd-3-clause |
arkmaxim/grpc | src/python/grpcio_tests/tests/unit/beta/_connectivity_channel_test.py | 15 | 2117 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests of grpc.beta._connectivity_channel."""
import unittest
from grpc.beta import interfaces
class ConnectivityStatesTest(unittest.TestCase):
def testBetaConnectivityStates(self):
self.assertIsNotNone(interfaces.ChannelConnectivity.IDLE)
self.assertIsNotNone(interfaces.ChannelConnectivity.CONNECTING)
self.assertIsNotNone(interfaces.ChannelConnectivity.READY)
self.assertIsNotNone(interfaces.ChannelConnectivity.TRANSIENT_FAILURE)
self.assertIsNotNone(interfaces.ChannelConnectivity.FATAL_FAILURE)
if __name__ == '__main__':
unittest.main(verbosity=2)
| bsd-3-clause |
mzdaniel/oh-mainline | vendor/packages/Django/django/db/backends/mysql/client.py | 524 | 1380 | import os
import sys
from django.db.backends import BaseDatabaseClient
class DatabaseClient(BaseDatabaseClient):
executable_name = 'mysql'
def runshell(self):
settings_dict = self.connection.settings_dict
args = [self.executable_name]
db = settings_dict['OPTIONS'].get('db', settings_dict['NAME'])
user = settings_dict['OPTIONS'].get('user', settings_dict['USER'])
passwd = settings_dict['OPTIONS'].get('passwd', settings_dict['PASSWORD'])
host = settings_dict['OPTIONS'].get('host', settings_dict['HOST'])
port = settings_dict['OPTIONS'].get('port', settings_dict['PORT'])
defaults_file = settings_dict['OPTIONS'].get('read_default_file')
# Seems to be no good way to set sql_mode with CLI.
if defaults_file:
args += ["--defaults-file=%s" % defaults_file]
if user:
args += ["--user=%s" % user]
if passwd:
args += ["--password=%s" % passwd]
if host:
if '/' in host:
args += ["--socket=%s" % host]
else:
args += ["--host=%s" % host]
if port:
args += ["--port=%s" % port]
if db:
args += [db]
if os.name == 'nt':
sys.exit(os.system(" ".join(args)))
else:
os.execvp(self.executable_name, args)
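# Illustrative sketch (hypothetical settings, not part of Django): for a
# settings_dict like the one below, runshell() execs roughly this argv.
# Note the '/' check above: a HOST containing a slash is treated as a Unix
# socket path and becomes --socket instead of --host.
#
# settings_dict = {'NAME': 'mydb', 'USER': 'alice', 'PASSWORD': 's3cret',
#                  'HOST': '/var/run/mysqld/mysqld.sock', 'PORT': '',
#                  'OPTIONS': {}}
# -> ['mysql', '--user=alice', '--password=s3cret',
#     '--socket=/var/run/mysqld/mysqld.sock', 'mydb']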
| agpl-3.0 |
stefanhahmann/pybossa | test/test_repository/test_user_repository.py | 6 | 8682 | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2014 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
# Cache global variables for timeouts
from default import Test, db
from nose.tools import assert_raises
from factories import UserFactory
from pybossa.repositories import UserRepository
from pybossa.exc import WrongObjectError, DBIntegrityError
class TestUserRepository(Test):
def setUp(self):
super(TestUserRepository, self).setUp()
self.user_repo = UserRepository(db)
def test_get_return_none_if_no_user(self):
"""Test get method returns None if there is no user with the
specified id"""
user = self.user_repo.get(200)
assert user is None, user
def test_get_returns_user(self):
"""Test get method returns a user if exists"""
user = UserFactory.create()
retrieved_user = self.user_repo.get(user.id)
assert user == retrieved_user, retrieved_user
def test_get_by_name_return_none_if_no_user(self):
"""Test get_by_name returns None when a user with the specified
name does not exist"""
user = self.user_repo.get_by_name('thisuserdoesnotexist')
assert user is None, user
def test_get_by_name_returns_the_user(self):
"""Test get_by_name returns a user if exists"""
user = UserFactory.create()
retrieved_user = self.user_repo.get_by_name(user.name)
assert user == retrieved_user, retrieved_user
def test_get_by(self):
"""Test get_by returns a user with the specified attribute"""
user = UserFactory.create(name='Jon Snow')
retrieved_user = self.user_repo.get_by(name=user.name)
assert user == retrieved_user, retrieved_user
def test_get_by_returns_none_if_no_user(self):
"""Test get_by returns None if no user matches the query"""
UserFactory.create(name='Tyrion Lannister')
user = self.user_repo.get_by(name='no_name')
assert user is None, user
def test_get_all_returns_list_of_all_users(self):  # 'test_' prefix so the runner picks it up
"""Test get_all returns a list of all the existing users"""
users = UserFactory.create_batch(3)
retrieved_users = self.user_repo.get_all()
assert isinstance(retrieved_users, list)
assert len(retrieved_users) == len(users), retrieved_users
for user in retrieved_users:
assert user in users, user
def test_filter_by_no_matches(self):
"""Test filter_by returns an empty list if no users match the query"""
UserFactory.create(name='reek', fullname='Theon Greyjoy')
retrieved_users = self.user_repo.filter_by(name='asha')
assert isinstance(retrieved_users, list)
assert len(retrieved_users) == 0, retrieved_users
def test_filter_by_one_condition(self):
"""Test filter_by returns a list of users that meet the filtering
condition"""
UserFactory.create_batch(3, locale='es')
should_be_missing = UserFactory.create(locale='fr')
retrieved_users = self.user_repo.filter_by(locale='es')
assert len(retrieved_users) == 3, retrieved_users
assert should_be_missing not in retrieved_users, retrieved_users
def test_filter_by_multiple_conditions(self):
"""Test filter_by supports multiple-condition queries"""
UserFactory.create_batch(2, locale='es', privacy_mode=True)
user = UserFactory.create(locale='es', privacy_mode=False)
retrieved_users = self.user_repo.filter_by(locale='es',
privacy_mode=False)
assert len(retrieved_users) == 1, retrieved_users
assert user in retrieved_users, retrieved_users
def test_filter_by_limit_offset(self):
"""Test that filter_by supports limit and offset options"""
UserFactory.create_batch(4)
all_users = self.user_repo.filter_by()
first_two = self.user_repo.filter_by(limit=2)
last_two = self.user_repo.filter_by(limit=2, offset=2)
assert len(first_two) == 2, first_two
assert len(last_two) == 2, last_two
assert first_two == all_users[:2]
assert last_two == all_users[2:]
def test_search_by_name_returns_list(self):
"""Test search_by_name returns a list with search results"""
search = self.user_repo.search_by_name('')
assert isinstance(search, list), search.__class__
def test_search_by_name(self):
"""Test search_by_name returns a list with the user if searching by
either its name or fullname"""
user = UserFactory.create(name='greenseer', fullname='Jojen Reed')
search_by_name = self.user_repo.search_by_name('greenseer')
search_by_fullname = self.user_repo.search_by_name('Jojen Reed')
assert user in search_by_name, search_by_name
assert user in search_by_fullname, search_by_fullname
def test_search_by_name_capital_lower_letters(self):
"""Test search_by_name works the same with capital or lower letters"""
user_capitals = UserFactory.create(name='JOJEN')
user_lowers = UserFactory.create(name='meera')
search_lower = self.user_repo.search_by_name('jojen')
search_capital = self.user_repo.search_by_name('MEERA')
assert user_capitals in search_lower, search_lower
assert user_lowers in search_capital, search_capital
def test_search_by_name_substrings(self):
"""Test search_by_name works when searching by a substring"""
user = UserFactory.create(name='Hodor')
search = self.user_repo.search_by_name('odo')
assert user in search, search
def test_search_by_name_empty_string(self):
"""Test search_by_name returns an empty list when searching by '' """
user = UserFactory.create(name='Brandon')
search = self.user_repo.search_by_name('')
assert len(search) == 0, search
def test_total_users_no_users(self):
"""Test total_users return 0 if there are no users"""
count = self.user_repo.total_users()
assert count == 0, count
def test_total_users_count(self):
"""Test total_users return 1 if there is one user"""
UserFactory.create()
count = self.user_repo.total_users()
assert count == 1, count
def test_save(self):
"""Test save persist the user"""
user = UserFactory.build()
assert self.user_repo.get(user.id) is None
self.user_repo.save(user)
assert self.user_repo.get(user.id) == user, "User not saved"
def test_save_fails_if_integrity_error(self):
"""Test save raises a DBIntegrityError if the instance to be saved lacks
a required value"""
user = UserFactory.build(name=None)
assert_raises(DBIntegrityError, self.user_repo.save, user)
def test_save_only_saves_users(self):
"""Test save raises a WrongObjectError when an object which is not
a User instance is saved"""
bad_object = dict()
assert_raises(WrongObjectError, self.user_repo.save, bad_object)
def test_update(self):
"""Test update persists the changes made to the user"""
user = UserFactory.create(locale='en')
user.locale = 'it'
self.user_repo.update(user)
updated_user = self.user_repo.get(user.id)
assert updated_user.locale == 'it', updated_user
def test_update_fails_if_integrity_error(self):
"""Test update raises a DBIntegrityError if the instance to be updated
lacks a required value"""
user = UserFactory.create()
user.name = None
assert_raises(DBIntegrityError, self.user_repo.update, user)
def test_update_only_updates_users(self):
"""Test update raises a WrongObjectError when an object which is not
a User instance is updated"""
bad_object = dict()
assert_raises(WrongObjectError, self.user_repo.update, bad_object)
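# The tests above pin down the repository contract (get, get_by_name, get_by,
# filter_by with limit/offset). A minimal sketch of a repository satisfying
# that contract, assuming a SQLAlchemy `db` session and a `User` model are
# available -- PyBossa's real implementation differs in detail:
class MinimalUserRepository(object):
    def __init__(self, db):
        self.db = db

    def get(self, id):
        return self.db.session.query(User).get(id)

    def get_by_name(self, name):
        return self.db.session.query(User).filter_by(name=name).first()

    def get_by(self, **attributes):
        return self.db.session.query(User).filter_by(**attributes).first()

    def filter_by(self, limit=None, offset=0, **filters):
        # limit(None) is SQLAlchemy's "no limit"
        query = self.db.session.query(User).filter_by(**filters)
        return query.offset(offset).limit(limit).all()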
| agpl-3.0 |
albertliangcode/DiceRoll | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/sbcsgroupprober.py | 2936 | 3291 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
Latin5CyrillicModel, MacCyrillicModel,
Ibm866Model, Ibm855Model)
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
def __init__(self):
CharSetGroupProber.__init__(self)
self._mProbers = [
SingleByteCharSetProber(Win1251CyrillicModel),
SingleByteCharSetProber(Koi8rModel),
SingleByteCharSetProber(Latin5CyrillicModel),
SingleByteCharSetProber(MacCyrillicModel),
SingleByteCharSetProber(Ibm866Model),
SingleByteCharSetProber(Ibm855Model),
SingleByteCharSetProber(Latin7GreekModel),
SingleByteCharSetProber(Win1253GreekModel),
SingleByteCharSetProber(Latin5BulgarianModel),
SingleByteCharSetProber(Win1251BulgarianModel),
SingleByteCharSetProber(Latin2HungarianModel),
SingleByteCharSetProber(Win1250HungarianModel),
SingleByteCharSetProber(TIS620ThaiModel),
]
hebrewProber = HebrewProber()
logicalHebrewProber = SingleByteCharSetProber(Win1255HebrewModel,
False, hebrewProber)
visualHebrewProber = SingleByteCharSetProber(Win1255HebrewModel, True,
hebrewProber)
hebrewProber.set_model_probers(logicalHebrewProber, visualHebrewProber)
self._mProbers.extend([hebrewProber, logicalHebrewProber,
visualHebrewProber])
self.reset()
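# Hedged usage sketch: the group prober exposes the standard chardet prober
# interface (feed / get_charset_name / get_confidence), so it can be driven
# directly on raw bytes, e.g.
#
#     prober = SBCSGroupProber()
#     prober.feed(b'\xcf\xf0\xe8\xe2\xe5\xf2')   # "Privet" in windows-1251
#     print(prober.get_charset_name(), prober.get_confidence())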
| mit |
rsunder10/PopularityBased-SearchEngine | lib/python3.4/site-packages/django/core/cache/backends/locmem.py | 586 | 4287 | "Thread-safe in-memory cache backend."
import time
from contextlib import contextmanager
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.utils.synch import RWLock
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# Global in-memory store of cache data. Keyed by name, to provide
# multiple named local memory caches.
_caches = {}
_expire_info = {}
_locks = {}
@contextmanager
def dummy():
"""A context manager that does nothing special."""
yield
class LocMemCache(BaseCache):
def __init__(self, name, params):
BaseCache.__init__(self, params)
self._cache = _caches.setdefault(name, {})
self._expire_info = _expire_info.setdefault(name, {})
self._lock = _locks.setdefault(name, RWLock())
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
with self._lock.writer():
if self._has_expired(key):
self._set(key, pickled, timeout)
return True
return False
def get(self, key, default=None, version=None, acquire_lock=True):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = None
with (self._lock.reader() if acquire_lock else dummy()):
if not self._has_expired(key):
pickled = self._cache[key]
if pickled is not None:
try:
return pickle.loads(pickled)
except pickle.PickleError:
return default
with (self._lock.writer() if acquire_lock else dummy()):
try:
del self._cache[key]
del self._expire_info[key]
except KeyError:
pass
return default
def _set(self, key, value, timeout=DEFAULT_TIMEOUT):
if len(self._cache) >= self._max_entries:
self._cull()
self._cache[key] = value
self._expire_info[key] = self.get_backend_timeout(timeout)
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
with self._lock.writer():
self._set(key, pickled, timeout)
def incr(self, key, delta=1, version=None):
with self._lock.writer():
value = self.get(key, version=version, acquire_lock=False)
if value is None:
raise ValueError("Key '%s' not found" % key)
new_value = value + delta
key = self.make_key(key, version=version)
pickled = pickle.dumps(new_value, pickle.HIGHEST_PROTOCOL)
self._cache[key] = pickled
return new_value
def has_key(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
with self._lock.reader():
if not self._has_expired(key):
return True
with self._lock.writer():
try:
del self._cache[key]
del self._expire_info[key]
except KeyError:
pass
return False
def _has_expired(self, key):
exp = self._expire_info.get(key, -1)
if exp is None or exp > time.time():
return False
return True
def _cull(self):
if self._cull_frequency == 0:
self.clear()
else:
doomed = [k for (i, k) in enumerate(self._cache) if i % self._cull_frequency == 0]
for k in doomed:
self._delete(k)
def _delete(self, key):
try:
del self._cache[key]
except KeyError:
pass
try:
del self._expire_info[key]
except KeyError:
pass
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
with self._lock.writer():
self._delete(key)
def clear(self):
self._cache.clear()
self._expire_info.clear()
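# Hedged usage sketch: Django normally constructs this backend from the
# CACHES setting, but the class can be exercised directly; 'example' is an
# arbitrary store name used to key the module-level _caches dict above.
#
#     cache = LocMemCache('example', {'TIMEOUT': 300})
#     cache.set('greeting', 'hello', timeout=30)
#     assert cache.get('greeting') == 'hello'
#     cache.delete('greeting')
#     assert cache.get('greeting', 'gone') == 'gone'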
| mit |
dony71/OpenWrt_Backfire | scripts/dl_cleanup.py | 31 | 5878 | #!/usr/bin/env python
"""
# OpenWRT download directory cleanup utility.
# Delete all but the very last version of the program tarballs.
#
# Copyright (c) 2010 Michael Buesch <[email protected]>
"""
import sys
import os
import re
import getopt
# Commandline options
opt_dryrun = False
def parseVer_1234(match, filepath):
progname = match.group(1)
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
(int(match.group(4)) << 32) |\
(int(match.group(5)) << 16)
return (progname, progversion)
def parseVer_123(match, filepath):
progname = match.group(1)
try:
patchlevel = match.group(5)
except (IndexError), e:
patchlevel = None
if patchlevel:
patchlevel = ord(patchlevel[0])
else:
patchlevel = 0
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
(int(match.group(4)) << 32) |\
patchlevel
return (progname, progversion)
def parseVer_12(match, filepath):
progname = match.group(1)
try:
patchlevel = match.group(4)
except (IndexError), e:
patchlevel = None
if patchlevel:
patchlevel = ord(patchlevel[0])
else:
patchlevel = 0
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
patchlevel
return (progname, progversion)
def parseVer_r(match, filepath):
progname = match.group(1)
progversion = (int(match.group(2)) << 64)
return (progname, progversion)
def parseVer_ymd(match, filepath):
progname = match.group(1)
progversion = (int(match.group(2)) << 64) |\
(int(match.group(3)) << 48) |\
(int(match.group(4)) << 32)
return (progname, progversion)
def parseVer_GIT(match, filepath):
progname = match.group(1)
st = os.stat(filepath)
progversion = int(st.st_mtime) << 64
return (progname, progversion)
extensions = (
".tar.gz",
".tar.bz2",
".orig.tar.gz",
".orig.tar.bz2",
".zip",
".tgz",
".tbz",
)
versionRegex = (
(re.compile(r"(.+)[-_]([0-9a-fA-F]{40,40})"), parseVer_GIT), # xxx-GIT_SHASUM
(re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)\.(\d+)"), parseVer_1234), # xxx-1.2.3.4
(re.compile(r"(.+)[-_](\d\d\d\d)-?(\d\d)-?(\d\d)"), parseVer_ymd), # xxx-YYYY-MM-DD
(re.compile(r"(.+)[-_](\d+)\.(\d+)\.(\d+)(\w?)"), parseVer_123), # xxx-1.2.3a
(re.compile(r"(.+)[-_](\d+)_(\d+)_(\d+)"), parseVer_123), # xxx-1_2_3
(re.compile(r"(.+)[-_](\d+)\.(\d+)(\w?)"), parseVer_12), # xxx-1.2a
(re.compile(r"(.+)[-_]r?(\d+)"), parseVer_r), # xxx-r1111
)
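# Worked example of the packing scheme above: parseVer_123 turns a name like
# "foo-1.2.3a" into a single integer that sorts correctly, because each
# version component occupies its own 16-bit-aligned slice (the file path
# argument is a placeholder; only parseVer_GIT actually uses it):
#
#     m = versionRegex[3][0].match("foo-1.2.3a")
#     name, version = parseVer_123(m, "dl/foo-1.2.3a.tar.gz")
#     # name == "foo"
#     # version == (1 << 64) | (2 << 48) | (3 << 32) | ord("a")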
blacklist = [
("linux", re.compile(r"linux-.*")),
("gcc", re.compile(r"gcc-.*")),
("wl_apsta", re.compile(r"wl_apsta.*")),
(".fw", re.compile(r".*\.fw")),
(".arm", re.compile(r".*\.arm")),
(".bin", re.compile(r".*\.bin")),
("rt-firmware", re.compile(r"RT[\d\w]+_Firmware.*")),
]
class EntryParseError(Exception): pass
class Entry:
def __init__(self, directory, filename):
self.directory = directory
self.filename = filename
self.progname = ""
self.fileext = ""
for ext in extensions:
if filename.endswith(ext):
filename = filename[0:0-len(ext)]
self.fileext = ext
break
else:
print self.filename, "has an unknown file-extension"
raise EntryParseError("ext")
for (regex, parseVersion) in versionRegex:
match = regex.match(filename)
if match:
(self.progname, self.version) = parseVersion(
match, directory + "/" + filename + self.fileext)
break
else:
print self.filename, "has an unknown version pattern"
raise EntryParseError("ver")
def deleteFile(self):
path = (self.directory + "/" + self.filename).replace("//", "/")
print "Deleting", path
if not opt_dryrun:
os.unlink(path)
def __eq__(self, y):
return self.filename == y.filename
def __ge__(self, y):
return self.version >= y.version
def usage():
print "OpenWRT download directory cleanup utility"
print "Usage: " + sys.argv[0] + " [OPTIONS] <path/to/dl>"
print ""
print " -d|--dry-run Do a dry-run. Don't delete any files"
print " -B|--show-blacklist Show the blacklist and exit"
print " -w|--whitelist ITEM Remove ITEM from blacklist"
def main(argv):
global opt_dryrun
try:
(opts, args) = getopt.getopt(argv[1:],
"hdBw:",
[ "help", "dry-run", "show-blacklist", "whitelist=", ])
if len(args) != 1:
raise getopt.GetoptError()
except getopt.GetoptError:
usage()
return 1
directory = args[0]
for (o, v) in opts:
if o in ("-h", "--help"):
usage()
return 0
if o in ("-d", "--dry-run"):
opt_dryrun = True
if o in ("-w", "--whitelist"):
for i in range(0, len(blacklist)):
(name, regex) = blacklist[i]
if name == v:
del blacklist[i]
break
else:
print "Whitelist error: Item", v,\
"is not in blacklist"
return 1
if o in ("-B", "--show-blacklist"):
for (name, regex) in blacklist:
print name
return 0
# Create a directory listing and parse the file names.
entries = []
for filename in os.listdir(directory):
if filename == "." or filename == "..":
continue
for (name, regex) in blacklist:
if regex.match(filename):
if opt_dryrun:
print filename, "is blacklisted"
break
else:
try:
entries.append(Entry(directory, filename))
except (EntryParseError), e: pass
# Create a map of programs
progmap = {}
for entry in entries:
if entry.progname in progmap.keys():
progmap[entry.progname].append(entry)
else:
progmap[entry.progname] = [entry,]
# Traverse the program map and delete everything but the last version
for prog in progmap:
lastVersion = None
versions = progmap[prog]
for version in versions:
if lastVersion is None or version >= lastVersion:
lastVersion = version
if lastVersion:
for version in versions:
if version != lastVersion:
version.deleteFile()
if opt_dryrun:
print "Keeping", lastVersion.filename
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| gpl-2.0 |
weimingtom/python-for-android | python3-alpha/python3-src/Lib/plat-linux3/DLFCN.py | 171 | 1625 | # Generated by h2py from /usr/include/dlfcn.h
_DLFCN_H = 1
# Included from features.h
_FEATURES_H = 1
__USE_ANSI = 1
__FAVOR_BSD = 1
_ISOC99_SOURCE = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 199506
_XOPEN_SOURCE = 600
_XOPEN_SOURCE_EXTENDED = 1
_LARGEFILE64_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
_BSD_SOURCE = 1
_SVID_SOURCE = 1
__USE_ISOC99 = 1
_POSIX_SOURCE = 1
_POSIX_C_SOURCE = 2
_POSIX_C_SOURCE = 199506
__USE_POSIX = 1
__USE_POSIX2 = 1
__USE_POSIX199309 = 1
__USE_POSIX199506 = 1
__USE_XOPEN = 1
__USE_XOPEN_EXTENDED = 1
__USE_UNIX98 = 1
_LARGEFILE_SOURCE = 1
__USE_XOPEN2K = 1
__USE_ISOC99 = 1
__USE_XOPEN_EXTENDED = 1
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_FILE_OFFSET64 = 1
__USE_MISC = 1
__USE_BSD = 1
__USE_SVID = 1
__USE_GNU = 1
__USE_REENTRANT = 1
__STDC_IEC_559__ = 1
__STDC_IEC_559_COMPLEX__ = 1
__STDC_ISO_10646__ = 200009
__GNU_LIBRARY__ = 6
__GLIBC__ = 2
__GLIBC_MINOR__ = 2
# Included from sys/cdefs.h
_SYS_CDEFS_H = 1
def __PMT(args): return args
def __P(args): return args
def __PMT(args): return args
def __STRING(x): return #x
__flexarr = []
__flexarr = [0]
__flexarr = []
__flexarr = [1]
def __ASMNAME(cname): return __ASMNAME2 (__USER_LABEL_PREFIX__, cname)
def __attribute__(xyz): return
def __attribute_format_arg__(x): return __attribute__ ((__format_arg__ (x)))
def __attribute_format_arg__(x): return
__USE_LARGEFILE = 1
__USE_LARGEFILE64 = 1
__USE_EXTERN_INLINES = 1
# Included from gnu/stubs.h
# Included from bits/dlfcn.h
RTLD_LAZY = 0x00001
RTLD_NOW = 0x00002
RTLD_BINDING_MASK = 0x3
RTLD_NOLOAD = 0x00004
RTLD_GLOBAL = 0x00100
RTLD_LOCAL = 0
RTLD_NODELETE = 0x01000
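# Hedged usage sketch: these are the classic dlopen(3) mode bits; with
# ctypes they can be combined when loading a shared object, e.g.
#
#     import ctypes
#     libm = ctypes.CDLL("libm.so.6", mode=RTLD_NOW | RTLD_GLOBAL)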
| apache-2.0 |
alexproca/askbot-devel | askbot/management/commands/askbot_rebuild_index.py | 9 | 4020 | import sys
import traceback
from optparse import make_option
from django.core.management import get_commands, load_command_class
from django.utils.translation import activate as activate_language
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.utils.encoding import smart_str
try:
from haystack.management.commands.clear_index import Command as ClearCommand
from haystack.management.commands.update_index import Command as UpdateCommand
haystack_option_list = [option for option in UpdateCommand.base_options if option.get_opt_string() != '--verbosity'] + \
[option for option in ClearCommand.base_options if not option.get_opt_string() in ['--using', '--verbosity']]
except ImportError:
haystack_option_list = []
class Command(BaseCommand):
help = "Completely rebuilds the search index by removing the old data and then updating."
base_options = [make_option("-l", "--language", action="store", type="string", dest="language",
help='Language to use, in language code format'),]
option_list = list(BaseCommand.option_list) + haystack_option_list + base_options
def handle(self, *args, **options):
lang_code = options.get('language', settings.LANGUAGE_CODE.lower())
options['using'] = ['default_%s' % lang_code[:2],]
activate_language(lang_code)
klass = self._get_command_class('clear_index')
klass.handle(*args, **options)
klass = self._get_command_class('update_index')
klass.handle(*args, **options)
def _get_command_class(self, name):
try:
app_name = get_commands()[name]
if isinstance(app_name, BaseCommand):
# If the command is already loaded, use it directly.
klass = app_name
else:
klass = load_command_class(app_name, name)
except KeyError:
raise CommandError("Unknown command: %r" % name)
return klass
def execute(self, *args, **options):
"""
Try to execute this command, performing model validation if
needed (as controlled by the attribute
``self.requires_model_validation``). If the command raises a
``CommandError``, intercept it and print it sensibly to
stderr.
"""
show_traceback = options.get('traceback', False)
if self.can_import_settings:
try:
#language part used to be here
pass
except ImportError, e:
# If settings should be available, but aren't,
# raise the error and quit.
if show_traceback:
traceback.print_exc()
else:
sys.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
try:
self.stdout = options.get('stdout', sys.stdout)
self.stderr = options.get('stderr', sys.stderr)
if self.requires_model_validation:
self.validate()
output = self.handle(*args, **options)
if output:
if self.output_transaction:
# This needs to be imported here, because it relies on
# settings.
from django.db import connections, DEFAULT_DB_ALIAS
connection = connections[options.get('database', DEFAULT_DB_ALIAS)]
if connection.ops.start_transaction_sql():
self.stdout.write(self.style.SQL_KEYWORD(connection.ops.start_transaction_sql()) + '\n')
self.stdout.write(output)
if self.output_transaction:
self.stdout.write('\n' + self.style.SQL_KEYWORD("COMMIT;") + '\n')
except CommandError, e:
if show_traceback:
traceback.print_exc()
else:
self.stderr.write(smart_str(self.style.ERROR('Error: %s\n' % e)))
sys.exit(1)
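# Hedged usage sketch: the command is intended to be invoked through
# manage.py; for a language code of 'en' it clears and then repopulates the
# haystack connection named 'default_en':
#
#     python manage.py askbot_rebuild_index --language en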
| gpl-3.0 |
oceanobservatories/mi-instrument | mi/dataset/parser/test/test_cg_stc_eng_stc.py | 5 | 16780 | #!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_cg_stc_eng_stc
@file marine-integrations/mi/dataset/parser/test/test_cg_stc_eng_stc.py
@author Mike Nicoletti
@brief Test code for a Cg_stc_eng_stc data parser
"""
import os
import re
import ntplib
from nose.plugins.attrib import attr
from mi.core.exceptions import SampleException
from mi.core.instrument.dataset_data_particle import DataParticleKey
from mi.core.log import get_logger
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.parser.cg_stc_eng_stc import CgStcEngStcParser, CgStcEngStcParserDataParticle
from mi.dataset.parser.cg_stc_eng_stc import CgStcEngStcParserDataParticleKey
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.dataset.driver.cg_stc_eng.stc.resource import RESOURCE_PATH
log = get_logger()
@attr('UNIT', group='mi')
class CgParserUnitTestCase(ParserUnitTestCase):
"""
Cg_stc_eng_stc Parser unit test suite
"""
def state_callback(self, state, file_ingested):
""" Call back method to watch what comes in via the position callback """
self.state_callback_value = state
self.file_ingested_value = file_ingested
def pub_callback(self, pub):
""" Call back method to watch what comes in via the publish callback """
self.publish_callback_value = pub
def exception_callback(self, exception):
""" Callback method to watch what comes in via the exception callback """
self.exception_callback_value = exception
def setUp(self):
ParserUnitTestCase.setUp(self)
self.config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.cg_stc_eng_stc',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'CgStcEngStcParserDataParticle'
}
# Define test data particles and their associated timestamps which will be
# compared with returned results
fid = open(os.path.join(RESOURCE_PATH, 'stc_status.txt'))
data = fid.read()
fid.close()
utime_grp = re.search(r'Platform.utime=(.+?)(\r\n?|\n)', data)
self.timestamp_a = ntplib.system_to_ntp_time(float(utime_grp.group(1)))
self.particle_a = CgStcEngStcParserDataParticle(data,
internal_timestamp=self.timestamp_a,
preferred_timestamp=DataParticleKey.INTERNAL_TIMESTAMP)
self.comparison_list = [{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_PLATFORM_TIME,
DataParticleKey.VALUE: '2013/10/04 16:07:02.253'},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_PLATFORM_UTIME,
DataParticleKey.VALUE: 1380902822.253},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_C_GPS,
DataParticleKey.VALUE: 83},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_C_NTP,
DataParticleKey.VALUE: 0},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_C_PPS,
DataParticleKey.VALUE: 4},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_C_POWER_SYS,
DataParticleKey.VALUE: 0},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_C_SUPERV,
DataParticleKey.VALUE: 7},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MSG_CNTS_C_TELEM,
DataParticleKey.VALUE: 0},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_ERR_C_GPS,
DataParticleKey.VALUE: 1},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_ERR_C_PPS,
DataParticleKey.VALUE: 1},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_ERR_C_TELEM_SYS,
DataParticleKey.VALUE: 3},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_C_GPS,
DataParticleKey.VALUE: '***Warning, BAD GPS CHECKSUM'},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_C_PPS,
DataParticleKey.VALUE: 'C_PPS: Warning: Pulse delta [790] above warning level [500], still within window [900]'},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_ERRMSG_C_TELEM_SYS,
DataParticleKey.VALUE: ' "***Error turning on fb1 [ret=No Device]'},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_CPU_UPTIME,
DataParticleKey.VALUE: '0 days 00:01:22'},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_CPU_LOAD1,
DataParticleKey.VALUE: 1.03},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_CPU_LOAD5,
DataParticleKey.VALUE: 0.36},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_CPU_LOAD15,
DataParticleKey.VALUE: 0.12},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MEMORY_RAM,
DataParticleKey.VALUE: 127460},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MEMORY_FREE,
DataParticleKey.VALUE: 93396},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_NPROC,
DataParticleKey.VALUE: 76},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_EFLAG,
DataParticleKey.VALUE: 0},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_MAIN_V,
DataParticleKey.VALUE: 17.90},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_MAIN_C,
DataParticleKey.VALUE: 379.20},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_BAT_V,
DataParticleKey.VALUE: 0.0},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_BAT_C,
DataParticleKey.VALUE: 0.0},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_TEMP1,
DataParticleKey.VALUE: 25.0},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_TEMP2,
DataParticleKey.VALUE: 23.3},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_HUMID,
DataParticleKey.VALUE: 31.6},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_PRESS,
DataParticleKey.VALUE: 14.7},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_GF_ENA,
DataParticleKey.VALUE: 15},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_GFLT1,
DataParticleKey.VALUE: 7.7},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_GFLT2,
DataParticleKey.VALUE: 5.2},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_GFLT3,
DataParticleKey.VALUE: 2.8},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_GFLT4,
DataParticleKey.VALUE: 4.0},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_MPIC_LD_ENA,
DataParticleKey.VALUE: 3},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_GPS_DATE,
DataParticleKey.VALUE: 41013},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_GPS_TIME,
DataParticleKey.VALUE: 160701},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_GPS_LATSTR,
DataParticleKey.VALUE: '4132.1353 N'},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_GPS_LONSTR,
DataParticleKey.VALUE: '07038.8306 W'},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_GPS_LAT,
DataParticleKey.VALUE: 41.535588},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_GPS_LON,
DataParticleKey.VALUE: -70.647177},
{DataParticleKey.VALUE_ID: CgStcEngStcParserDataParticleKey.CG_ENG_GPS_SPD,
DataParticleKey.VALUE: 0.0}]
# uncomment the following to write the above comparison list in yml format to a file
# self.write_comparison_to_yml()
self.file_ingested_value = None
self.state_callback_value = None
self.publish_callback_value = None
self.exception_callback_value = None
def write_comparison_to_yml(self):
"""
Helper method to create a yml file for driver tests
"""
fid = open('particle.yml', 'a')
fid.write('header:\n')
fid.write(' particle_object: CgStcEngStcParserDataParticle\n')
fid.write(' particle_type: cg_stc_eng_stc\n')
fid.write('data:\n')
fid.write(' - _index: 1\n')
fid.write(' internal_timestamp: 0.0\n')
for item in self.comparison_list:
if isinstance(item.get('value'), float):
fid.write(' %s: %16.20f\n' % (item.get('value_id'), item.get('value')))
else:
fid.write(' %s: %s\n' % (item.get('value_id'), item.get('value')))
fid.close()
def assert_result(self, result, particle, ingested):
if result[0].raw_data == particle.raw_data:
log.debug("raw data match")
log.debug("comparing result %s, particle %s", result[0].contents, particle.contents)
self.assertEqual(result, [particle])
self.assertEqual(self.file_ingested_value, ingested)
self.assert_(isinstance(self.publish_callback_value, list))
self.assertEqual(self.publish_callback_value[0], particle)
def test_simple(self):
"""
Read test data and pull out data particles one at a time.
Assert that the results are those we expected.
"""
stream_handle = open(os.path.join(RESOURCE_PATH, 'stc_status.txt'))
self.parser = CgStcEngStcParser(self.config, None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
result = self.parser.get_records(1)
self.assert_result(result, self.particle_a, True)
# no data left, do not move the position
result = self.parser.get_records(1)
self.assertEqual(result, [])
self.assert_(isinstance(self.publish_callback_value, list))
self.assertEqual(self.publish_callback_value[0], self.particle_a)
self.assertEqual(self.exception_callback_value, None)
def test_simple_particles(self):
"""
Read test data and pull out data particles one at a time.
Assert that the results are those we expected.
"""
stream_handle = open(os.path.join(RESOURCE_PATH, 'stc_status2.txt'))
self.parser = CgStcEngStcParser(self.config, None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
result = self.parser.get_records(1)
self.assert_particles(result, 'stc_first2.result.yml', RESOURCE_PATH)
self.assertEqual(self.exception_callback_value, None)
def test_get_many(self):
"""
Read test data and try to pull out multiple data particles at one time,
but we should only get 1.
Assert that the results are those we expected.
"""
stream_handle = open(os.path.join(RESOURCE_PATH, 'stc_status.txt'))
self.parser = CgStcEngStcParser(self.config, None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
result = self.parser.get_records(4)
self.assert_result(result, self.particle_a, True)
self.assertEqual(len(self.publish_callback_value), 1)
# no data left, do not move the position
result = self.parser.get_records(1)
self.assertEqual(result, [])
self.assert_(isinstance(self.publish_callback_value, list))
self.assertEqual(self.publish_callback_value[0], self.particle_a)
self.assertEqual(self.exception_callback_value, None)
def test_generate(self):
"""
Ensure we can generate the particle dictionary and compare it to expected ones
"""
stream_handle = open(os.path.join(RESOURCE_PATH, 'stc_status.txt'))
self.parser = CgStcEngStcParser(self.config, None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
result = self.parser.get_records(1)
res_dict = result[0].generate_dict()
# assert two lists of generated dictionaries are the same
for cdict in self.comparison_list:
for rdict in res_dict['values']:
if cdict.get('value_id') == rdict.get('value_id'):
if cdict.get('value') != rdict.get('value'):
log.error("mismatch for key %s, values '%s' '%s'", cdict.get('value_id'),
cdict.get('value'),
rdict.get('value'))
self.fail("mismatch for key %s, values '%s', '%s'" % (cdict.get('value_id'),
cdict.get('value'),
rdict.get('value')))
def test_bad_data(self):
"""
Ensure that the missing timestamp field causes a sample exception
"""
with self.assertRaises(SampleException):
stream_handle = open(os.path.join(RESOURCE_PATH, 'stc_status_missing_time.txt'))
self.parser = CgStcEngStcParser(self.config, None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
self.parser.get_records(1)
def test_encoding(self):
"""
Create an encoding error in the data and make sure an encoding error shows up
"""
stream_handle = open(os.path.join(RESOURCE_PATH, 'stc_status_bad_encode.txt'))
self.parser = CgStcEngStcParser(self.config, None, stream_handle,
self.state_callback, self.pub_callback,
self.exception_callback)
result = self.parser.get_records(1)
errors = result[0].get_encoding_errors()
log.debug("encoding errors: %s", errors)
self.assertNotEqual(errors, [])
| bsd-2-clause |
foss-for-synopsys-dwc-arc-processors/RIOT | tests/lwip/tests/01-run.py | 15 | 11444 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2016 Martine Lenders <[email protected]>
#
# Distributed under terms of the MIT license.
from __future__ import print_function
import argparse
import os, sys
import random
import pexpect
import subprocess
import time
import types
DEFAULT_TIMEOUT = 5
class Strategy(object):
def __init__(self, func=None):
if func != None:
if sys.version_info < (3,):
self.__class__.execute = types.MethodType(func, self, self.__class__)
else:
self.__class__.execute = types.MethodType(func, self)
def execute(self, *args, **kwargs):
raise NotImplementedError()
class ApplicationStrategy(Strategy):
def __init__(self, app_dir=os.getcwd(), func=None):
super(ApplicationStrategy, self).__init__(func)
self.app_dir = app_dir
class BoardStrategy(Strategy):
def __init__(self, board, func=None):
super(BoardStrategy, self).__init__(func)
self.board = board
def _run_make(self, application, make_targets, env=None):
# build the make environment: process env first, then caller-supplied
# overrides, then the board-specific variables
make_env = os.environ.copy()
if env is not None:
make_env.update(env)
make_env.update(self.board.to_env())
cmd = ("make", "-C", application) + make_targets
print(' '.join(cmd))
print(subprocess.check_output(cmd, env=make_env))
def execute(self, application):
super(BoardStrategy, self).execute(application)
class CleanStrategy(BoardStrategy):
def execute(self, application, env=None):
self._run_make(application, ("-B", "clean"), env)
class BuildStrategy(BoardStrategy):
def execute(self, application, env=None):
self._run_make(application, ("all",), env)
class FlashStrategy(BoardStrategy):
def execute(self, application, env=None):
self._run_make(application, ("all",), env)
class ResetStrategy(BoardStrategy):
def execute(self, application, env=None):
self._run_make(application, ("reset",), env)
class Board(object):
def __init__(self, name, port=None, serial=None, clean=None,
build=None, flash=None,
reset=None, term=None):
def _reset_native_execute(obj, application, env=None, *args, **kwargs):
pass
if (name == "native") and (reset == None):
reset = _reset_native_execute
self.name = name
self.port = port
self.serial = serial
self.clean_strategy = CleanStrategy(self, clean)
self.build_strategy = BuildStrategy(self, build)
self.flash_strategy = FlashStrategy(self, flash)
self.reset_strategy = ResetStrategy(self, reset)
def __len__(self):
return 1
def __iter__(self):
return self
def next(self):
raise StopIteration()
def __repr__(self):
return ("<Board %s,port=%s,serial=%s>" %
(repr(self.name), repr(self.port), repr(self.serial)))
def to_env(self):
env = {}
if self.name:
env['BOARD'] = self.name
if self.port:
env['PORT'] = self.port
if self.serial:
env['SERIAL'] = self.serial
return env
def clean(self, application=os.getcwd(), env=None):
self.clean_strategy.execute(application, env)
def build(self, application=os.getcwd(), env=None):
self.build_strategy.execute(application, env)
def flash(self, application=os.getcwd(), env=None):
self.flash_strategy.execute(application, env)
def reset(self, application=os.getcwd(), env=None):
self.reset_strategy.execute(application, env)
class BoardGroup(object):
def __init__(self, boards):
self.boards = boards
def __len__(self):
return len(self.boards)
def __iter__(self):
return iter(self.boards)
def __repr__(self):
return str(self.boards)
def clean(self, application=os.getcwd(), env=None):
for board in self.boards:
board.clean(application, env)
def build(self, application=os.getcwd(), env=None):
for board in self.boards:
board.build(application, env)
def flash(self, application=os.getcwd(), env=None):
for board in self.boards:
board.flash(application, env)
def reset(self, application=os.getcwd(), env=None):
for board in self.boards:
board.reset(application, env)
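# Hedged usage sketch (mirroring the __main__ block at the bottom of this
# file): each build step fans out over every board in the group.
#
#     group = BoardGroup((Board("native", "tap0"), Board("native", "tap1")))
#     group.clean()
#     group.build()
#     group.flash()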
def default_test_case(board_group, application, env=None):
for board in board_group:
term_env = os.environ.copy()
if env is not None:
term_env.update(env)
term_env.update(board.to_env())
with pexpect.spawnu("make", ["-C", application, "term"], env=term_env,
timeout=DEFAULT_TIMEOUT,
logfile=sys.stdout) as spawn:
spawn.expect("TEST: SUCCESS")
class TestStrategy(ApplicationStrategy):
def execute(self, board_groups, test_cases=[default_test_case],
timeout=DEFAULT_TIMEOUT, env=None):
for board_group in board_groups:
print("Testing for %s: " % board_group)
for test_case in test_cases:
board_group.reset()
test_case(board_group, self.app_dir, env=env)
sys.stdout.write('.')
sys.stdout.flush()
print()
def get_ipv6_address(spawn):
spawn.sendline(u"ifconfig")
spawn.expect(u"[A-Za-z0-9]{2}_[0-9]+: inet6 (fe80::[0-9a-f:]+)")
return spawn.match.group(1)
def test_ipv6_send(board_group, application, env=None):
env_sender = os.environ.copy()
if env != None:
env_sender.update(env)
env_sender.update(board_group.boards[0].to_env())
env_receiver = os.environ.copy()
if env != None:
env_receiver.update(env)
env_receiver.update(board_group.boards[1].to_env())
with pexpect.spawnu("make", ["-C", application, "term"], env=env_sender,
timeout=DEFAULT_TIMEOUT) as sender, \
pexpect.spawnu("make", ["-C", application, "term"], env=env_receiver,
timeout=DEFAULT_TIMEOUT) as receiver:
ipprot = random.randint(0x00, 0xff)
receiver_ip = get_ipv6_address(receiver)
receiver.sendline(u"ip server start %d" % ipprot)
# wait for neighbor discovery to be done
time.sleep(5)
sender.sendline(u"ip send %s %d 01:23:45:67:89:ab:cd:ef" % (receiver_ip, ipprot))
sender.expect_exact(u"Success: send 8 byte over IPv6 to %s (next header: %d)" %
(receiver_ip, ipprot))
receiver.expect(u"00000000 01 23 45 67 89 AB CD EF")
def test_udpv6_send(board_group, application, env=None):
env_sender = os.environ.copy()
if env != None:
env_sender.update(env)
env_sender.update(board_group.boards[0].to_env())
env_receiver = os.environ.copy()
if env != None:
env_receiver.update(env)
env_receiver.update(board_group.boards[1].to_env())
with pexpect.spawnu("make", ["-C", application, "term"], env=env_sender,
timeout=DEFAULT_TIMEOUT) as sender, \
pexpect.spawnu("make", ["-C", application, "term"], env=env_receiver,
timeout=DEFAULT_TIMEOUT) as receiver:
port = random.randint(0x0000, 0xffff)
receiver_ip = get_ipv6_address(receiver)
receiver.sendline(u"udp server start %d" % port)
# wait for neighbor discovery to be done
time.sleep(5)
sender.sendline(u"udp send %s %d ab:cd:ef" % (receiver_ip, port))
sender.expect_exact(u"Success: send 3 byte over UDP to [%s]:%d" %
(receiver_ip, port))
receiver.expect(u"00000000 AB CD EF")
def test_tcpv6_send(board_group, application, env=None):
env_client = os.environ.copy()
if env != None:
env_client.update(env)
env_client.update(board_group.boards[0].to_env())
env_server = os.environ.copy()
if env != None:
env_server.update(env)
env_server.update(board_group.boards[1].to_env())
with pexpect.spawnu("make", ["-C", application, "term"], env=env_client,
timeout=DEFAULT_TIMEOUT) as client, \
pexpect.spawnu("make", ["-C", application, "term"], env=env_server,
timeout=DEFAULT_TIMEOUT) as server:
port = random.randint(0x0000, 0xffff)
server_ip = get_ipv6_address(server)
client_ip = get_ipv6_address(client)
server.sendline(u"tcp server start %d" % port)
# wait for neighbor discovery to be done
time.sleep(5)
client.sendline(u"tcp connect %s %d" % (server_ip, port))
server.expect(u"TCP client \\[%s\\]:[0-9]+ connected" % client_ip)
client.sendline(u"tcp send affe:abe")
client.expect_exact(u"Success: send 4 byte over TCP to server")
server.expect(u"00000000 AF FE AB E0")
client.sendline(u"tcp disconnect")
client.sendline(u"tcp send affe:abe")
client.expect_exact(u"could not send")
def test_triple_send(board_group, application, env=None):
env_sender = os.environ.copy()
if env != None:
env_sender.update(env)
env_sender.update(board_group.boards[0].to_env())
env_receiver = os.environ.copy()
if env != None:
env_receiver.update(env)
env_receiver.update(board_group.boards[1].to_env())
with pexpect.spawnu("make", ["-C", application, "term"], env=env_sender,
timeout=DEFAULT_TIMEOUT) as sender, \
pexpect.spawnu("make", ["-C", application, "term"], env=env_receiver,
timeout=DEFAULT_TIMEOUT) as receiver:
udp_port = random.randint(0x0000, 0xffff)
tcp_port = random.randint(0x0000, 0xffff)
ipprot = random.randint(0x00, 0xff)
receiver_ip = get_ipv6_address(receiver)
sender_ip = get_ipv6_address(sender)
receiver.sendline(u"ip server start %d" % ipprot)
receiver.sendline(u"udp server start %d" % udp_port)
receiver.sendline(u"tcp server start %d" % tcp_port)
# wait for neighbor discovery to be done
time.sleep(5)
sender.sendline(u"udp send %s %d 01:23" % (receiver_ip, udp_port))
sender.expect_exact(u"Success: send 2 byte over UDP to [%s]:%d" %
(receiver_ip, udp_port))
receiver.expect(u"00000000 01 23")
sender.sendline(u"ip send %s %d 01:02:03:04" % (receiver_ip, ipprot))
sender.expect_exact(u"Success: send 4 byte over IPv6 to %s (next header: %d)" %
(receiver_ip, ipprot))
receiver.expect(u"00000000 01 02 03 04")
sender.sendline(u"tcp connect %s %d" % (receiver_ip, tcp_port))
receiver.expect(u"TCP client \\[%s\\]:[0-9]+ connected" % sender_ip)
sender.sendline(u"tcp send dead:beef")
sender.expect_exact(u"Success: send 4 byte over TCP to server")
receiver.expect(u"00000000 DE AD BE EF")
if __name__ == "__main__":
os.environ.pop('TERMFLAGS', None)  # tolerate TERMFLAGS being unset
TestStrategy().execute([BoardGroup((Board("native", "tap0"), \
Board("native", "tap1")))], \
[test_ipv6_send, test_udpv6_send, test_tcpv6_send,
test_triple_send])
| lgpl-2.1 |
UstadMobile/exelearning-extjs5-mirror | twisted/plugins/twisted_trial.py | 14 | 1880 |
from zope.interface import implements
from twisted.python.components import backwardsCompatImplements
from twisted.trial.itrial import IReporter
from twisted.plugin import IPlugin
class _Reporter(object):
implements(IPlugin, IReporter)
def __init__(self, name, module, description, longOpt, shortOpt, klass):
self.name = name
self.module = module
self.description = description
self.longOpt = longOpt
self.shortOpt = shortOpt
self.klass = klass
backwardsCompatImplements(_Reporter)
Tree = _Reporter("Tree Reporter",
"twisted.trial.reporter",
description="verbose color output (default reporter)",
longOpt="verbose",
shortOpt="v",
klass="TreeReporter")
BlackAndWhite = _Reporter("Black-And-White Reporter",
"twisted.trial.reporter",
description="Colorless verbose output",
longOpt="bwverbose",
shortOpt="o",
klass="VerboseTextReporter")
Minimal = _Reporter("Minimal Reporter",
"twisted.trial.reporter",
description="minimal summary output",
longOpt="summary",
shortOpt="s",
klass="MinimalReporter")
Classic = _Reporter("Classic Reporter",
"twisted.trial.reporter",
description="terse text output",
longOpt="text",
shortOpt="t",
klass="TextReporter")
Timing = _Reporter("Timing Reporter",
"twisted.trial.reporter",
description="Timing output",
longOpt="timing",
shortOpt=None,
klass="TimingTextReporter")
| gpl-2.0 |
wanderine/nipype | nipype/interfaces/slicer/filtering/histogrammatching.py | 15 | 2978 | # -*- coding: utf8 -*-
"""Autogenerated file - DO NOT EDIT
If you spot a bug, please report it on the mailing list and/or change the generator."""
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, SEMLikeCommandLine, TraitedSpec, File, Directory, traits, isdefined, InputMultiPath, OutputMultiPath
import os
class HistogramMatchingInputSpec(CommandLineInputSpec):
numberOfHistogramLevels = traits.Int(desc="The number of histogram levels to use", argstr="--numberOfHistogramLevels %d")
numberOfMatchPoints = traits.Int(desc="The number of match points to use", argstr="--numberOfMatchPoints %d")
threshold = traits.Bool(desc="If on, only pixels above the mean in each volume are thresholded.", argstr="--threshold ")
inputVolume = File(position=-3, desc="Input volume to be filtered", exists=True, argstr="%s")
referenceVolume = File(position=-2, desc="Input volume whose histogram will be matched", exists=True, argstr="%s")
outputVolume = traits.Either(traits.Bool, File(), position=-1, hash_files=False, desc="Output volume. This is the input volume with intensities matched to the reference volume.", argstr="%s")
class HistogramMatchingOutputSpec(TraitedSpec):
outputVolume = File(position=-1, desc="Output volume. This is the input volume with intensities matched to the reference volume.", exists=True)
class HistogramMatching(SEMLikeCommandLine):
"""title: Histogram Matching
category: Filtering
description: Normalizes the grayscale values of a source image based on the grayscale values of a reference image. This filter uses a histogram matching technique where the histograms of the two images are matched only at a specified number of quantile values.
The filter was originally designed to normalize MR images of the same MR protocol and same body part. The algorithm works best if background pixels are excluded from both the source and reference histograms. A simple background exclusion method is to exclude all pixels whose grayscale values are smaller than the mean grayscale value. ThresholdAtMeanIntensity switches on this simple background exclusion method.
Number of match points governs the number of quantile values to be matched.
The filter assumes that both the source and reference are of the same type and that the input and output image type have the same number of dimension and have scalar pixel types.
version: 0.1.0.$Revision: 19608 $(alpha)
documentation-url: http://wiki.slicer.org/slicerWiki/index.php/Documentation/4.1/Modules/HistogramMatching
contributor: Bill Lorensen (GE)
acknowledgements: This work is part of the National Alliance for Medical Image Computing (NAMIC), funded by the National Institutes of Health through the NIH Roadmap for Medical Research, Grant U54 EB005149.
"""
input_spec = HistogramMatchingInputSpec
output_spec = HistogramMatchingOutputSpec
_cmd = "HistogramMatching "
_outputs_filenames = {'outputVolume':'outputVolume.nii'}
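# Hedged usage sketch: wiring the autogenerated interface above through the
# usual nipype pattern; the file names are placeholders.
#
#     hm = HistogramMatching()
#     hm.inputs.inputVolume = 'moving.nii'
#     hm.inputs.referenceVolume = 'reference.nii'
#     hm.inputs.outputVolume = 'matched.nii'
#     hm.run()  # shells out to the Slicer 'HistogramMatching' CLI module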
| bsd-3-clause |
thomazs/geraldo | site/newsite/site-geraldo/django/db/models/__init__.py | 14 | 1245 | from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, ImproperlyConfigured
from django.db import connection
from django.db.models.loading import get_apps, get_app, get_models, get_model, register_models
from django.db.models.query import Q
from django.db.models.manager import Manager
from django.db.models.base import Model
from django.db.models.fields import *
from django.db.models.fields.subclassing import SubfieldBase
from django.db.models.fields.files import FileField, ImageField
from django.db.models.fields.related import ForeignKey, OneToOneField, ManyToManyField, ManyToOneRel, ManyToManyRel, OneToOneRel
from django.db.models import signals
# Admin stages.
ADD, CHANGE, BOTH = 1, 2, 3
def permalink(func):
"""
Decorator that calls urlresolvers.reverse() to return a URL using
parameters returned by the decorated function "func".
"func" should be a function that returns a tuple in one of the
following formats:
(viewname, viewargs)
(viewname, viewargs, viewkwargs)
"""
from django.core.urlresolvers import reverse
def inner(*args, **kwargs):
bits = func(*args, **kwargs)
return reverse(bits[0], None, *bits[1:3])
return inner
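# Hedged usage sketch: a model method decorated with permalink, returning
# the (viewname, viewargs, viewkwargs) form described in the docstring;
# 'news_detail' is a hypothetical URL pattern name.
#
#     class Article(Model):
#         @permalink
#         def get_absolute_url(self):
#             return ('news_detail', (), {'slug': self.slug})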
| lgpl-3.0 |
RackSec/ansible | lib/ansible/compat/tests/unittest.py | 375 | 1147 | # (c) 2014, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Compat module for Python2.7's unittest module
'''
import sys
# Python 2.6
if sys.version_info < (2, 7):
try:
# Need unittest2 on python2.6
from unittest2 import *
except ImportError:
print('You need unittest2 installed on python2.6.x to run tests')
else:
from unittest import *
| gpl-3.0 |
ycl2045/nova-master | nova/tests/api/openstack/compute/plugins/v3/test_agents.py | 19 | 13966 | # Copyright 2012 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from webob import exc
from nova.api.openstack.compute.plugins.v3 import agents
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception
from nova import test
fake_agents_list = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'xxx://xxxx/xxx/xxx1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'id': 2},
{'hypervisor': 'xen', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'xxx://xxxx/xxx/xxx2',
'md5hash': 'add6bb58e139be103324d04d82d8f547',
'id': 3},
{'hypervisor': 'xen', 'os': 'win',
'architecture': 'power',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx3',
'md5hash': 'add6bb58e139be103324d04d82d8f548',
'id': 4},
]
def fake_agent_build_get_all(context, hypervisor):
agent_build_all = []
for agent in fake_agents_list:
if hypervisor and hypervisor != agent['hypervisor']:
continue
agent_build_ref = models.AgentBuild()
agent_build_ref.update(agent)
agent_build_all.append(agent_build_ref)
return agent_build_all
def fake_agent_build_update(context, agent_build_id, values):
pass
def fake_agent_build_destroy(context, agent_update_id):
pass
def fake_agent_build_create(context, values):
values['id'] = 1
agent_build_ref = models.AgentBuild()
agent_build_ref.update(values)
return agent_build_ref
class FakeRequest(object):
environ = {"nova.context": context.get_admin_context()}
GET = {}
class FakeRequestWithHypervisor(object):
environ = {"nova.context": context.get_admin_context()}
GET = {'hypervisor': 'kvm'}
def fake_agent_build_create_with_exited_agent(context, values):
raise exception.AgentBuildExists(**values)
class AgentsTest(test.NoDBTestCase):
def setUp(self):
super(AgentsTest, self).setUp()
self.stubs.Set(db, "agent_build_get_all",
fake_agent_build_get_all)
self.stubs.Set(db, "agent_build_update",
fake_agent_build_update)
self.stubs.Set(db, "agent_build_destroy",
fake_agent_build_destroy)
self.stubs.Set(db, "agent_build_create",
fake_agent_build_create)
self.context = context.get_admin_context()
self.controller = agents.AgentController()
def test_agents_create(self):
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
response = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1}}
res_dict = self.controller.create(req, body=body)
self.assertEqual(res_dict, response)
self.assertEqual(self.controller.create.wsgi_code, 201)
def test_agents_create_with_existed_agent(self):
self.stubs.Set(db, 'agent_build_create',
fake_agent_build_create_with_exited_agent)
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(exc.HTTPConflict, self.controller.create, req,
body=body)
def test_agents_create_without_md5hash(self):
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx'}}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
def test_agents_create_without_url(self):
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
def test_agents_create_without_version(self):
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
def test_agents_create_without_architecture(self):
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
def test_agents_create_without_os(self):
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
def test_agents_create_without_hypervisor(self):
req = FakeRequest()
body = {'agent': {'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
def test_agents_create_with_wrong_type(self):
req = FakeRequest()
body = {'agent': None}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
def test_agents_create_with_empty_type(self):
req = FakeRequest()
body = {}
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
def _test_agents_create_with_invalid_length(self, key):
req = FakeRequest()
body = {'agent': {'hypervisor': 'kvm',
'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
body['agent'][key] = 'x' * 256
self.assertRaises(exception.ValidationError, self.controller.create,
req, body=body)
def test_agents_create_with_invalid_length_hypervisor(self):
self._test_agents_create_with_invalid_length('hypervisor')
def test_agents_create_with_invalid_length_os(self):
self._test_agents_create_with_invalid_length('os')
def test_agents_create_with_invalid_length_architecture(self):
self._test_agents_create_with_invalid_length('architecture')
def test_agents_create_with_invalid_length_version(self):
self._test_agents_create_with_invalid_length('version')
def test_agents_create_with_invalid_length_url(self):
self._test_agents_create_with_invalid_length('url')
def test_agents_create_with_invalid_length_md5hash(self):
self._test_agents_create_with_invalid_length('md5hash')
def test_agents_delete(self):
req = FakeRequest()
self.controller.delete(req, 1)
def test_agents_list(self):
req = FakeRequest()
res_dict = self.controller.index(req)
agents_list = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'xxx://xxxx/xxx/xxx1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'agent_id': 2},
{'hypervisor': 'xen', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'xxx://xxxx/xxx/xxx2',
'md5hash': 'add6bb58e139be103324d04d82d8f547',
'agent_id': 3},
{'hypervisor': 'xen', 'os': 'win',
'architecture': 'power',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx3',
'md5hash': 'add6bb58e139be103324d04d82d8f548',
'agent_id': 4},
]
self.assertEqual(res_dict, {'agents': agents_list})
def test_agents_list_with_hypervisor(self):
req = FakeRequestWithHypervisor()
res_dict = self.controller.index(req)
response = [{'hypervisor': 'kvm', 'os': 'win',
'architecture': 'x86',
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545',
'agent_id': 1},
{'hypervisor': 'kvm', 'os': 'linux',
'architecture': 'x86',
'version': '16.0',
'url': 'xxx://xxxx/xxx/xxx1',
'md5hash': 'add6bb58e139be103324d04d82d8f546',
'agent_id': 2},
]
self.assertEqual(res_dict, {'agents': response})
def test_agents_update(self):
req = FakeRequest()
body = {'agent': {'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
response = {'agent': {'agent_id': 1,
'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
res_dict = self.controller.update(req, 1, body=body)
self.assertEqual(res_dict, response)
def test_agents_update_without_md5hash(self):
req = FakeRequest()
body = {'agent': {'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx'}}
self.assertRaises(exception.ValidationError, self.controller.update,
req, 1, body=body)
def test_agents_update_without_url(self):
req = FakeRequest()
body = {'agent': {'version': '7.0'}}
self.assertRaises(exception.ValidationError, self.controller.update,
req, 1, body=body)
def test_agents_update_without_version(self):
req = FakeRequest()
body = {'agent': {}}
self.assertRaises(exception.ValidationError, self.controller.update,
req, 1, body=body)
def test_agents_update_with_wrong_type(self):
req = FakeRequest()
body = {'agent': None}
self.assertRaises(exception.ValidationError, self.controller.update,
req, 1, body=body)
def test_agents_update_with_empty(self):
req = FakeRequest()
body = {}
self.assertRaises(exception.ValidationError, self.controller.update,
req, 1, body=body)
def _test_agents_update_with_invalid_length(self, key):
req = FakeRequest()
body = {'agent': {'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
'md5hash': 'add6bb58e139be103324d04d82d8f545'}}
body['agent'][key] = 'x' * 256
self.assertRaises(exception.ValidationError, self.controller.update,
req, 1, body=body)
def test_agents_update_with_invalid_length_version(self):
self._test_agents_update_with_invalid_length('version')
def test_agents_update_with_invalid_length_url(self):
self._test_agents_update_with_invalid_length('url')
def test_agents_update_with_invalid_length_md5hash(self):
self._test_agents_update_with_invalid_length('md5hash')
| apache-2.0 |
FTBZ/librenms | LibreNMS/service.py | 14 | 31186 | import LibreNMS
import json
import logging
import os
import subprocess
import threading
import sys
import time
import timeit
from datetime import timedelta
from logging import debug, info, warning, error, critical, exception
from platform import python_version
from time import sleep
from socket import gethostname
from signal import signal, SIGTERM
from uuid import uuid1
class PerformanceCounter(object):
"""
This is a simple counter to record execution time and number of jobs. It's unique to each
    poller instance, so does not need to be globally synchronised, just locally.
"""
def __init__(self):
self._count = 0
self._jobs = 0
self._lock = threading.Lock()
def add(self, n):
"""
Add n to the counter and increment the number of jobs by 1
:param n: Number to increment by
"""
with self._lock:
self._count += n
self._jobs += 1
def split(self, precise=False):
"""
Return the current counter value and keep going
:param precise: Whether floating point precision is desired
:return: ((INT or FLOAT), INT)
"""
return (self._count if precise else int(self._count)), self._jobs
def reset(self, precise=False):
"""
Return the current counter value and then zero it.
:param precise: Whether floating point precision is desired
:return: ((INT or FLOAT), INT)
"""
with self._lock:
c = self._count
j = self._jobs
self._count = 0
self._jobs = 0
return (c if precise else int(c)), j
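# Illustrative usage sketch (not part of the upstream module): worker threads
# record each job's runtime with add(), and a reporting thread drains the
# counter with reset(). The numbers below are arbitrary.
def _example_performance_counter():
    counter = PerformanceCounter()
    counter.add(2.5)   # one job that took 2.5 seconds
    counter.add(1.25)  # a second job
    seconds, jobs = counter.reset(precise=True)  # -> (3.75, 2); counter zeroed
    return seconds, jobs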
class TimeitContext(object):
"""
Wrapper around timeit to allow the timing of larger blocks of code by wrapping them in "with"
"""
def __init__(self):
self._t = timeit.default_timer()
def __enter__(self):
return self
def __exit__(self, *args):
del self._t
def delta(self):
"""
Calculate the elapsed time since the context was initialised
:return: FLOAT
"""
if not self._t:
raise ArithmeticError("Timer has not been started, cannot return delta")
return timeit.default_timer() - self._t
@classmethod
def start(cls):
"""
Factory method for TimeitContext
:param cls:
:return: TimeitContext
"""
return cls()
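# Illustrative usage sketch (not part of the upstream module): delta() must be
# read inside the "with" block, since __exit__ discards the start time.
def _example_timeit_context():
    with TimeitContext.start() as t:
        sleep(0.01)          # stand-in for real work
        elapsed = t.delta()  # seconds elapsed since the context was created
    return elapsed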
class ServiceConfig:
def __init__(self):
"""
Stores all of the configuration variables for the LibreNMS service in a common object
Starts with defaults, but can be populated with variables from config.php by calling populate()
"""
self._uuid = str(uuid1())
self.set_name(gethostname())
def set_name(self, name):
if name:
self.name = name.strip()
self.unique_name = "{}-{}".format(self.name, self._uuid)
class PollerConfig:
def __init__(self, workers, frequency, calculate=None):
self.workers = workers
self.frequency = frequency
self.calculate = calculate
# config variables with defaults
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
node_id = None
name = None
unique_name = None
single_instance = True
distributed = False
group = 0
debug = False
log_level = 20
alerting = PollerConfig(1, 60)
poller = PollerConfig(24, 300)
services = PollerConfig(8, 300)
discovery = PollerConfig(16, 21600)
billing = PollerConfig(2, 300, 60)
down_retry = 60
update_frequency = 86400
master_resolution = 1
master_timeout = 10
redis_host = 'localhost'
redis_port = 6379
redis_db = 0
redis_pass = None
redis_socket = None
db_host = 'localhost'
db_port = 0
db_socket = None
db_user = 'librenms'
db_pass = ''
db_name = 'librenms'
def populate(self):
config = self._get_config_data()
# populate config variables
self.node_id = os.getenv('NODE_ID')
self.set_name(config.get('distributed_poller_name', None))
self.distributed = config.get('distributed_poller', ServiceConfig.distributed)
self.group = ServiceConfig.parse_group(config.get('distributed_poller_group', ServiceConfig.group))
# backward compatible options
self.poller.workers = config.get('poller_service_workers', ServiceConfig.poller.workers)
self.poller.frequency = config.get('poller_service_poll_frequency', ServiceConfig.poller.frequency)
self.discovery.frequency = config.get('poller_service_discover_frequency', ServiceConfig.discovery.frequency)
self.down_retry = config.get('poller_service_down_retry', ServiceConfig.down_retry)
self.log_level = config.get('poller_service_loglevel', ServiceConfig.log_level)
# new options
self.poller.workers = config.get('service_poller_workers', ServiceConfig.poller.workers)
self.poller.frequency = config.get('service_poller_frequency', ServiceConfig.poller.frequency)
self.services.workers = config.get('service_services_workers', ServiceConfig.services.workers)
self.services.frequency = config.get('service_services_frequency', ServiceConfig.services.frequency)
self.discovery.workers = config.get('service_discovery_workers', ServiceConfig.discovery.workers)
self.discovery.frequency = config.get('service_discovery_frequency', ServiceConfig.discovery.frequency)
self.billing.frequency = config.get('service_billing_frequency', ServiceConfig.billing.frequency)
self.billing.calculate = config.get('service_billing_calculate_frequency', ServiceConfig.billing.calculate)
self.down_retry = config.get('service_poller_down_retry', ServiceConfig.down_retry)
self.log_level = config.get('service_loglevel', ServiceConfig.log_level)
self.update_frequency = config.get('service_update_frequency', ServiceConfig.update_frequency)
self.redis_host = os.getenv('REDIS_HOST', config.get('redis_host', ServiceConfig.redis_host))
self.redis_db = os.getenv('REDIS_DB', config.get('redis_db', ServiceConfig.redis_db))
self.redis_pass = os.getenv('REDIS_PASSWORD', config.get('redis_pass', ServiceConfig.redis_pass))
self.redis_port = int(os.getenv('REDIS_PORT', config.get('redis_port', ServiceConfig.redis_port)))
self.redis_socket = os.getenv('REDIS_SOCKET', config.get('redis_socket', ServiceConfig.redis_socket))
self.db_host = os.getenv('DB_HOST', config.get('db_host', ServiceConfig.db_host))
self.db_name = os.getenv('DB_DATABASE', config.get('db_name', ServiceConfig.db_name))
self.db_pass = os.getenv('DB_PASSWORD', config.get('db_pass', ServiceConfig.db_pass))
self.db_port = int(os.getenv('DB_PORT', config.get('db_port', ServiceConfig.db_port)))
self.db_socket = os.getenv('DB_SOCKET', config.get('db_socket', ServiceConfig.db_socket))
self.db_user = os.getenv('DB_USERNAME', config.get('db_user', ServiceConfig.db_user))
# set convenient debug variable
self.debug = logging.getLogger().isEnabledFor(logging.DEBUG)
if not self.debug and self.log_level:
try:
logging.getLogger().setLevel(self.log_level)
except ValueError:
error("Unknown log level {}, must be one of 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'".format(self.log_level))
logging.getLogger().setLevel(logging.INFO)
def _get_config_data(self):
try:
import dotenv
env_path = "{}/.env".format(self.BASE_DIR)
info("Attempting to load .env from '%s'", env_path)
dotenv.load_dotenv(dotenv_path=env_path, verbose=True)
if not os.getenv('NODE_ID'):
raise ImportError(".env does not contain a valid NODE_ID setting.")
except ImportError as e:
exception("Could not import .env - check that the poller user can read the file, and that composer install has been run recently")
sys.exit(3)
config_cmd = ['/usr/bin/env', 'php', '{}/config_to_json.php'.format(self.BASE_DIR), '2>&1']
try:
return json.loads(subprocess.check_output(config_cmd).decode())
except subprocess.CalledProcessError as e:
error("ERROR: Could not load or parse configuration! {}: {}"
.format(subprocess.list2cmdline(e.cmd), e.output.decode()))
@staticmethod
def parse_group(g):
if g is None:
return [0]
elif type(g) is int:
return [g]
elif type(g) is str:
try:
return [int(x) for x in set(g.split(','))]
except ValueError:
pass
error("Could not parse group string, defaulting to 0")
return [0]
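# Illustrative behaviour of the group parsing above (not part of the upstream
# module); comma-separated strings are de-duplicated via a set, so element
# order is not guaranteed:
#   ServiceConfig.parse_group(None)     -> [0]
#   ServiceConfig.parse_group(3)        -> [3]
#   ServiceConfig.parse_group('1,2,2')  -> [1, 2] (in some order)
#   ServiceConfig.parse_group('oops')   -> [0], after logging an error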
class Service:
config = ServiceConfig()
_fp = False
_started = False
alerting_manager = None
poller_manager = None
discovery_manager = None
services_manager = None
billing_manager = None
last_poll = {}
terminate_flag = False
def __init__(self):
self.config.populate()
threading.current_thread().name = self.config.name # rename main thread
self.attach_signals()
# init database connections different ones for different threads
self._db = LibreNMS.DB(self.config) # main
self._services_db = LibreNMS.DB(self.config) # services dispatch
self._discovery_db = LibreNMS.DB(self.config) # discovery dispatch
self._lm = self.create_lock_manager()
self.daily_timer = LibreNMS.RecurringTimer(self.config.update_frequency, self.run_maintenance, 'maintenance')
self.stats_timer = LibreNMS.RecurringTimer(self.config.poller.frequency, self.log_performance_stats, 'performance')
self.is_master = False
self.performance_stats = {'poller': PerformanceCounter(), 'discovery': PerformanceCounter(), 'services': PerformanceCounter()}
def attach_signals(self):
info("Attaching signal handlers on thread %s", threading.current_thread().name)
signal(SIGTERM, self.terminate) # capture sigterm and exit gracefully
def start(self):
debug("Performing startup checks...")
if self.config.single_instance:
self.check_single_instance() # don't allow more than one service at a time
if self._started:
raise RuntimeWarning("Not allowed to start Poller twice")
self._started = True
debug("Starting up queue managers...")
# initialize and start the worker pools
self.poller_manager = LibreNMS.QueueManager(self.config, 'poller', self.poll_device)
self.alerting_manager = LibreNMS.TimedQueueManager(self.config, 'alerting', self.poll_alerting,
self.dispatch_alerting)
self.services_manager = LibreNMS.TimedQueueManager(self.config, 'services', self.poll_services,
self.dispatch_services)
self.discovery_manager = LibreNMS.TimedQueueManager(self.config, 'discovery', self.discover_device,
self.dispatch_discovery)
self.billing_manager = LibreNMS.BillingQueueManager(self.config, self.poll_billing,
self.dispatch_poll_billing, self.dispatch_calculate_billing)
self.daily_timer.start()
self.stats_timer.start()
info("LibreNMS Service: {} started!".format(self.config.unique_name))
info("Poller group {}. Using Python {} and {} locks and queues"
.format('0 (default)' if self.config.group == [0] else self.config.group, python_version(),
'redis' if isinstance(self._lm, LibreNMS.RedisLock) else 'internal'))
info("Maintenance tasks will be run every {}".format(timedelta(seconds=self.config.update_frequency)))
# Main dispatcher loop
try:
while not self.terminate_flag:
master_lock = self._lm.lock('dispatch.master', self.config.unique_name, self.config.master_timeout, True)
if master_lock:
if not self.is_master:
info("{} is now the master dispatcher".format(self.config.name))
self.is_master = True
self.start_dispatch_timers()
devices = self.fetch_immediate_device_list()
for device in devices:
device_id = device[0]
group = device[1]
if device[2]: # polling
self.dispatch_immediate_polling(device_id, group)
if device[3]: # discovery
self.dispatch_immediate_discovery(device_id, group)
else:
if self.is_master:
info("{} is no longer the master dispatcher".format(self.config.name))
self.stop_dispatch_timers()
self.is_master = False # no longer master
sleep(self.config.master_resolution)
except KeyboardInterrupt:
pass
info("Dispatch loop terminated")
self.shutdown()
# ------------ Discovery ------------
def dispatch_immediate_discovery(self, device_id, group):
if self.discovery_manager.get_queue(group).empty() and not self.discovery_is_locked(device_id):
self.discovery_manager.post_work(device_id, group)
def dispatch_discovery(self):
devices = self.fetch_device_list()
for device in devices:
self.discovery_manager.post_work(device[0], device[1])
def discover_device(self, device_id):
if self.lock_discovery(device_id):
try:
with TimeitContext.start() as t:
info("Discovering device {}".format(device_id))
self.call_script('discovery.php', ('-h', device_id))
info('Discovery complete {}'.format(device_id))
self.report_execution_time(t.delta(), 'discovery')
except subprocess.CalledProcessError as e:
if e.returncode == 5:
info("Device {} is down, cannot discover, waiting {}s for retry"
.format(device_id, self.config.down_retry))
self.lock_discovery(device_id, True)
else:
self.unlock_discovery(device_id)
else:
self.unlock_discovery(device_id)
# ------------ Alerting ------------
def dispatch_alerting(self):
self.alerting_manager.post_work('alerts', 0)
def poll_alerting(self, _=None):
try:
info("Checking alerts")
self.call_script('alerts.php')
except subprocess.CalledProcessError as e:
if e.returncode == 1:
warning("There was an error issuing alerts: {}".format(e.output))
else:
raise
# ------------ Services ------------
def dispatch_services(self):
devices = self.fetch_services_device_list()
for device in devices:
self.services_manager.post_work(device[0], device[1])
def poll_services(self, device_id):
if self.lock_services(device_id):
try:
with TimeitContext.start() as t:
info("Checking services on device {}".format(device_id))
self.call_script('check-services.php', ('-h', device_id))
info('Services complete {}'.format(device_id))
self.report_execution_time(t.delta(), 'services')
except subprocess.CalledProcessError as e:
if e.returncode == 5:
info("Device {} is down, cannot poll service, waiting {}s for retry"
.format(device_id, self.config.down_retry))
self.lock_services(device_id, True)
else:
self.unlock_services(device_id)
else:
self.unlock_services(device_id)
# ------------ Billing ------------
def dispatch_calculate_billing(self):
self.billing_manager.post_work('calculate', 0)
def dispatch_poll_billing(self):
self.billing_manager.post_work('poll', 0)
def poll_billing(self, run_type):
if run_type == 'poll':
info("Polling billing")
self.call_script('poll-billing.php')
info("Polling billing complete")
else: # run_type == 'calculate'
info("Calculating billing")
self.call_script('billing-calculate.php')
info("Calculating billing complete")
# ------------ Polling ------------
def dispatch_immediate_polling(self, device_id, group):
if self.poller_manager.get_queue(group).empty() and not self.polling_is_locked(device_id):
self.poller_manager.post_work(device_id, group)
if self.config.debug:
cur_time = time.time()
elapsed = cur_time - self.last_poll.get(device_id, cur_time)
self.last_poll[device_id] = time.time()
# arbitrary limit to reduce spam
if elapsed > (self.config.poller.frequency - self.config.master_resolution):
debug("Dispatching polling for device {}, time since last poll {:.2f}s"
.format(device_id, elapsed))
def poll_device(self, device_id):
if self.lock_polling(device_id):
info('Polling device {}'.format(device_id))
try:
with TimeitContext.start() as t:
self.call_script('poller.php', ('-h', device_id))
self.report_execution_time(t.delta(), 'poller')
except subprocess.CalledProcessError as e:
if e.returncode == 6:
warning('Polling device {} unreachable, waiting {}s for retry'.format(device_id, self.config.down_retry))
# re-lock to set retry timer
self.lock_polling(device_id, True)
else:
error('Polling device {} failed! {}'.format(device_id, e))
self.unlock_polling(device_id)
else:
info('Polling complete {}'.format(device_id))
# self.polling_unlock(device_id)
else:
debug('Tried to poll {}, but it is locked'.format(device_id))
def fetch_services_device_list(self):
return self._services_db.query("SELECT DISTINCT(`device_id`), `poller_group` FROM `services`"
" LEFT JOIN `devices` USING (`device_id`) WHERE `disabled`=0")
def fetch_device_list(self):
return self._discovery_db.query("SELECT `device_id`, `poller_group` FROM `devices` WHERE `disabled`=0")
def fetch_immediate_device_list(self):
poller_find_time = self.config.poller.frequency - 1
discovery_find_time = self.config.discovery.frequency - 1
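        # The query below flags a device as due when last_polled (or
        # last_discovered) plus the configured frequency, offset by the
        # device's own last runtime, has passed; devices never polled or
        # discovered (NULL timestamps) are always due via COALESCE(..., 1).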
return self._db.query('''SELECT `device_id`,
`poller_group`,
COALESCE(`last_polled` <= DATE_ADD(DATE_ADD(NOW(), INTERVAL -%s SECOND), INTERVAL `last_polled_timetaken` SECOND), 1) AS `poll`,
COALESCE(`last_discovered` <= DATE_ADD(DATE_ADD(NOW(), INTERVAL -%s SECOND), INTERVAL `last_discovered_timetaken` SECOND), 1) AS `discover`
FROM `devices`
WHERE `disabled` = 0 AND (
`last_polled` IS NULL OR
`last_discovered` IS NULL OR
`last_polled` <= DATE_ADD(DATE_ADD(NOW(), INTERVAL -%s SECOND), INTERVAL `last_polled_timetaken` SECOND) OR
`last_discovered` <= DATE_ADD(DATE_ADD(NOW(), INTERVAL -%s SECOND), INTERVAL `last_discovered_timetaken` SECOND)
)
ORDER BY `last_polled_timetaken` DESC''', (poller_find_time, discovery_find_time, poller_find_time, discovery_find_time))
def run_maintenance(self):
"""
Runs update and cleanup tasks by calling daily.sh. Reloads the python script after the update.
Sets a schema-update lock so no distributed pollers will update until the schema has been updated.
"""
attempt = 0
wait = 5
max_runtime = 86100
max_tries = int(max_runtime / wait)
info("Waiting for schema lock")
while not self._lm.lock('schema-update', self.config.unique_name, max_runtime):
attempt += 1
if attempt >= max_tries: # don't get stuck indefinitely
warning('Reached max wait for other pollers to update, updating now')
break
sleep(wait)
info("Running maintenance tasks")
output = self.call_script('daily.sh')
info("Maintenance tasks complete\n{}".format(output))
self.restart()
# Lock Helpers #
def lock_discovery(self, device_id, retry=False):
lock_name = self.gen_lock_name('discovery', device_id)
timeout = self.config.down_retry if retry else LibreNMS.normalize_wait(self.config.discovery.frequency)
return self._lm.lock(lock_name, self.gen_lock_owner(), timeout, retry)
def unlock_discovery(self, device_id):
lock_name = self.gen_lock_name('discovery', device_id)
return self._lm.unlock(lock_name, self.gen_lock_owner())
def discovery_is_locked(self, device_id):
lock_name = self.gen_lock_name('discovery', device_id)
return self._lm.check_lock(lock_name)
def lock_polling(self, device_id, retry=False):
lock_name = self.gen_lock_name('polling', device_id)
timeout = self.config.down_retry if retry else self.config.poller.frequency
return self._lm.lock(lock_name, self.gen_lock_owner(), timeout, retry)
def unlock_polling(self, device_id):
lock_name = self.gen_lock_name('polling', device_id)
return self._lm.unlock(lock_name, self.gen_lock_owner())
def polling_is_locked(self, device_id):
lock_name = self.gen_lock_name('polling', device_id)
return self._lm.check_lock(lock_name)
def lock_services(self, device_id, retry=False):
lock_name = self.gen_lock_name('services', device_id)
timeout = self.config.down_retry if retry else self.config.services.frequency
return self._lm.lock(lock_name, self.gen_lock_owner(), timeout, retry)
def unlock_services(self, device_id):
lock_name = self.gen_lock_name('services', device_id)
return self._lm.unlock(lock_name, self.gen_lock_owner())
def services_is_locked(self, device_id):
lock_name = self.gen_lock_name('services', device_id)
return self._lm.check_lock(lock_name)
@staticmethod
def gen_lock_name(lock_class, device_id):
return '{}.device.{}'.format(lock_class, device_id)
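    # e.g. gen_lock_name('polling', 42) -> 'polling.device.42' (illustrative)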
def gen_lock_owner(self):
return "{}-{}".format(self.config.unique_name, threading.current_thread().name)
def call_script(self, script, args=()):
"""
Run a LibreNMS script. Captures all output and throws an exception if a non-zero
status is returned. Blocks parent signals (like SIGINT and SIGTERM).
:param script: the name of the executable relative to the base directory
:param args: a tuple of arguments to send to the command
        :return: the output of the command
"""
if script.endswith('.php'):
# save calling the sh process
base = ('/usr/bin/env', 'php')
else:
base = ()
cmd = base + ("{}/{}".format(self.config.BASE_DIR, script),) + tuple(map(str, args))
# preexec_fn=os.setsid here keeps process signals from propagating
return subprocess.check_output(cmd, stderr=subprocess.STDOUT, preexec_fn=os.setsid, close_fds=True).decode()
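    # Illustrative call sketch (not part of the upstream module): the device id
    # below is a placeholder.
    #   self.call_script('poller.php', ('-h', 42))
    #   -> runs: /usr/bin/env php <BASE_DIR>/poller.php -h 42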
def create_lock_manager(self):
"""
Create a new LockManager. Tries to create a Redis LockManager, but falls
back to python's internal threading lock implementation.
        Exits if distributed polling is enabled and a Redis LockManager cannot be created.
:return: Instance of LockManager
"""
try:
return LibreNMS.RedisLock(namespace='librenms.lock',
host=self.config.redis_host,
port=self.config.redis_port,
db=self.config.redis_db,
password=self.config.redis_pass,
unix_socket_path=self.config.redis_socket)
except ImportError:
if self.config.distributed:
critical("ERROR: Redis connection required for distributed polling")
critical("Please install redis-py, either through your os software repository or from PyPI")
sys.exit(2)
except Exception as e:
if self.config.distributed:
critical("ERROR: Redis connection required for distributed polling")
critical("Could not connect to Redis. {}".format(e))
sys.exit(2)
return LibreNMS.ThreadingLock()
def restart(self):
"""
Stop then recreate this entire process by re-calling the original script.
Has the effect of reloading the python files from disk.
"""
if sys.version_info < (3, 4, 0):
warning("Skipping restart as running under an incompatible interpreter")
warning("Please restart manually")
return
info('Restarting service... ')
self._stop_managers_and_wait()
self._lm.unlock('dispatch.master', self.config.unique_name)
python = sys.executable
os.execl(python, python, *sys.argv)
def terminate(self, _unused=None, _=None):
"""
        Handle SIGTERM by setting the terminate flag to begin a clean shutdown
:param _unused:
:param _:
"""
info("Received SIGTERM on thead %s, handling", threading.current_thread().name)
self.terminate_flag = True
def shutdown(self, _unused=None, _=None):
"""
Stop and exit, waiting for all child processes to exit.
:param _unused:
:param _:
"""
info('Shutting down, waiting for running jobs to complete...')
self.stop_dispatch_timers()
self._lm.unlock('dispatch.master', self.config.unique_name)
self.daily_timer.stop()
self.stats_timer.stop()
self._stop_managers_and_wait()
# try to release master lock
info('Shutdown of %s/%s complete', os.getpid(), threading.current_thread().name)
sys.exit(0)
def start_dispatch_timers(self):
"""
Start all dispatch timers and begin pushing events into queues.
This should only be started when we are the master dispatcher.
"""
self.alerting_manager.start_dispatch()
self.billing_manager.start_dispatch()
self.services_manager.start_dispatch()
self.discovery_manager.start_dispatch()
def stop_dispatch_timers(self):
"""
Stop all dispatch timers, this should be called when we are no longer the master dispatcher.
"""
self.alerting_manager.stop_dispatch()
self.billing_manager.stop_dispatch()
self.services_manager.stop_dispatch()
self.discovery_manager.stop_dispatch()
def _stop_managers_and_wait(self):
"""
Stop all QueueManagers, and wait for their processing threads to complete.
We send the stop signal to all QueueManagers first, then wait for them to finish.
"""
self.discovery_manager.stop()
self.poller_manager.stop()
self.services_manager.stop()
self.billing_manager.stop()
self.discovery_manager.stop_and_wait()
self.poller_manager.stop_and_wait()
self.services_manager.stop_and_wait()
self.billing_manager.stop_and_wait()
def check_single_instance(self):
"""
Check that there is only one instance of the service running on this computer.
        We do this by creating a file in the base directory (.lock.service) if it doesn't exist and
obtaining an exclusive lock on that file.
"""
lock_file = "{}/{}".format(self.config.BASE_DIR, '.lock.service')
import fcntl
self._fp = open(lock_file, 'w') # keep a reference so the file handle isn't garbage collected
self._fp.flush()
try:
fcntl.lockf(self._fp, fcntl.LOCK_EX | fcntl.LOCK_NB)
except IOError:
warning("Another instance is already running, quitting.")
exit(2)
def report_execution_time(self, time, activity):
self.performance_stats[activity].add(time)
def log_performance_stats(self):
info("Counting up time spent polling")
try:
# Report on the poller instance as a whole
self._db.query('INSERT INTO poller_cluster(node_id, poller_name, poller_version, poller_groups, last_report, master) '
'values("{0}", "{1}", "{2}", "{3}", NOW(), {4}) '
'ON DUPLICATE KEY UPDATE poller_version="{2}", poller_groups="{3}", last_report=NOW(), master={4}; '
.format(self.config.node_id, self.config.name, "librenms-service", ','.join(str(g) for g in self.config.group), 1 if self.is_master else 0))
# Find our ID
self._db.query('SELECT id INTO @parent_poller_id FROM poller_cluster WHERE node_id="{0}"; '.format(self.config.node_id))
for worker_type, counter in self.performance_stats.items():
worker_seconds, devices = counter.reset()
# Record the queue state
self._db.query('INSERT INTO poller_cluster_stats(parent_poller, poller_type, depth, devices, worker_seconds, workers, frequency) '
'values(@parent_poller_id, "{0}", {1}, {2}, {3}, {4}, {5}) '
'ON DUPLICATE KEY UPDATE depth={1}, devices={2}, worker_seconds={3}, workers={4}, frequency={5}; '
.format(worker_type,
sum([getattr(self, ''.join([worker_type, '_manager'])).get_queue(group).qsize() for group in self.config.group]),
devices,
worker_seconds,
getattr(self.config, worker_type).workers,
getattr(self.config, worker_type).frequency)
)
except Exception:
exception("Unable to log performance statistics - is the database still online?")
| gpl-3.0 |
mmp2/megaman | doc/sphinxext/numpy_ext/edit_on_github.py | 11 | 5895 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
This extension makes it easy to edit documentation on github.
It adds links associated with each docstring that go to the
corresponding view source page on Github. From there, the user can
push the "Edit" button, edit the docstring, and submit a pull request.
It has the following configuration options (to be set in the project's
``conf.py``):
* ``edit_on_github_project``
The name of the github project, in the form
"username/projectname".
* ``edit_on_github_branch``
The name of the branch to edit. If this is a released version,
this should be a git tag referring to that version. For a
dev version, it often makes sense for it to be "master". It
may also be a git hash.
* ``edit_on_github_source_root``
The location within the source tree of the root of the
Python package. Defaults to "lib".
* ``edit_on_github_doc_root``
The location within the source tree of the root of the
documentation source. Defaults to "doc", but it may make sense to
set it to "doc/source" if the project uses a separate source
directory.
* ``edit_on_github_docstring_message``
The phrase displayed in the links to edit a docstring. Defaults
to "[edit on github]".
* ``edit_on_github_page_message``
    The phrase displayed in the links to edit an RST page. Defaults
    to "Edit This Page on Github".
* ``edit_on_github_help_message``
The phrase displayed as a tooltip on the edit links. Defaults to
"Push the Edit button on the next page"
* ``edit_on_github_skip_regex``
When the path to the .rst file matches this regular expression,
no "edit this page on github" link will be added. Defaults to
``"_.*"``.
"""
import inspect
import os
import re
import sys
from docutils import nodes
from sphinx import addnodes
def import_object(modname, name):
"""
Import the object given by *modname* and *name* and return it.
If not found, or the import fails, returns None.
"""
try:
__import__(modname)
mod = sys.modules[modname]
obj = mod
for part in name.split('.'):
obj = getattr(obj, part)
return obj
    except Exception:
return None
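# Illustrative behaviour (not part of the original file):
#   import_object('os.path', 'join')     -> <function join>
#   import_object('os.path', 'missing')  -> None (the AttributeError is swallowed)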
def get_url_base(app):
return 'http://github.com/%s/tree/%s/' % (
app.config.edit_on_github_project,
app.config.edit_on_github_branch)
def doctree_read(app, doctree):
# Get the configuration parameters
if app.config.edit_on_github_project == 'REQUIRED':
raise ValueError(
"The edit_on_github_project configuration variable must be "
"provided in the conf.py")
source_root = app.config.edit_on_github_source_root
url = get_url_base(app)
docstring_message = app.config.edit_on_github_docstring_message
# Handle the docstring-editing links
for objnode in doctree.traverse(addnodes.desc):
if objnode.get('domain') != 'py':
continue
names = set()
for signode in objnode:
if not isinstance(signode, addnodes.desc_signature):
continue
modname = signode.get('module')
if not modname:
continue
fullname = signode.get('fullname')
if fullname in names:
# only one link per name, please
continue
names.add(fullname)
obj = import_object(modname, fullname)
anchor = None
if obj is not None:
try:
lines, lineno = inspect.getsourcelines(obj)
                except Exception:
pass
else:
anchor = '#L%d' % lineno
if anchor:
real_modname = inspect.getmodule(obj).__name__
path = '%s%s%s.py%s' % (
url, source_root, real_modname.replace('.', '/'), anchor)
onlynode = addnodes.only(expr='html')
onlynode += nodes.reference(
reftitle=app.config.edit_on_github_help_message,
refuri=path)
onlynode[0] += nodes.inline(
'', '', nodes.raw('', ' ', format='html'),
nodes.Text(docstring_message),
classes=['edit-on-github', 'viewcode-link'])
signode += onlynode
def html_page_context(app, pagename, templatename, context, doctree):
if (templatename == 'page.html' and
not re.match(app.config.edit_on_github_skip_regex, pagename)):
doc_root = app.config.edit_on_github_doc_root
if doc_root != '' and not doc_root.endswith('/'):
doc_root += '/'
doc_path = os.path.relpath(doctree.get('source'), app.builder.srcdir)
url = get_url_base(app)
page_message = app.config.edit_on_github_page_message
context['edit_on_github'] = url + doc_root + doc_path
        context['edit_on_github_page_message'] = page_message
def setup(app):
app.add_config_value('edit_on_github_project', 'REQUIRED', True)
app.add_config_value('edit_on_github_branch', 'master', True)
app.add_config_value('edit_on_github_source_root', 'lib', True)
app.add_config_value('edit_on_github_doc_root', 'doc', True)
app.add_config_value('edit_on_github_docstring_message',
'[edit on github]', True)
app.add_config_value('edit_on_github_page_message',
'Edit This Page on Github', True)
app.add_config_value('edit_on_github_help_message',
'Push the Edit button on the next page', True)
app.add_config_value('edit_on_github_skip_regex',
'_.*', True)
app.connect('doctree-read', doctree_read)
app.connect('html-page-context', html_page_context)
| bsd-2-clause |
mbareta/edx-platform-ft | common/lib/xmodule/xmodule/modulestore/mixed.py | 10 | 43011 | """
MixedModuleStore allows for aggregation between multiple modulestores.
In this way, courses can be served up via either SplitMongoModuleStore or MongoModuleStore.
"""
import logging
from contextlib import contextmanager
import itertools
import functools
from contracts import contract, new_contract
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey, AssetKey
from opaque_keys.edx.locator import LibraryLocator
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.assetstore import AssetMetadata
from . import ModuleStoreWriteBase, ModuleStoreEnum, XMODULE_FIELDS_WITH_USAGE_KEYS
from .exceptions import ItemNotFoundError, DuplicateCourseError
from .draft_and_published import ModuleStoreDraftAndPublished
from .split_migrator import SplitMigrator
new_contract('CourseKey', CourseKey)
new_contract('AssetKey', AssetKey)
new_contract('AssetMetadata', AssetMetadata)
new_contract('LibraryLocator', LibraryLocator)
new_contract('long', long)
log = logging.getLogger(__name__)
def strip_key(func):
"""
A decorator for stripping version and branch information from return values that are, or contain, UsageKeys or
CourseKeys.
Additionally, the decorated function is called with an optional 'field_decorator' parameter that can be used
to strip any location(-containing) fields, which are not directly returned by the function.
The behavior can be controlled by passing 'remove_version' and 'remove_branch' booleans to the decorated
function's kwargs.
"""
@functools.wraps(func)
def inner(*args, **kwargs):
"""
Supported kwargs:
remove_version - If True, calls 'version_agnostic' on all return values, including those in lists and dicts.
remove_branch - If True, calls 'for_branch(None)' on all return values, including those in lists and dicts.
Note: The 'field_decorator' parameter passed to the decorated function is a function that honors the
values of these kwargs.
"""
# remove version and branch, by default
rem_vers = kwargs.pop('remove_version', True)
rem_branch = kwargs.pop('remove_branch', True)
# helper function for stripping individual values
def strip_key_func(val):
"""
Strips the version and branch information according to the settings of rem_vers and rem_branch.
Recursively calls this function if the given value has a 'location' attribute.
"""
retval = val
if rem_vers and hasattr(retval, 'version_agnostic'):
retval = retval.version_agnostic()
if rem_branch and hasattr(retval, 'for_branch'):
retval = retval.for_branch(None)
for field_name in XMODULE_FIELDS_WITH_USAGE_KEYS:
if hasattr(retval, field_name):
setattr(retval, field_name, strip_key_func(getattr(retval, field_name)))
return retval
# function for stripping both, collection of, and individual, values
def strip_key_collection(field_value):
"""
Calls strip_key_func for each element in the given value.
"""
if rem_vers or rem_branch:
if isinstance(field_value, list):
field_value = [strip_key_func(fv) for fv in field_value]
elif isinstance(field_value, dict):
for key, val in field_value.iteritems():
field_value[key] = strip_key_func(val)
else:
field_value = strip_key_func(field_value)
return field_value
# call the decorated function
retval = func(field_decorator=strip_key_collection, *args, **kwargs)
# strip the return value
return strip_key_collection(retval)
return inner
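# Illustrative call sketch (not part of the original file): callers can retain
# version/branch information on returned keys by disabling the stripping kwargs
# that the decorator pops before invoking the wrapped function.
#   store.get_item(usage_key)                      # version and branch stripped
#   store.get_item(usage_key, remove_version=False, remove_branch=False)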
def prepare_asides(func):
"""
A decorator to handle optional asides param
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
"""
Supported kwargs:
asides - list with connected asides data for the passed block
"""
if 'asides' in kwargs:
kwargs['asides'] = prepare_asides_to_store(kwargs['asides'])
return func(*args, **kwargs)
return wrapper
def prepare_asides_to_store(asides_source):
"""
Convert Asides Xblocks objects to the list of dicts (to store this information in MongoDB)
"""
asides = None
if asides_source:
asides = []
for asd in asides_source:
aside_fields = {}
for asd_field_key, asd_field_val in asd.fields.iteritems():
aside_fields[asd_field_key] = asd_field_val.read_from(asd)
asides.append({
'aside_type': asd.scope_ids.block_type,
'fields': aside_fields
})
return asides
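# Sketch of the stored shape (not part of the original file): each aside XBlock
# is flattened to a plain dict before being written to MongoDB, e.g.
#   [{'aside_type': 'tagging_aside', 'fields': {'saved_tags': {...}}}]
# where the aside type and field names here are hypothetical.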
class MixedModuleStore(ModuleStoreDraftAndPublished, ModuleStoreWriteBase):
"""
ModuleStore knows how to route requests to the right persistence ms
"""
def __init__(
self,
contentstore,
mappings,
stores,
i18n_service=None,
fs_service=None,
user_service=None,
create_modulestore_instance=None,
signal_handler=None,
**kwargs
):
"""
        Initialize a MixedModuleStore. Here we look into our passed-in kwargs, which should be a
        collection of other modulestore configuration information.
"""
super(MixedModuleStore, self).__init__(contentstore, **kwargs)
if create_modulestore_instance is None:
raise ValueError('MixedModuleStore constructor must be passed a create_modulestore_instance function')
self.modulestores = []
self.mappings = {}
for course_id, store_name in mappings.iteritems():
try:
self.mappings[CourseKey.from_string(course_id)] = store_name
except InvalidKeyError:
try:
self.mappings[SlashSeparatedCourseKey.from_deprecated_string(course_id)] = store_name
except InvalidKeyError:
log.exception("Invalid MixedModuleStore configuration. Unable to parse course_id %r", course_id)
continue
for store_settings in stores:
key = store_settings['NAME']
store = create_modulestore_instance(
store_settings['ENGINE'],
self.contentstore,
store_settings.get('DOC_STORE_CONFIG', {}),
store_settings.get('OPTIONS', {}),
i18n_service=i18n_service,
fs_service=fs_service,
user_service=user_service,
signal_handler=signal_handler,
)
# replace all named pointers to the store into actual pointers
for course_key, store_name in self.mappings.iteritems():
if store_name == key:
self.mappings[course_key] = store
self.modulestores.append(store)
def _clean_locator_for_mapping(self, locator):
"""
In order for mapping to work, the locator must be minimal--no version, no branch--
as we never store one version or one branch in one ms and another in another ms.
:param locator: the CourseKey
"""
if hasattr(locator, 'version_agnostic'):
locator = locator.version_agnostic()
if hasattr(locator, 'branch'):
locator = locator.replace(branch=None)
return locator
def _get_modulestore_for_courselike(self, locator=None):
"""
For a given locator, look in the mapping table and see if it has been pinned
to a particular modulestore
If locator is None, returns the first (ordered) store as the default
"""
if locator is not None:
locator = self._clean_locator_for_mapping(locator)
mapping = self.mappings.get(locator, None)
if mapping is not None:
return mapping
else:
if isinstance(locator, LibraryLocator):
has_locator = lambda store: hasattr(store, 'has_library') and store.has_library(locator)
else:
has_locator = lambda store: store.has_course(locator)
for store in self.modulestores:
if has_locator(store):
self.mappings[locator] = store
return store
# return the default store
return self.default_modulestore
def _get_modulestore_by_type(self, modulestore_type):
"""
This method should only really be used by tests and migration scripts when necessary.
Returns the module store as requested by type. The type can be a value from ModuleStoreEnum.Type.
"""
for store in self.modulestores:
if store.get_modulestore_type() == modulestore_type:
return store
return None
def fill_in_run(self, course_key):
"""
Some course_keys are used without runs. This function calls the corresponding
fill_in_run function on the appropriate modulestore.
"""
store = self._get_modulestore_for_courselike(course_key)
if not hasattr(store, 'fill_in_run'):
return course_key
return store.fill_in_run(course_key)
def has_item(self, usage_key, **kwargs):
"""
        Does the course include the xblock whose id is referenced?
"""
store = self._get_modulestore_for_courselike(usage_key.course_key)
return store.has_item(usage_key, **kwargs)
@strip_key
def get_item(self, usage_key, depth=0, **kwargs):
"""
see parent doc
"""
store = self._get_modulestore_for_courselike(usage_key.course_key)
return store.get_item(usage_key, depth, **kwargs)
@strip_key
def get_items(self, course_key, **kwargs):
"""
Returns:
list of XModuleDescriptor instances for the matching items within the course with
the given course_key
NOTE: don't use this to look for courses
as the course_key is required. Use get_courses.
Args:
course_key (CourseKey): the course identifier
kwargs:
settings (dict): fields to look for which have settings scope. Follows same syntax
and rules as kwargs below
content (dict): fields to look for which have content scope. Follows same syntax and
rules as kwargs below.
qualifiers (dict): what to look for within the course.
Common qualifiers are ``category`` or any field name. if the target field is a list,
then it searches for the given value in the list not list equivalence.
Substring matching pass a regex object.
For some modulestores, ``name`` is another commonly provided key (Location based stores)
For some modulestores,
you can search by ``edited_by``, ``edited_on`` providing either a datetime for == (probably
useless) or a function accepting one arg to do inequality
"""
if not isinstance(course_key, CourseKey):
raise Exception("Must pass in a course_key when calling get_items()")
store = self._get_modulestore_for_courselike(course_key)
return store.get_items(course_key, **kwargs)
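    # Illustrative query sketch (not part of the original file): find problem
    # blocks whose settings-scoped display_name mentions "Exam"; the field and
    # value here are placeholders.
    #   import re
    #   store.get_items(course_key, qualifiers={'category': 'problem'},
    #                   settings={'display_name': re.compile('Exam')})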
@strip_key
def get_course_summaries(self, **kwargs):
"""
Returns a list containing the course information in CourseSummary objects.
Information contains `location`, `display_name`, `locator` of the courses in this modulestore.
"""
course_summaries = {}
for store in self.modulestores:
for course_summary in store.get_course_summaries(**kwargs):
course_id = self._clean_locator_for_mapping(locator=course_summary.id)
# Check if course is indeed unique. Save it in result if unique
if course_id in course_summaries:
log.warning(
u"Modulestore %s have duplicate courses %s; skipping from result.", store, course_id
)
else:
course_summaries[course_id] = course_summary
return course_summaries.values()
@strip_key
def get_courses(self, **kwargs):
'''
Returns a list containing the top level XModuleDescriptors of the courses in this modulestore.
'''
courses = {}
for store in self.modulestores:
# filter out ones which were fetched from earlier stores but locations may not be ==
for course in store.get_courses(**kwargs):
course_id = self._clean_locator_for_mapping(course.id)
if course_id not in courses:
# course is indeed unique. save it in result
courses[course_id] = course
return courses.values()
@strip_key
def get_libraries(self, **kwargs):
"""
Returns a list containing the top level XBlock of the libraries (LibraryRoot) in this modulestore.
"""
libraries = {}
for store in self.modulestores:
if not hasattr(store, 'get_libraries'):
continue
# filter out ones which were fetched from earlier stores but locations may not be ==
for library in store.get_libraries(**kwargs):
library_id = self._clean_locator_for_mapping(library.location)
if library_id not in libraries:
# library is indeed unique. save it in result
libraries[library_id] = library
return libraries.values()
def make_course_key(self, org, course, run):
"""
Return a valid :class:`~opaque_keys.edx.keys.CourseKey` for this modulestore
that matches the supplied `org`, `course`, and `run`.
This key may represent a course that doesn't exist in this modulestore.
"""
        # If there is a mapping that matches this org/course/run, use that
for course_id, store in self.mappings.iteritems():
candidate_key = store.make_course_key(org, course, run)
if candidate_key == course_id:
return candidate_key
# Otherwise, return the key created by the default store
return self.default_modulestore.make_course_key(org, course, run)
def make_course_usage_key(self, course_key):
"""
Return a valid :class:`~opaque_keys.edx.keys.UsageKey` for the modulestore
that matches the supplied course_key.
"""
assert isinstance(course_key, CourseKey)
store = self._get_modulestore_for_courselike(course_key)
return store.make_course_usage_key(course_key)
@strip_key
def get_course(self, course_key, depth=0, **kwargs):
"""
returns the course module associated with the course_id. If no such course exists,
it returns None
:param course_key: must be a CourseKey
"""
assert isinstance(course_key, CourseKey)
store = self._get_modulestore_for_courselike(course_key)
try:
return store.get_course(course_key, depth=depth, **kwargs)
except ItemNotFoundError:
return None
@strip_key
@contract(library_key='LibraryLocator')
def get_library(self, library_key, depth=0, **kwargs):
"""
returns the library block associated with the given key. If no such library exists,
it returns None
:param library_key: must be a LibraryLocator
"""
try:
store = self._verify_modulestore_support(library_key, 'get_library')
return store.get_library(library_key, depth=depth, **kwargs)
except NotImplementedError:
log.exception("Modulestore configured for %s does not have get_library method", library_key)
return None
except ItemNotFoundError:
return None
@strip_key
def has_course(self, course_id, ignore_case=False, **kwargs):
"""
returns the course_id of the course if it was found, else None
Note: we return the course_id instead of a boolean here since the found course may have
a different id than the given course_id when ignore_case is True.
Args:
* course_id (CourseKey)
* ignore_case (bool): If True, do a case insensitive search. If
False, do a case sensitive search
"""
assert isinstance(course_id, CourseKey)
store = self._get_modulestore_for_courselike(course_id)
return store.has_course(course_id, ignore_case, **kwargs)
def delete_course(self, course_key, user_id):
"""
See xmodule.modulestore.__init__.ModuleStoreWrite.delete_course
"""
assert isinstance(course_key, CourseKey)
store = self._get_modulestore_for_courselike(course_key)
return store.delete_course(course_key, user_id)
@contract(asset_metadata='AssetMetadata', user_id='int|long', import_only=bool)
def save_asset_metadata(self, asset_metadata, user_id, import_only=False):
"""
Saves the asset metadata for a particular course's asset.
Args:
asset_metadata (AssetMetadata): data about the course asset data
user_id (int|long): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if info save was successful, else False
"""
store = self._get_modulestore_for_courselike(asset_metadata.asset_id.course_key)
return store.save_asset_metadata(asset_metadata, user_id, import_only)
@contract(asset_metadata_list='list(AssetMetadata)', user_id='int|long', import_only=bool)
def save_asset_metadata_list(self, asset_metadata_list, user_id, import_only=False):
"""
Saves the asset metadata for each asset in a list of asset metadata.
Optimizes the saving of many assets.
Args:
asset_metadata_list (list(AssetMetadata)): list of data about several course assets
user_id (int|long): user ID saving the asset metadata
import_only (bool): True if importing without editing, False if editing
Returns:
True if info save was successful, else False
"""
if len(asset_metadata_list) == 0:
return True
store = self._get_modulestore_for_courselike(asset_metadata_list[0].asset_id.course_key)
return store.save_asset_metadata_list(asset_metadata_list, user_id, import_only)
@strip_key
@contract(asset_key='AssetKey')
def find_asset_metadata(self, asset_key, **kwargs):
"""
Find the metadata for a particular course asset.
Args:
asset_key (AssetKey): locator containing original asset filename
Returns:
asset metadata (AssetMetadata) -or- None if not found
"""
store = self._get_modulestore_for_courselike(asset_key.course_key)
return store.find_asset_metadata(asset_key, **kwargs)
@strip_key
@contract(course_key='CourseKey', asset_type='None | basestring', start=int, maxresults=int, sort='tuple|None')
def get_all_asset_metadata(self, course_key, asset_type, start=0, maxresults=-1, sort=None, **kwargs):
"""
Returns a list of static assets for a course.
By default all assets are returned, but start and maxresults can be provided to limit the query.
Args:
course_key (CourseKey): course identifier
asset_type (str): type of asset, such as 'asset', 'video', etc. If None, return assets of all types.
start (int): optional - start at this asset number
maxresults (int): optional - return at most this many, -1 means no limit
sort (array): optional - None means no sort
(sort_by (str), sort_order (str))
sort_by - one of 'uploadDate' or 'displayname'
sort_order - one of 'ascending' or 'descending'
Returns:
List of AssetMetadata objects.
"""
store = self._get_modulestore_for_courselike(course_key)
return store.get_all_asset_metadata(course_key, asset_type, start, maxresults, sort, **kwargs)
@contract(asset_key='AssetKey', user_id='int|long')
def delete_asset_metadata(self, asset_key, user_id):
"""
Deletes a single asset's metadata.
Arguments:
            asset_key (AssetKey): locator containing original asset filename
            user_id (int|long): user deleting the metadata
Returns:
Number of asset metadata entries deleted (0 or 1)
"""
store = self._get_modulestore_for_courselike(asset_key.course_key)
return store.delete_asset_metadata(asset_key, user_id)
@contract(source_course_key='CourseKey', dest_course_key='CourseKey', user_id='int|long')
def copy_all_asset_metadata(self, source_course_key, dest_course_key, user_id):
"""
Copy all the course assets from source_course_key to dest_course_key.
Arguments:
source_course_key (CourseKey): identifier of course to copy from
dest_course_key (CourseKey): identifier of course to copy to
user_id (int|long): user copying the asset metadata
"""
source_store = self._get_modulestore_for_courselike(source_course_key)
dest_store = self._get_modulestore_for_courselike(dest_course_key)
if source_store != dest_store:
with self.bulk_operations(dest_course_key):
# Get all the asset metadata in the source course.
all_assets = source_store.get_all_asset_metadata(source_course_key, 'asset')
# Store it all in the dest course.
for asset in all_assets:
new_asset_key = dest_course_key.make_asset_key('asset', asset.asset_id.path)
copied_asset = AssetMetadata(new_asset_key)
copied_asset.from_storable(asset.to_storable())
dest_store.save_asset_metadata(copied_asset, user_id)
else:
# Courses in the same modulestore can be handled by the modulestore itself.
source_store.copy_all_asset_metadata(source_course_key, dest_course_key, user_id)
@contract(asset_key='AssetKey', attr=str, user_id='int|long')
def set_asset_metadata_attr(self, asset_key, attr, value, user_id):
"""
Add/set the given attr on the asset at the given location. Value can be any type which pymongo accepts.
Arguments:
asset_key (AssetKey): asset identifier
attr (str): which attribute to set
value: the value to set it to (any type pymongo accepts such as datetime, number, string)
            user_id (int|long): user setting the attribute
Raises:
NotFoundError if no such item exists
            AttributeError if attr is one of the built-in attrs.
"""
store = self._get_modulestore_for_courselike(asset_key.course_key)
return store.set_asset_metadata_attrs(asset_key, {attr: value}, user_id)
@contract(asset_key='AssetKey', attr_dict=dict, user_id='int|long')
def set_asset_metadata_attrs(self, asset_key, attr_dict, user_id):
"""
Add/set the given dict of attrs on the asset at the given location. Value can be any type which pymongo accepts.
Arguments:
asset_key (AssetKey): asset identifier
attr_dict (dict): attribute/value pairs to set
            user_id (int|long): user setting the attributes
Raises:
NotFoundError if no such item exists
            AttributeError if attr is one of the built-in attrs.
"""
store = self._get_modulestore_for_courselike(asset_key.course_key)
return store.set_asset_metadata_attrs(asset_key, attr_dict, user_id)
@strip_key
def get_parent_location(self, location, **kwargs):
"""
returns the parent locations for a given location
"""
store = self._get_modulestore_for_courselike(location.course_key)
return store.get_parent_location(location, **kwargs)
def get_block_original_usage(self, usage_key):
"""
If a block was inherited into another structure using copy_from_template,
this will return the original block usage locator from which the
copy was inherited.
"""
try:
store = self._verify_modulestore_support(usage_key.course_key, 'get_block_original_usage')
return store.get_block_original_usage(usage_key)
except NotImplementedError:
return None, None
def get_modulestore_type(self, course_id):
"""
Returns a type which identifies which modulestore is servicing the given course_id.
The return can be one of:
"xml" (for XML based courses),
"mongo" for old-style MongoDB backed courses,
"split" for new-style split MongoDB backed courses.
"""
return self._get_modulestore_for_courselike(course_id).get_modulestore_type()
@strip_key
def get_orphans(self, course_key, **kwargs):
"""
        Get all of the xblocks in the given course which have no parents and are not of types which are
        usually orphaned. NOTE: the result may still include xblocks that are referenced by other
        xblocks which don't use `children` to point to their dependents.
"""
store = self._get_modulestore_for_courselike(course_key)
return store.get_orphans(course_key, **kwargs)
def get_errored_courses(self):
"""
Return a dictionary of course_dir -> [(msg, exception_str)], for each
course_dir where course loading failed.
"""
errs = {}
for store in self.modulestores:
errs.update(store.get_errored_courses())
return errs
@strip_key
def create_course(self, org, course, run, user_id, **kwargs):
"""
Creates and returns the course.
Args:
org (str): the organization that owns the course
course (str): the name of the course
run (str): the name of the run
user_id: id of the user creating the course
fields (dict): Fields to set on the course at initialization
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
Returns: a CourseDescriptor
"""
# first make sure an existing course doesn't already exist in the mapping
course_key = self.make_course_key(org, course, run)
if course_key in self.mappings and self.mappings[course_key].has_course(course_key):
raise DuplicateCourseError(course_key, course_key)
# create the course
store = self._verify_modulestore_support(None, 'create_course')
course = store.create_course(org, course, run, user_id, **kwargs)
# add new course to the mapping
self.mappings[course_key] = store
return course
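    # Usage sketch (illustrative; the org/course/run values and `mixed_store`
    # are assumptions):
    #
    #   course = mixed_store.create_course('edX', 'Demo', '2014', user_id)
    #
    # Calling it again with the same org/course/run raises DuplicateCourseError,
    # since the key is checked against self.mappings before delegation.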
@strip_key
def create_library(self, org, library, user_id, fields, **kwargs):
"""
Creates and returns a new library.
Args:
            org (str): the organization that owns the library
            library (str): the code/number/name of the library
            user_id: id of the user creating the library
            fields (dict): Fields to set on the library at initialization - e.g. display_name
kwargs: Any optional arguments understood by a subset of modulestores to customize instantiation
Returns: a LibraryRoot
"""
# first make sure an existing course/lib doesn't already exist in the mapping
lib_key = LibraryLocator(org=org, library=library)
if lib_key in self.mappings:
raise DuplicateCourseError(lib_key, lib_key)
# create the library
store = self._verify_modulestore_support(None, 'create_library')
library = store.create_library(org, library, user_id, fields, **kwargs)
# add new library to the mapping
self.mappings[lib_key] = store
return library
@strip_key
def clone_course(self, source_course_id, dest_course_id, user_id, fields=None, **kwargs):
"""
See the superclass for the general documentation.
If cloning w/in a store, delegates to that store's clone_course which, in order to be self-
sufficient, should handle the asset copying (call the same method as this one does)
If cloning between stores,
* copy the assets
* migrate the courseware
"""
source_modulestore = self._get_modulestore_for_courselike(source_course_id)
        # For a temporary period we may want to hardcode dest_modulestore as split (when a split store
        # exists) so that only course re-runs go to split. This code, however, uses the configured priority.
dest_modulestore = self._get_modulestore_for_courselike(dest_course_id)
if source_modulestore == dest_modulestore:
return source_modulestore.clone_course(source_course_id, dest_course_id, user_id, fields, **kwargs)
if dest_modulestore.get_modulestore_type() == ModuleStoreEnum.Type.split:
split_migrator = SplitMigrator(dest_modulestore, source_modulestore)
split_migrator.migrate_mongo_course(source_course_id, user_id, dest_course_id.org,
dest_course_id.course, dest_course_id.run, fields, **kwargs)
# the super handles assets and any other necessities
super(MixedModuleStore, self).clone_course(source_course_id, dest_course_id, user_id, fields, **kwargs)
else:
raise NotImplementedError("No code for cloning from {} to {}".format(
source_modulestore, dest_modulestore
))
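    # Cross-store cloning sketch (illustrative; the keys are assumptions):
    # cloning an old-style mongo course into a split destination goes through
    # SplitMigrator rather than a plain in-store clone:
    #
    #   mixed_store.clone_course(mongo_course_id, split_course_id, user_id)
    #
    # Any other cross-store combination raises NotImplementedError, as above.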
@strip_key
@prepare_asides
def create_item(self, user_id, course_key, block_type, block_id=None, fields=None, **kwargs):
"""
Creates and saves a new item in a course.
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
course_key: A :class:`~opaque_keys.edx.CourseKey` identifying which course to create
this item in
            block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
fields (dict): A dictionary specifying initial values for some or all fields
in the newly created block
"""
modulestore = self._verify_modulestore_support(course_key, 'create_item')
return modulestore.create_item(user_id, course_key, block_type, block_id=block_id, fields=fields, **kwargs)
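    # Creation sketch (illustrative; the org/course/run values and `mixed_store`
    # are assumptions, not part of this module):
    #
    #   course_key = mixed_store.make_course_key('edX', 'Demo', '2014')
    #   block = mixed_store.create_item(user_id, course_key, 'html',
    #                                   fields={'data': '<p>hello</p>'})
    #
    # Omitting block_id lets the underlying store generate a new identifier.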
@strip_key
@prepare_asides
def create_child(self, user_id, parent_usage_key, block_type, block_id=None, fields=None, **kwargs):
"""
Creates and saves a new xblock that is a child of the specified block
Returns the newly created item.
Args:
user_id: ID of the user creating and saving the xmodule
parent_usage_key: a :class:`~opaque_key.edx.UsageKey` identifying the
block that this item should be parented under
            block_type: The type of block to create
block_id: a unique identifier for the new item. If not supplied,
a new identifier will be generated
fields (dict): A dictionary specifying initial values for some or all fields
in the newly created block
"""
modulestore = self._verify_modulestore_support(parent_usage_key.course_key, 'create_child')
return modulestore.create_child(user_id, parent_usage_key, block_type, block_id=block_id, fields=fields, **kwargs)
@strip_key
@prepare_asides
def import_xblock(self, user_id, course_key, block_type, block_id, fields=None, runtime=None, **kwargs):
"""
        See :py:meth:`ModuleStoreDraftAndPublished.import_xblock`
Defer to the course's modulestore if it supports this method
"""
store = self._verify_modulestore_support(course_key, 'import_xblock')
return store.import_xblock(user_id, course_key, block_type, block_id, fields, runtime, **kwargs)
@strip_key
def copy_from_template(self, source_keys, dest_key, user_id, **kwargs):
"""
        See :py:meth:`SplitMongoModuleStore.copy_from_template`
"""
store = self._verify_modulestore_support(dest_key.course_key, 'copy_from_template')
return store.copy_from_template(source_keys, dest_key, user_id)
@strip_key
@prepare_asides
def update_item(self, xblock, user_id, allow_not_found=False, **kwargs):
"""
        Update the persisted xblock to match the given one for all types of fields
        (content, children, and metadata), attributing the change to the given user.
"""
store = self._verify_modulestore_support(xblock.location.course_key, 'update_item')
return store.update_item(xblock, user_id, allow_not_found, **kwargs)
@strip_key
def delete_item(self, location, user_id, **kwargs):
"""
Delete the given item from persistence. kwargs allow modulestore specific parameters.
"""
store = self._verify_modulestore_support(location.course_key, 'delete_item')
return store.delete_item(location, user_id=user_id, **kwargs)
def revert_to_published(self, location, user_id):
"""
Reverts an item to its last published version (recursively traversing all of its descendants).
If no published version exists, an InvalidVersionError is thrown.
If a published version exists but there is no draft version of this item or any of its descendants, this
method is a no-op.
:raises InvalidVersionError: if no published version exists for the location specified
"""
store = self._verify_modulestore_support(location.course_key, 'revert_to_published')
return store.revert_to_published(location, user_id)
def close_all_connections(self):
"""
Close all db connections
"""
for modulestore in self.modulestores:
modulestore.close_connections()
def _drop_database(self, database=True, collections=True, connections=True):
"""
A destructive operation to drop the underlying database and close all connections.
Intended to be used by test code for cleanup.
If database is True, then this should drop the entire database.
Otherwise, if collections is True, then this should drop all of the collections used
by this modulestore.
Otherwise, the modulestore should remove all data from the collections.
If connections is True, then close the connection to the database as well.
"""
for modulestore in self.modulestores:
# drop database if the store supports it (read-only stores do not)
if hasattr(modulestore, '_drop_database'):
modulestore._drop_database(database, collections, connections) # pylint: disable=protected-access
@strip_key
def create_xblock(self, runtime, course_key, block_type, block_id=None, fields=None, **kwargs):
"""
Create the new xmodule but don't save it. Returns the new module.
Args:
            runtime: :py:class:`xblock.runtime` from another xblock in the same course. Providing this
                significantly speeds up processing (inheritance and subsequent persistence)
            course_key: :py:class:`opaque_keys.CourseKey`
            block_type: :py:class:`str`: the string identifying the xblock type
            block_id: the string uniquely identifying the block within the given course
            fields: :py:class:`dict` of field_name, value pairs for initializing the xblock fields. Values
                should be the pythonic types, not the json-serialized ones.
"""
store = self._verify_modulestore_support(course_key, 'create_xblock')
return store.create_xblock(runtime, course_key, block_type, block_id, fields or {}, **kwargs)
@strip_key
def get_courses_for_wiki(self, wiki_slug, **kwargs):
"""
Return the list of courses which use this wiki_slug
:param wiki_slug: the course wiki root slug
:return: list of course keys
"""
courses = []
for modulestore in self.modulestores:
courses.extend(modulestore.get_courses_for_wiki(wiki_slug, **kwargs))
return courses
def heartbeat(self):
"""
Delegate to each modulestore and package the results for the caller.
"""
# could be done in parallel threads if needed
return dict(
itertools.chain.from_iterable(
store.heartbeat().iteritems()
for store in self.modulestores
)
)
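    # The chained iteration above merges each store's {name: is_alive} mapping
    # into a single dict; a sketch of the result shape (the store names here
    # are assumptions):
    #
    #   {'mongo': True, 'split': True, 'xml': True}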
def has_published_version(self, xblock):
"""
        Returns whether this xblock has a published version, i.e. whether some
        version of it has been deployed to the LMS (the current draft may or
        may not differ from that published version).
"""
course_id = xblock.scope_ids.usage_id.course_key
store = self._get_modulestore_for_courselike(course_id)
return store.has_published_version(xblock)
@strip_key
def publish(self, location, user_id, **kwargs):
"""
Save a current draft to the underlying modulestore
Returns the newly published item.
"""
store = self._verify_modulestore_support(location.course_key, 'publish')
return store.publish(location, user_id, **kwargs)
@strip_key
def unpublish(self, location, user_id, **kwargs):
"""
        Remove the published version of the item from the underlying modulestore.
        Returns the newly unpublished item.
"""
store = self._verify_modulestore_support(location.course_key, 'unpublish')
return store.unpublish(location, user_id, **kwargs)
def convert_to_draft(self, location, user_id):
"""
Create a copy of the source and mark its revision as draft.
Note: This method is to support the Mongo Modulestore and may be deprecated.
:param location: the location of the source (its revision must be None)
"""
store = self._verify_modulestore_support(location.course_key, 'convert_to_draft')
return store.convert_to_draft(location, user_id)
def has_changes(self, xblock):
"""
Checks if the given block has unpublished changes
:param xblock: the block to check
:return: True if the draft and published versions differ
"""
store = self._verify_modulestore_support(xblock.location.course_key, 'has_changes')
return store.has_changes(xblock)
def check_supports(self, course_key, method):
"""
        Verifies whether the modulestore for a particular course supports a feature.
        Returns True/False accordingly.
"""
try:
self._verify_modulestore_support(course_key, method)
return True
except NotImplementedError:
return False
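    # Capability-detection sketch (illustrative; `mixed_store`, the keys and
    # user id are assumptions, not part of this module):
    #
    #   if mixed_store.check_supports(course_key, 'publish'):
    #       mixed_store.publish(block_location, user_id)
    #   else:
    #       pass  # fall back for stores without draft/publish support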
def _verify_modulestore_support(self, course_key, method):
"""
        Finds and returns the store that contains the course for the given location, verifying
        that the store supports the given method.
Raises NotImplementedError if the found store does not support the given method.
"""
store = self._get_modulestore_for_courselike(course_key)
if hasattr(store, method):
return store
else:
raise NotImplementedError(u"Cannot call {} on store {}".format(method, store))
@property
def default_modulestore(self):
"""
Return the default modulestore
"""
thread_local_default_store = getattr(self.thread_cache, 'default_store', None)
if thread_local_default_store:
# return the thread-local cache, if found
return thread_local_default_store
else:
# else return the default store
return self.modulestores[0]
@contextmanager
def default_store(self, store_type):
"""
A context manager for temporarily changing the default store in the Mixed modulestore to the given store type
"""
# find the store corresponding to the given type
store = next((store for store in self.modulestores if store.get_modulestore_type() == store_type), None)
if not store:
raise Exception(u"Cannot find store of type {}".format(store_type))
prev_thread_local_store = getattr(self.thread_cache, 'default_store', None)
try:
self.thread_cache.default_store = store
yield
finally:
self.thread_cache.default_store = prev_thread_local_store
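    # Context-manager sketch (illustrative; `mixed_store` and the course values
    # are assumptions): temporarily route creation to the split store, with the
    # previous thread-local default restored on exit:
    #
    #   with mixed_store.default_store(ModuleStoreEnum.Type.split):
    #       course = mixed_store.create_course('edX', 'Demo', '2014', user_id)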
@contextmanager
def branch_setting(self, branch_setting, course_id=None):
"""
        A context manager for temporarily setting the branch value for the given course's store
        to the given branch_setting. If course_id is None, the default store is used.
"""
store = self._verify_modulestore_support(course_id, 'branch_setting')
with store.branch_setting(branch_setting, course_id):
yield
@contextmanager
def bulk_operations(self, course_id, emit_signals=True):
"""
A context manager for notifying the store of bulk operations.
If course_id is None, the default store is used.
"""
store = self._get_modulestore_for_courselike(course_id)
with store.bulk_operations(course_id, emit_signals):
yield
def ensure_indexes(self):
"""
Ensure that all appropriate indexes are created that are needed by this modulestore, or raise
an exception if unable to.
This method is intended for use by tests and administrative commands, and not
to be run during server startup.
"""
for store in self.modulestores:
store.ensure_indexes()
| agpl-3.0 |
cezary12/blaze | blaze/catalog/array_provider.py | 13 | 5440 | from __future__ import absolute_import, division, print_function
import os
from os import path
import glob
import shutil
import tempfile
from dynd import nd, ndt
from .. import array
def load_json_file_array(root, array_name):
# Load the datashape
dsfile = root + '.datashape'
if not path.isfile(dsfile):
dsfile = path.dirname(root) + '.datashape'
if not path.isfile(dsfile):
raise Exception('No datashape file found for array %s'
% array_name)
with open(dsfile) as f:
dt = ndt.type(f.read())
# Load the JSON
# TODO: Add stream support to parse_json for compressed JSON, etc.
arr = nd.parse_json(dt, nd.memmap(root + '.json'))
return array(arr)
def load_json_directory_array(root, array_name):
# Load the datashape
dsfile = root + '.datashape'
if not path.isfile(dsfile):
raise Exception('No datashape file found for array %s' % array_name)
with open(dsfile) as f:
dt = ndt.type(f.read())
# Scan for JSON files, assuming they're just #.json
# Sort them numerically
files = sorted([(int(path.splitext(path.basename(x))[0]), x)
for x in glob.glob(path.join(root, '*.json'))])
files = [x[1] for x in files]
# Make an array with an extra fixed dimension, then
# read a JSON file into each element of that array
dt = ndt.make_fixed_dim(len(files), dt)
arr = nd.empty(dt)
for i, fname in enumerate(files):
nd.parse_json(arr[i], nd.memmap(fname))
arr.flag_as_immutable()
return array(arr)
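# Illustrative helper (an assumption, not part of the original module): the
# fixed-dimension pattern used above, factored out for any list of JSON paths.
def _parse_json_files(dt, paths):
    # Wrap the element type in a fixed dimension sized to the input list,
    # then parse one memory-mapped file into each element, exactly as the
    # loaders above do.
    fixed_dt = ndt.make_fixed_dim(len(paths), dt)
    arr = nd.empty(fixed_dt)
    for i, fname in enumerate(paths):
        nd.parse_json(arr[i], nd.memmap(fname))
    arr.flag_as_immutable()
    return arr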
def load_json_file_list_array(root, array_name):
# Load the datashape
dsfile = root + '.datashape'
if not path.isfile(dsfile):
raise Exception('No datashape file found for array %s' % array_name)
with open(dsfile) as f:
dt = ndt.type(f.read())
    # Scan for JSON files -- no assumption on file suffix.
    # Open the list of files and load it into a Python list.
files = root + '.files'
with open(files) as f:
l_files = [fs.strip() for fs in f]
# Make an array with an extra fixed dimension, then
# read a JSON file into each element of that array
dt = ndt.make_fixed_dim(len(l_files), dt)
arr = nd.empty(dt)
for i, fname in enumerate(l_files):
with open(fname) as f:
nd.parse_json(arr[i], f.read())
arr.flag_as_immutable()
return array(arr)
class json_array_provider:
def __init__(self, root_dir):
if not path.isdir(root_dir):
raise ValueError('%s is not a valid directory' % root_dir)
self.root_dir = root_dir
self.array_cache = {}
self.session_dirs = {}
def __call__(self, array_name):
# First check that the .json file at the requested address exists
root = path.join(self.root_dir, array_name[1:])
if (not path.isfile(root + '.json') and
not path.isfile(root + '.deferred.json') and
not path.isfile(root + '.files') and
not path.isdir(root)):
return None
# If we've already read this array into cache, just return it
print('Cache has keys %s' % self.array_cache.keys())
print('Checking cache for %s' % array_name)
if array_name in self.array_cache:
print('Returning cached array %s' % array_name)
return self.array_cache[array_name]
if path.isfile(root + '.json'):
print('Loading array %s from file %s'
% (array_name, root + '.json'))
arr = load_json_file_array(root, array_name)
elif path.isfile(root + '.deferred.json'):
print('Loading deferred array %s from file %s'
% (array_name, root + '.deferred.json'))
with open(root + '.deferred.json') as f:
print(f.read())
raise RuntimeError('TODO: Deferred loading not implemented!')
elif path.isfile(root + '.files'):
print('Loading files from file list: %s' % (root + '.files'))
arr = load_json_file_list_array(root, array_name)
else:
print('Loading array %s from directory %s' % (array_name, root))
arr = load_json_directory_array(root, array_name)
self.array_cache[array_name] = arr
return arr
def create_session_dir(self):
d = tempfile.mkdtemp(prefix='.session_', dir=self.root_dir)
session_name = '/' + os.path.basename(d)
if type(session_name) is unicode:
session_name = session_name.encode('utf-8')
self.session_dirs[session_name] = d
return session_name, d
def delete_session_dir(self, session_name):
shutil.rmtree(self.session_dirs[session_name])
del self.session_dirs[session_name]
def create_deferred_array_filename(self, session_name,
prefix, cache_array):
d = tempfile.mkstemp(suffix='.deferred.json', prefix=prefix,
dir=self.session_dirs[session_name], text=True)
array_name = os.path.basename(d[1])
array_name = session_name + '/' + array_name[:array_name.find('.')]
if type(array_name) is unicode:
array_name = array_name.encode('utf-8')
if cache_array is not None:
self.array_cache[array_name] = cache_array
return (os.fdopen(d[0], "w"), array_name, d[1])
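# Usage sketch (hypothetical catalog layout; illustrative only):
#
#   provider = json_array_provider('/data/catalog')
#   arr = provider('/prices')   # resolves prices.json, prices.files,
#                               # prices.deferred.json or a prices/ directory
#   session_name, session_dir = provider.create_session_dir()
#   ...
#   provider.delete_session_dir(session_name)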
| bsd-3-clause |
rpgplanet/django-avatar | avatar/settings.py | 37 | 1294 | from django.conf import settings
try:
from PIL import Image
dir(Image) # Placate PyFlakes
except ImportError:
import Image
AVATAR_DEFAULT_SIZE = getattr(settings, 'AVATAR_DEFAULT_SIZE', 80)
AUTO_GENERATE_AVATAR_SIZES = getattr(settings, 'AUTO_GENERATE_AVATAR_SIZES', (AVATAR_DEFAULT_SIZE,))
AVATAR_RESIZE_METHOD = getattr(settings, 'AVATAR_RESIZE_METHOD', Image.ANTIALIAS)
AVATAR_STORAGE_DIR = getattr(settings, 'AVATAR_STORAGE_DIR', 'avatars')
AVATAR_GRAVATAR_BACKUP = getattr(settings, 'AVATAR_GRAVATAR_BACKUP', True)
AVATAR_GRAVATAR_DEFAULT = getattr(settings, 'AVATAR_GRAVATAR_DEFAULT', None)
AVATAR_DEFAULT_URL = getattr(settings, 'AVATAR_DEFAULT_URL', 'avatar/img/default.jpg')
AVATAR_MAX_AVATARS_PER_USER = getattr(settings, 'AVATAR_MAX_AVATARS_PER_USER', 42)
AVATAR_MAX_SIZE = getattr(settings, 'AVATAR_MAX_SIZE', 1024 * 1024)
AVATAR_THUMB_FORMAT = getattr(settings, 'AVATAR_THUMB_FORMAT', "JPEG")
AVATAR_THUMB_QUALITY = getattr(settings, 'AVATAR_THUMB_QUALITY', 85)
AVATAR_HASH_FILENAMES = getattr(settings, 'AVATAR_HASH_FILENAMES', False)
AVATAR_HASH_USERDIRNAMES = getattr(settings, 'AVATAR_HASH_USERDIRNAMES', False)
AVATAR_ALLOWED_FILE_EXTS = getattr(settings, 'AVATAR_ALLOWED_FILE_EXTS', None)
AVATAR_CACHE_TIMEOUT = getattr(settings, 'AVATAR_CACHE_TIMEOUT', 60*60)
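# Every value above follows the getattr(settings, NAME, default) pattern, so a
# host project can override it from its own settings module; a sketch
# (hypothetical project settings, not part of this app):
#
#   AVATAR_DEFAULT_SIZE = 120
#   AUTO_GENERATE_AVATAR_SIZES = (48, 80, 120)
#   AVATAR_ALLOWED_FILE_EXTS = ('.jpg', '.png')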
| bsd-3-clause |
cclauss/heroku-buildpack-python | vendor/pip-pop/pip/_vendor/requests/packages/chardet/hebrewprober.py | 2929 | 13359 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letters scores maintained and Both
# model probers scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mLogicalProber = None
self._mVisualProber = None
self.reset()
def reset(self):
self._mFinalCharLogicalScore = 0
self._mFinalCharVisualScore = 0
# The two last characters seen in the previous buffer,
# mPrev and mBeforePrev are initialized to space in order to simulate
# a word delimiter at the beginning of the data
self._mPrev = ' '
self._mBeforePrev = ' '
# These probers are owned by the group prober.
def set_model_probers(self, logicalProber, visualProber):
self._mLogicalProber = logicalProber
self._mVisualProber = visualProber
def is_final(self, c):
return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
FINAL_TSADI]
def is_non_final(self, c):
# The normal Tsadi is not a good Non-Final letter due to words like
# 'lechotet' (to chat) containing an apostrophe after the tsadi. This
# apostrophe is converted to a space in FilterWithoutEnglishLetters
# causing the Non-Final tsadi to appear at an end of a word even
# though this is not the case in the original text.
# The letters Pe and Kaf rarely display a related behavior of not being
# a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
# for example legally end with a Non-Final Pe or Kaf. However, the
# benefit of these letters as Non-Final letters outweighs the damage
# since these words are quite rare.
return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]
def feed(self, aBuf):
# Final letter analysis for logical-visual decision.
# Look for evidence that the received buffer is either logical Hebrew
# or visual Hebrew.
# The following cases are checked:
# 1) A word longer than 1 letter, ending with a final letter. This is
# an indication that the text is laid out "naturally" since the
# final letter really appears at the end. +1 for logical score.
# 2) A word longer than 1 letter, ending with a Non-Final letter. In
# normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
# should not end with the Non-Final form of that letter. Exceptions
# to this rule are mentioned above in isNonFinal(). This is an
# indication that the text is laid out backwards. +1 for visual
# score
# 3) A word longer than 1 letter, starting with a final letter. Final
# letters should not appear at the beginning of a word. This is an
# indication that the text is laid out backwards. +1 for visual
# score.
#
# The visual score and logical score are accumulated throughout the
# text and are finally checked against each other in GetCharSetName().
# No checking for final letters in the middle of words is done since
# that case is not an indication for either Logical or Visual text.
#
# We automatically filter out all 7-bit characters (replace them with
# spaces) so the word boundary detection works properly. [MAP]
if self.get_state() == eNotMe:
# Both model probers say it's not them. No reason to continue.
return eNotMe
aBuf = self.filter_high_bit_only(aBuf)
for cur in aBuf:
if cur == ' ':
# We stand on a space - a word just ended
if self._mBeforePrev != ' ':
# next-to-last char was not a space so self._mPrev is not a
# 1 letter word
if self.is_final(self._mPrev):
# case (1) [-2:not space][-1:final letter][cur:space]
self._mFinalCharLogicalScore += 1
elif self.is_non_final(self._mPrev):
# case (2) [-2:not space][-1:Non-Final letter][
# cur:space]
self._mFinalCharVisualScore += 1
else:
# Not standing on a space
if ((self._mBeforePrev == ' ') and
(self.is_final(self._mPrev)) and (cur != ' ')):
# case (3) [-2:space][-1:final letter][cur:not space]
self._mFinalCharVisualScore += 1
self._mBeforePrev = self._mPrev
self._mPrev = cur
# Forever detecting, till the end or until both model probers return
# eNotMe (handled above)
return eDetecting
def get_charset_name(self):
# Make the decision: is it Logical or Visual?
# If the final letter score distance is dominant enough, rely on it.
finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
if finalsub >= MIN_FINAL_CHAR_DISTANCE:
return LOGICAL_HEBREW_NAME
if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
return VISUAL_HEBREW_NAME
# It's not dominant enough, try to rely on the model scores instead.
modelsub = (self._mLogicalProber.get_confidence()
- self._mVisualProber.get_confidence())
if modelsub > MIN_MODEL_DISTANCE:
return LOGICAL_HEBREW_NAME
if modelsub < -MIN_MODEL_DISTANCE:
return VISUAL_HEBREW_NAME
# Still no good, back to final letter distance, maybe it'll save the
# day.
if finalsub < 0.0:
return VISUAL_HEBREW_NAME
# (finalsub > 0 - Logical) or (don't know what to do) default to
# Logical.
return LOGICAL_HEBREW_NAME
def get_state(self):
# Remain active as long as any of the model probers are active.
if (self._mLogicalProber.get_state() == eNotMe) and \
(self._mVisualProber.get_state() == eNotMe):
return eNotMe
return eDetecting
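# Illustrative wiring sketch (an assumption, not part of the original module):
# how a group prober, as described above, attaches this helper to its two
# windows-1255 model probers and reads back the arbitrated charset name.
def _example_detect_direction(logical_prober, visual_prober, data):
    # The HebrewProber never answers alone; it arbitrates between the two
    # model probers registered via set_model_probers().
    prober = HebrewProber()
    prober.set_model_probers(logical_prober, visual_prober)
    prober.feed(data)  # accumulates final-letter scores per the rules above
    return prober.get_charset_name()  # "windows-1255" or "ISO-8859-8"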
| mit |